ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash |
---|---|---|---|---|---|---|---|---|---|---|
0dc90f43-3fd2-43ff-b407-9a4a3aaa2aa1 | cpp | tensorflow/tensorflow | shape_tree | third_party/xla/xla/shape_tree.cc | third_party/xla/xla/shape_tree_test.cc | #include "xla/shape_tree.h"
#include <cstddef>
#include <cstdint>
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace internal {
IndexTable::IndexTable(const Shape& shape) : entries_(1) {
size_t next_node_id = 0;
CreateEntry(entries_[0], shape, next_node_id);
}
void IndexTable::CreateEntry(Entry& entry, const Shape& shape,
size_t& next_node_id) {
entry.node_id = next_node_id++;
if (!shape.IsTuple()) return;
size_t children_start_id = entries_.size();
entry.children_start_id = children_start_id;
entries_.resize(entries_.size() + shape.tuple_shapes_size());
for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
CreateEntry(entries_[children_start_id + i], shape.tuple_shapes(i),
next_node_id);
}
}
const IndexTable::Entry& IndexTable::operator[](ShapeIndexView index) const {
const Entry* result = &entries_.front();
for (int64_t i : index) {
CHECK_GE(result->children_start_id, 0);
result = &entries_[result->children_start_id + i];
}
return *result;
}
}
} | #include "xla/shape_tree.h"
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class ShapeTreeTest : public ::testing::Test {
protected:
ShapeTreeTest() {
array_shape_ = ShapeUtil::MakeShape(F32, {42, 42, 123});
tuple_shape_ =
ShapeUtil::MakeTupleShape({array_shape_, array_shape_, array_shape_});
nested_tuple_shape_ = ShapeUtil::MakeTupleShape(
{array_shape_, ShapeUtil::MakeTupleShape({array_shape_, array_shape_}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape_, array_shape_}),
array_shape_})});
}
void TestShapeConstructor(const Shape& shape, int expected_num_nodes);
void TestInitValueConstructor(const Shape& shape, int expected_num_nodes);
Shape array_shape_;
Shape tuple_shape_;
Shape nested_tuple_shape_;
};
TEST_F(ShapeTreeTest, DefaultConstructor) {
ShapeTree<int> int_tree;
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(int_tree.shape()));
ShapeTree<bool> bool_tree;
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(bool_tree.shape()));
}
void ShapeTreeTest::TestShapeConstructor(const Shape& shape,
int expected_num_nodes) {
ShapeTree<int> int_tree(shape);
int num_nodes = 0;
int_tree.ForEachElement([&num_nodes](const ShapeIndex& , int data) {
EXPECT_EQ(0, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
ShapeTree<bool> bool_tree(shape);
num_nodes = 0;
bool_tree.ForEachElement(
[&num_nodes](const ShapeIndex& , bool data) {
EXPECT_EQ(false, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
}
TEST_F(ShapeTreeTest, ShapeConstructor) {
TestShapeConstructor(array_shape_, 1);
TestShapeConstructor(tuple_shape_, 4);
TestShapeConstructor(nested_tuple_shape_, 10);
}
void ShapeTreeTest::TestInitValueConstructor(const Shape& shape,
int expected_num_nodes) {
ShapeTree<int> tree(shape, 42);
int num_nodes = 0;
tree.ForEachElement([&num_nodes](const ShapeIndex& , int data) {
EXPECT_EQ(42, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
num_nodes = 0;
tree.ForEachMutableElement(
[&num_nodes](const ShapeIndex& , int* data) {
EXPECT_EQ(42, *data);
*data = num_nodes;
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
num_nodes = 0;
tree.ForEachElement([&num_nodes](const ShapeIndex& , int data) {
EXPECT_EQ(num_nodes, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
}
TEST_F(ShapeTreeTest, InitValueConstructor) {
TestInitValueConstructor(array_shape_, 1);
TestInitValueConstructor(tuple_shape_, 4);
TestInitValueConstructor(nested_tuple_shape_, 10);
}
TEST_F(ShapeTreeTest, EmptyTupleMustHaveNoLeaves) {
ShapeTree<int> shape_tree{ShapeUtil::MakeTupleShape({})};
EXPECT_EQ(0, shape_tree.leaf_count());
}
TEST_F(ShapeTreeTest, NestedEmptyTuple) {
Shape shape(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({}), array_shape_}));
ShapeTree<int> shape_tree{shape};
EXPECT_EQ(ShapeUtil::GetLeafCount(shape), shape_tree.leaf_count());
}
TEST_F(ShapeTreeTest, ArrayShape) {
ShapeTree<int> shape_tree{array_shape_};
*shape_tree.mutable_element({}) = 42;
EXPECT_EQ(42, shape_tree.element({}));
*shape_tree.mutable_element({}) = 123;
EXPECT_EQ(123, shape_tree.element({}));
EXPECT_TRUE(ShapeUtil::Compatible(array_shape_, shape_tree.shape()));
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(123, copy.element({}));
*copy.mutable_element({}) = 99;
EXPECT_EQ(99, copy.element({}));
EXPECT_EQ(123, shape_tree.element({}));
copy = shape_tree;
EXPECT_EQ(123, copy.element({}));
}
TEST_F(ShapeTreeTest, TupleShape) {
ShapeTree<int> shape_tree{tuple_shape_};
*shape_tree.mutable_element({}) = 1;
*shape_tree.mutable_element({0}) = 42;
*shape_tree.mutable_element({1}) = 123;
*shape_tree.mutable_element({2}) = -100;
EXPECT_EQ(1, shape_tree.element({}));
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1}));
EXPECT_EQ(-100, shape_tree.element({2}));
EXPECT_TRUE(ShapeUtil::Compatible(tuple_shape_, shape_tree.shape()));
int sum = 0;
shape_tree.ForEachElement(
[&sum](const ShapeIndex& , int data) { sum += data; });
EXPECT_EQ(66, sum);
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(1, copy.element({}));
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1}));
EXPECT_EQ(-100, copy.element({2}));
shape_tree.ForEachMutableElement(
[](const ShapeIndex& , int* data) { *data = 0; });
EXPECT_EQ(0, shape_tree.element({}));
EXPECT_EQ(0, shape_tree.element({0}));
EXPECT_EQ(0, shape_tree.element({1}));
EXPECT_EQ(0, shape_tree.element({2}));
EXPECT_EQ(1, copy.element({}));
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1}));
EXPECT_EQ(-100, copy.element({2}));
copy = shape_tree;
EXPECT_EQ(0, copy.element({}));
EXPECT_EQ(0, copy.element({0}));
EXPECT_EQ(0, copy.element({1}));
EXPECT_EQ(0, copy.element({2}));
}
TEST_F(ShapeTreeTest, NestedTupleShape) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
*shape_tree.mutable_element({0}) = 42;
*shape_tree.mutable_element({1, 1}) = 123;
*shape_tree.mutable_element({2, 0, 1}) = -100;
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1, 1}));
EXPECT_EQ(-100, shape_tree.element({2, 0, 1}));
EXPECT_TRUE(ShapeUtil::Compatible(nested_tuple_shape_, shape_tree.shape()));
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1, 1}));
EXPECT_EQ(-100, copy.element({2, 0, 1}));
*copy.mutable_element({0}) = 1;
*copy.mutable_element({1, 1}) = 2;
*copy.mutable_element({2, 0, 1}) = 3;
EXPECT_EQ(1, copy.element({0}));
EXPECT_EQ(2, copy.element({1, 1}));
EXPECT_EQ(3, copy.element({2, 0, 1}));
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1, 1}));
EXPECT_EQ(-100, shape_tree.element({2, 0, 1}));
copy = shape_tree;
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1, 1}));
EXPECT_EQ(-100, copy.element({2, 0, 1}));
}
TEST_F(ShapeTreeTest, InvalidIndexingTuple) {
ShapeTree<int> shape_tree{tuple_shape_};
#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({4}), "");
#endif
}
TEST_F(ShapeTreeTest, InvalidIndexingNestedTuple) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({0, 0}), "");
#endif
}
TEST_F(ShapeTreeTest, ShapeTreeOfNonCopyableType) {
ShapeTree<std::unique_ptr<int>> shape_tree{tuple_shape_};
EXPECT_EQ(shape_tree.element({2}).get(), nullptr);
*shape_tree.mutable_element({2}) = std::make_unique<int>(42);
EXPECT_EQ(*shape_tree.element({2}), 42);
}
TEST_F(ShapeTreeTest, CopySubtreeFromArrayShape) {
ShapeTree<int> source(array_shape_);
*source.mutable_element({}) = 42;
ShapeTree<int> destination(array_shape_, 123);
EXPECT_EQ(destination.element({}), 123);
destination.CopySubtreeFrom(source, {},
{});
EXPECT_EQ(destination.element({}), 42);
}
TEST_F(ShapeTreeTest, FullCopySubtreeFromTupleShape) {
ShapeTree<int> source(tuple_shape_);
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
*source.mutable_element({2}) = 13;
ShapeTree<int> destination(tuple_shape_, 0);
destination.CopySubtreeFrom(source, {},
{});
EXPECT_EQ(destination.element({}), 10);
EXPECT_EQ(destination.element({0}), 11);
EXPECT_EQ(destination.element({1}), 12);
EXPECT_EQ(destination.element({2}), 13);
}
TEST_F(ShapeTreeTest, SingleElementCopySubtreeFromTupleShape) {
ShapeTree<int> source(tuple_shape_);
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
*source.mutable_element({2}) = 13;
ShapeTree<int> destination(tuple_shape_, 0);
destination.CopySubtreeFrom(source, {0},
{1});
EXPECT_EQ(destination.element({}), 0);
EXPECT_EQ(destination.element({0}), 0);
EXPECT_EQ(destination.element({1}), 11);
EXPECT_EQ(destination.element({2}), 0);
}
TEST_F(ShapeTreeTest, CopySubtreeIntoNestedShape) {
ShapeTree<int> source(
ShapeUtil::MakeTupleShape({array_shape_, array_shape_}));
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
ShapeTree<int> destination(nested_tuple_shape_, 0);
destination.CopySubtreeFrom(source, {},
{2, 0});
EXPECT_EQ(destination.element({}), 0);
EXPECT_EQ(destination.element({0}), 0);
EXPECT_EQ(destination.element({1}), 0);
EXPECT_EQ(destination.element({1, 0}), 0);
EXPECT_EQ(destination.element({1, 1}), 0);
EXPECT_EQ(destination.element({2}), 0);
EXPECT_EQ(destination.element({2, 0}), 10);
EXPECT_EQ(destination.element({2, 0, 0}), 11);
EXPECT_EQ(destination.element({2, 0, 1}), 12);
EXPECT_EQ(destination.element({2, 1}), 0);
}
TEST_F(ShapeTreeTest, CopySubtreeFromNestedShape) {
ShapeTree<int> source(nested_tuple_shape_, 42);
*source.mutable_element({1}) = 10;
*source.mutable_element({1, 0}) = 11;
*source.mutable_element({1, 1}) = 12;
ShapeTree<int> destination(
ShapeUtil::MakeTupleShape({array_shape_, array_shape_}), 0);
destination.CopySubtreeFrom(source, {1},
{});
EXPECT_EQ(destination.element({}), 10);
EXPECT_EQ(destination.element({0}), 11);
EXPECT_EQ(destination.element({1}), 12);
}
TEST_F(ShapeTreeTest, OperatorEquals) {
{
ShapeTree<int> a(array_shape_, 123);
ShapeTree<int> b(array_shape_, 42);
ShapeTree<int> c(array_shape_, 42);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(b == c);
}
{
ShapeTree<int> a(tuple_shape_);
*a.mutable_element({}) = 10;
*a.mutable_element({0}) = 11;
*a.mutable_element({1}) = 12;
ShapeTree<int> b(tuple_shape_);
*b.mutable_element({}) = 10;
*b.mutable_element({0}) = 42;
*b.mutable_element({1}) = 11;
ShapeTree<int> c(tuple_shape_);
*c.mutable_element({}) = 10;
*c.mutable_element({0}) = 42;
*c.mutable_element({1}) = 11;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(b == c);
EXPECT_FALSE(b != c);
}
}
TEST_F(ShapeTreeTest, ConstructWithPointerToShape) {
ShapeTree<int> t(&nested_tuple_shape_, 42);
int num_nodes = 0;
t.ForEachElement([&num_nodes](const ShapeIndex& , int data) {
EXPECT_EQ(42, data);
++num_nodes;
});
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, CopyWithPointerToShape) {
ShapeTree<int> source(&nested_tuple_shape_, 0);
ShapeTree<int> dest(source);
EXPECT_EQ(&dest.shape(), &nested_tuple_shape_);
}
TEST_F(ShapeTreeTest, CopyAssignWithPointerToShape) {
ShapeTree<int> source(&nested_tuple_shape_, 0);
ShapeTree<int> dest;
dest = source;
EXPECT_EQ(&dest.shape(), &nested_tuple_shape_);
}
TEST_F(ShapeTreeTest, IterateSimple) {
ShapeTree<int> t(nested_tuple_shape_, 42);
int num_nodes = 0;
for (auto index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
++num_nodes;
}
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, ConstIterate) {
const ShapeTree<int> t(nested_tuple_shape_, 42);
int num_nodes = 0;
for (const auto& index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
++num_nodes;
}
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, IterateAndMutate) {
ShapeTree<int> t(nested_tuple_shape_, 42);
int i = 0;
for (auto& index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
if (i == 1) {
index_to_data.second = 98;
}
++i;
}
(*t.begin()).second = 78;
EXPECT_EQ(78, (*t.begin()).second);
i = 0;
for (auto& index_to_data : t) {
if (i == 0) {
EXPECT_EQ(78, index_to_data.second);
} else if (i == 1) {
EXPECT_EQ(98, index_to_data.second);
} else {
EXPECT_EQ(42, index_to_data.second);
}
++i;
}
EXPECT_EQ(78, (*t.begin()).second);
EXPECT_EQ(98, (*std::next(t.begin())).second);
}
TEST_F(ShapeTreeTest, IterateOrder) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto index_to_data : t) {
v.push_back(index_to_data.first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{{},
{0},
{1},
{1, 0},
{1, 1},
{2},
{2, 0},
{2, 0, 0},
{2, 0, 1},
{2, 1}}));
}
TEST_F(ShapeTreeTest, ReverseIterateOrder) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto it = t.rbegin(); it != t.rend(); ++it) {
v.push_back(it->first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{2, 1},
{2, 0, 1},
{2, 0, 0},
{2, 0},
{2},
{1, 1},
{1, 0},
{1},
{0},
{},
}));
}
TEST_F(ShapeTreeTest, Find) {
ShapeTree<int> t(nested_tuple_shape_, 42);
auto found = t.find({1, 0});
EXPECT_NE(found, t.end());
EXPECT_EQ(found->first, ShapeIndex({1, 0}));
EXPECT_EQ(found->second, 42);
}
TEST_F(ShapeTreeTest, ConstFind) {
const ShapeTree<int> t(nested_tuple_shape_, 42);
auto found = t.find({1, 0});
EXPECT_NE(found, t.end());
EXPECT_EQ(found->first, ShapeIndex({1, 0}));
EXPECT_EQ(found->second, 42);
}
TEST_F(ShapeTreeTest, IterateOrderLeaves) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
const auto& leaves = t.leaves();
v.reserve(t.leaf_count());
for (auto index_to_data : leaves) {
v.push_back(index_to_data.first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{0}, {1, 0}, {1, 1}, {2, 0, 0}, {2, 0, 1}, {2, 1}}));
}
TEST_F(ShapeTreeTest, ReverseIterateOrderLeaves) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto it = t.leaf_rbegin(); it != t.leaf_rend(); ++it) {
v.push_back(it->first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{2, 1},
{2, 0, 1},
{2, 0, 0},
{1, 1},
{1, 0},
{0},
}));
}
void BM_Construct(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
for (auto s : state) {
ShapeTree<int> shape_tree(shape);
}
}
void BM_ConstructUnowned(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
for (auto s : state) {
ShapeTree<int> shape_tree(&shape);
}
}
void BM_Copy(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
ShapeTree<int> copy = shape_tree;
tsl::testing::DoNotOptimize(copy);
}
}
void BM_Move(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
ShapeTree<int> copy = std::move(shape_tree);
shape_tree = std::move(copy);
}
}
void BM_ForEach(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
shape_tree.ForEachMutableElement([](const ShapeIndex& index, int* data) {
tsl::testing::DoNotOptimize(index);
});
}
}
void BM_Iterate(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
for (auto& iter : shape_tree) {
tsl::testing::DoNotOptimize(iter.second);
}
}
}
#define BENCHMARK_WITH_ARGS(name) \
BENCHMARK(name)->ArgPair(2, 8)->ArgPair(1, 1000)
BENCHMARK_WITH_ARGS(BM_Construct);
BENCHMARK_WITH_ARGS(BM_ConstructUnowned);
BENCHMARK_WITH_ARGS(BM_Copy);
BENCHMARK_WITH_ARGS(BM_Move);
BENCHMARK_WITH_ARGS(BM_ForEach);
BENCHMARK_WITH_ARGS(BM_Iterate);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/shape_tree.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/shape_tree_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
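The shape_tree.cc row above flattens a nested tuple shape into a single `entries_` vector, assigning pre-order node ids and recording where each node's children start, so that a `ShapeIndex` lookup is just repeated `children_start_id + i` hops. Below is a minimal, self-contained C++ sketch of that same idea; `TupleNode`, `Entry`, and `FlatIndexTable` are illustrative stand-ins and not part of the XLA API.

```cpp
// Minimal sketch: flatten a nested tuple into one vector of entries, assign
// pre-order node ids, and record each node's children_start so that a shape
// index such as {1, 0} resolves with one vector hop per index element.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct TupleNode {                    // stand-in for a (possibly nested) tuple shape
  std::vector<TupleNode> children;    // empty => leaf (array-like node)
};

struct Entry {
  size_t node_id = 0;
  int64_t children_start = -1;        // -1 => leaf, no children
};

class FlatIndexTable {
 public:
  explicit FlatIndexTable(const TupleNode& root) : entries_(1) {
    size_t next_id = 0;
    CreateEntry(0, root, next_id);
  }

  // Resolve a shape index like {1, 0} to its flattened entry.
  const Entry& at(const std::vector<int64_t>& index) const {
    const Entry* e = &entries_.front();
    for (int64_t i : index) e = &entries_[e->children_start + i];
    return *e;
  }

 private:
  // Positions (not references) are passed down because resize() may
  // reallocate entries_ while the recursion is still running.
  void CreateEntry(size_t pos, const TupleNode& node, size_t& next_id) {
    entries_[pos].node_id = next_id++;
    if (node.children.empty()) return;
    const size_t start = entries_.size();
    entries_[pos].children_start = static_cast<int64_t>(start);
    entries_.resize(entries_.size() + node.children.size());
    for (size_t i = 0; i < node.children.size(); ++i) {
      CreateEntry(start + i, node.children[i], next_id);
    }
  }

  std::vector<Entry> entries_;
};

int main() {
  // (leaf, (leaf, leaf), leaf): pre-order ids are 0..5, so index {1, 0}
  // lands on node id 3.
  TupleNode root{{TupleNode{}, TupleNode{{TupleNode{}, TupleNode{}}}, TupleNode{}}};
  FlatIndexTable table(root);
  std::cout << table.at({1, 0}).node_id << "\n";  // prints 3
  return 0;
}
```

Passing a position rather than an `Entry&` into the recursion sidesteps reference invalidation across the `resize`; the XLA code above stays safe by finishing all writes through the reference before it resizes.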
3adfd356-0a6b-4561-8157-72882331708c | cpp | tensorflow/tensorflow | index_util | third_party/xla/xla/index_util.cc | third_party/xla/xla/index_util_test.cc | #include "xla/index_util.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
DimensionVector IndexUtil::LinearIndexToMultidimensionalIndex(
const Shape& shape, int64_t linear_index) {
DCHECK_GE(linear_index, 0);
DCHECK_LT(linear_index, ShapeUtil::ElementsIn(shape));
DimensionVector multi_index(shape.dimensions_size());
int64_t divisor = 1;
for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
multi_index[dimension] =
(linear_index / divisor) % shape.dimensions(dimension);
divisor *= shape.dimensions(dimension);
}
return multi_index;
}
bool IndexUtil::BumpIndices(const Shape& shape,
absl::Span<int64_t> indices) {
for (int64_t dimno = indices.size() - 1; dimno >= 0; --dimno) {
int64_t limit = shape.dimensions(dimno);
if (indices[dimno] + 1 < limit) {
indices[dimno]++;
std::fill(indices.begin() + dimno + 1, indices.end(), 0);
return true;
}
}
return false;
}
int64_t IndexUtil::GetDimensionStride(const Shape& shape,
int64_t dimension) {
int64_t stride = 1;
for (auto dim : LayoutUtil::MinorToMajor(shape)) {
if (dim == dimension) {
break;
}
stride *= shape.dimensions()[dim];
}
return stride;
}
bool IndexUtil::IndexInBounds(const Shape& shape,
absl::Span<const int64_t> index) {
int64_t rank = shape.rank();
const int64_t index_size = index.size();
if (rank != index_size) {
return false;
}
for (int64_t d = 0; d < rank; ++d) {
if (index[d] >= shape.dimensions(d)) {
return false;
}
}
return true;
}
int IndexUtil::CompareIndices(absl::Span<const int64_t> lhs,
absl::Span<const int64_t> rhs) {
int64_t rank = lhs.size();
const int64_t rhs_rank = rhs.size();
CHECK_EQ(rhs_rank, rank);
for (int64_t dim = 0; dim < rank; ++dim) {
if (lhs[dim] < rhs[dim]) {
return -1;
} else if (lhs[dim] > rhs[dim]) {
return 1;
}
}
return 0;
}
} | #include "xla/index_util.h"
#include <initializer_list>
#include <vector>
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
void SetMinorToMajorLayout(Shape* shape, std::vector<int64_t> dimensions) {
shape->mutable_layout()->clear_minor_to_major();
for (auto dimension : dimensions) {
shape->mutable_layout()->add_minor_to_major(dimension);
}
}
TEST(IndexUtilTest, VectorIndexing) {
Shape vector_shape = ShapeUtil::MakeShape(F32, {100});
EXPECT_EQ(42,
IndexUtil::MultidimensionalIndexToLinearIndex(vector_shape, {42}));
auto multi_index =
IndexUtil::LinearIndexToMultidimensionalIndex(vector_shape, 42);
EXPECT_EQ(1, multi_index.size());
EXPECT_EQ(42, multi_index[0]);
}
TEST(IndexUtilTest, MatrixIndexingRowMajor) {
Shape matrix_shape_01 = ShapeUtil::MakeShape(F32, {10, 20});
SetMinorToMajorLayout(&matrix_shape_01, {0, 1});
EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{0, 0}));
EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{9, 19}));
EXPECT_EQ(53, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{3, 5}));
EXPECT_THAT(
IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_01, 53),
testing::ElementsAre(3, 5));
}
TEST(IndexUtilTest, MatrixIndexingColumnMajor) {
Shape matrix_shape_10 = ShapeUtil::MakeShape(F32, {10, 20});
SetMinorToMajorLayout(&matrix_shape_10, {1, 0});
EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{0, 0}));
EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{9, 19}));
EXPECT_EQ(65, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{3, 5}));
EXPECT_THAT(
IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_10, 65),
testing::ElementsAre(3, 5));
}
TEST(IndexUtilTest, ThreeDArrayIndexing210) {
Shape shape_210 = ShapeUtil::MakeShape(F32, {10, 20, 30});
SetMinorToMajorLayout(&shape_210, {2, 1, 0});
EXPECT_EQ(1957, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210,
{3, 5, 7}));
EXPECT_EQ(5277, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210,
{8, 15, 27}));
}
TEST(IndexUtilTest, ThreeDArrayIndexing120) {
Shape shape_120 = ShapeUtil::MakeShape(F32, {10, 20, 30});
SetMinorToMajorLayout(&shape_120, {1, 2, 0});
EXPECT_EQ(1945, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120,
{3, 5, 7}));
EXPECT_EQ(5355, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120,
{8, 15, 27}));
}
TEST(IndexUtilTest, FourDArrayIndexing3210) {
Shape shape_3210 = ShapeUtil::MakeShape(F32, {10, 20, 30, 40});
SetMinorToMajorLayout(&shape_3210, {3, 2, 1, 0});
EXPECT_EQ(78289, IndexUtil::MultidimensionalIndexToLinearIndex(shape_3210,
{3, 5, 7, 9}));
EXPECT_EQ(211113, IndexUtil::MultidimensionalIndexToLinearIndex(
shape_3210, {8, 15, 27, 33}));
}
TEST(IndexUtilTest, LinearToMultiToLinear) {
std::vector<int64_t> linear_indexes = {0, 1439999999, 1145567336,
43883404, 617295214, 1117613654};
std::vector<std::vector<int64_t>> minor_to_major_orders;
minor_to_major_orders.push_back({6, 5, 4, 3, 2, 1, 0});
minor_to_major_orders.push_back({0, 1, 2, 3, 4, 5, 6});
minor_to_major_orders.push_back({4, 5, 1, 2, 6, 0, 3});
for (auto minor_to_major_order : minor_to_major_orders) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30, 40, 30, 20, 10});
SetMinorToMajorLayout(&shape, minor_to_major_order);
for (auto linear_index : linear_indexes) {
auto multi_index =
IndexUtil::LinearIndexToMultidimensionalIndex(shape, linear_index);
EXPECT_EQ(linear_index, IndexUtil::MultidimensionalIndexToLinearIndex(
shape, multi_index));
}
}
}
TEST(IndexUtilTest, BumpIndices2x2) {
auto shape = ShapeUtil::MakeShape(S32, {2, 2});
std::vector<int64_t> indices = {0, 0};
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(0, 1));
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(1, 0));
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(1, 1));
EXPECT_FALSE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/index_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/index_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
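The index_util.cc row above is essentially strided layout arithmetic: a minor-to-major order fixes each dimension's stride, and linear and multidimensional indices are converted by accumulating or peeling off those strides. Below is a minimal, self-contained sketch of that round trip with plain vectors; `LinearToMulti` and `MultiToLinear` are illustrative stand-ins, not the XLA API, and the example numbers mirror the MatrixIndexingColumnMajor test above.

```cpp
// Minimal sketch: a minor-to-major order fixes each dimension's stride, and
// linear <-> multidimensional index conversion accumulates or peels off
// those strides, as IndexUtil does above.
#include <cstdint>
#include <iostream>
#include <vector>

std::vector<int64_t> LinearToMulti(const std::vector<int64_t>& dims,
                                   const std::vector<int64_t>& minor_to_major,
                                   int64_t linear_index) {
  std::vector<int64_t> multi(dims.size());
  int64_t divisor = 1;
  for (int64_t dim : minor_to_major) {  // walk from the most-minor dimension outward
    multi[dim] = (linear_index / divisor) % dims[dim];
    divisor *= dims[dim];
  }
  return multi;
}

int64_t MultiToLinear(const std::vector<int64_t>& dims,
                      const std::vector<int64_t>& minor_to_major,
                      const std::vector<int64_t>& multi) {
  int64_t linear = 0;
  int64_t stride = 1;
  for (int64_t dim : minor_to_major) {
    linear += multi[dim] * stride;
    stride *= dims[dim];
  }
  return linear;
}

int main() {
  // 10x20 array with minor_to_major {1, 0}, matching the
  // MatrixIndexingColumnMajor test above: {3, 5} <-> linear index 65.
  const std::vector<int64_t> dims = {10, 20};
  const std::vector<int64_t> m2m = {1, 0};
  std::cout << MultiToLinear(dims, m2m, {3, 5}) << "\n";  // prints 65
  const auto multi = LinearToMulti(dims, m2m, 65);
  std::cout << multi[0] << "," << multi[1] << "\n";       // prints 3,5
  return 0;
}
```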
eb05537e-6e3d-43eb-98f5-278684d16219 | cpp | tensorflow/tensorflow | literal | third_party/xla/xla/literal.cc | third_party/xla/xla/literal_test.cc | #include "xla/literal.h"
#include <algorithm>
#include <complex>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/core/bitmap.h"
#include "xla/tsl/util/byte_swap_array.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using absl::StrCat;
using primitive_util::NativeTypeOf;
constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__;
void ConvertEndianShort(std::string* bytes) {
CHECK_EQ(bytes->size() % 2, 0);
for (int64_t i = 0, end = bytes->size(); i < end; i += 2) {
std::swap((*bytes)[i], (*bytes)[i + 1]);
}
}
void ConvertEndianShort(char* bytes, int64_t size) {
CHECK_EQ(size % 2, 0);
for (int64_t i = 0; i < size; i += 2) {
std::swap(bytes[i], bytes[i + 1]);
}
}
bool LiteralProtoHasValues(const LiteralProto& proto) {
return !proto.s2s().empty() || !proto.s4s().empty() || !proto.s8s().empty() ||
!proto.s16s().empty() || proto.s32s_size() || proto.s64s_size() ||
!proto.u2s().empty() || !proto.u4s().empty() || !proto.u8s().empty() ||
!proto.u16s().empty() || proto.u32s_size() || proto.u64s_size() ||
!proto.f8e5m2s().empty() || !proto.f8e4m3s().empty() ||
!proto.f8e4m3fns().empty() || !proto.f8e4m3b11fnuzs().empty() ||
!proto.f8e5m2fnuzs().empty() || !proto.f8e4m3fnuzs().empty() ||
!proto.f8e3m4s().empty() || !proto.f16s().empty() ||
!proto.bf16s().empty() || proto.f32s_size() || proto.f64s_size() ||
proto.c64s_size() || proto.c128s_size() || proto.preds_size() ||
proto.tuple_literals_size();
}
template <PrimitiveType kType>
const Shape& ScalarShapeImpl() {
static_assert(primitive_util::IsArrayType(kType),
"Not a valid type for a scalar.");
static const Shape* shape = [] {
auto shape = new Shape(kType, {}, {}, {});
shape->mutable_layout();
return shape;
}();
return *shape;
}
const Shape& ScalarShape(PrimitiveType type) {
return primitive_util::ArrayTypeSwitch<const Shape&>(
[&](auto primitive_type_constant) -> const Shape& {
return ScalarShapeImpl<primitive_type_constant>();
},
type);
}
const Shape& NilShape() {
static const Shape* shape = new Shape(TUPLE, {}, {}, {});
return *shape;
}
const Shape* TryInternShape(const Shape& shape) {
if (shape.IsTuple() && shape.tuple_shapes_size() == 0) {
return &NilShape();
}
if (shape.IsArray() && shape.dimensions_size() == 0 && shape.is_static() &&
shape.layout().tiles_size() == 0 && shape.layout().memory_space() == 0) {
return &ScalarShape(shape.element_type());
}
return nullptr;
}
struct StrideConfig {
StrideConfig(const Shape& source_shape, const Shape& dest_shape,
absl::Span<const int64_t> dimensions);
absl::Span<const int64_t> dimensions;
DimensionVector base;
DimensionVector step;
int64_t minor_dimension = 0;
int64_t dest_stride = 1;
int64_t source_stride = 1;
int64_t minor_loop_size = 1;
};
StrideConfig::StrideConfig(const Shape& source_shape, const Shape& dest_shape,
absl::Span<const int64_t> dimensions)
: dimensions(dimensions),
base(dimensions.size(), 0),
step(dimensions.size(), 1) {
if (!dimensions.empty()) {
if (dimensions[LayoutUtil::Minor(source_shape.layout(), 0)] >=
dimensions[LayoutUtil::Minor(dest_shape.layout(), 0)]) {
minor_dimension = LayoutUtil::Minor(source_shape.layout(), 0);
dest_stride = IndexUtil::GetDimensionStride(dest_shape, minor_dimension);
} else {
minor_dimension = LayoutUtil::Minor(dest_shape.layout(), 0);
source_stride =
IndexUtil::GetDimensionStride(source_shape, minor_dimension);
}
minor_loop_size = dimensions[minor_dimension];
step[minor_dimension] = minor_loop_size;
}
}
}
LiteralBase::~LiteralBase() = default;
const Shape& LiteralBase::shape() const { return root_piece().subshape(); }
const char* LiteralBase::Piece::buffer() const {
if (auto* r = std::get_if<DenseRep>(&rep_)) {
return r->data;
}
if (auto* r = std::get_if<DenseInlinedRep>(&rep_)) {
return r->data;
}
DCHECK(std::holds_alternative<TupleRep>(rep_) ||
std::holds_alternative<Uninitialized>(rep_));
return nullptr;
}
const LiteralBase::Piece& LiteralBase::piece(
const ShapeIndex& shape_index) const {
const Piece* piece = &root_piece();
for (const auto i : shape_index) {
DCHECK_GE(i, 0);
DCHECK_LT(i, piece->children_size());
piece = &piece->child(i);
}
return *piece;
}
std::ostream& operator<<(std::ostream& out, const Literal& literal) {
out << literal.ToString();
return out;
}
Shape* MutableLiteralBase::mutable_shape_do_not_use() {
const Shape* const_shape = shape_.get();
if (!shape_.OwnsPtr()) {
shape_ = MaybeOwningShapePtr(std::make_unique<Shape>(*shape_));
}
Shape* shape = shape_.get_mutable();
if (shape != const_shape) {
std::function<void(const Shape&, Piece*)> set_piece_shapes =
[&set_piece_shapes](const Shape& shape, Piece* piece) {
piece->set_subshape(&shape);
if (shape.IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
set_piece_shapes(subshape, &piece->child(i));
}
}
};
set_piece_shapes(*shape, &mutable_root_piece());
}
return shape;
}
Literal::Literal() : Literal(NilShape()) {}
Literal::Literal(const Shape& shape)
: Literal(shape, true) {}
void Literal::SetShape(const Shape& shape) {
Shape shape_storage;
const Shape* shape_ptr = &shape;
if (shape.IsArray() && LayoutUtil::HasCustomElementSizeInBits(shape)) {
shape_storage = shape;
shape_storage.mutable_layout()->set_element_size_in_bits(0);
shape_ptr = &shape_storage;
}
if (const Shape* intered_shape_ptr = TryInternShape(*shape_ptr)) {
shape_ = intered_shape_ptr;
} else {
shape_ = std::make_unique<Shape>(*shape_ptr);
}
}
void Literal::SetPiece(const Shape& shape, Piece* piece, bool allocate_arrays,
ArrayValueState leaf_array_value_state) {
if (shape.IsTuple()) {
for (const Shape& subshape : shape.tuple_shapes()) {
Piece child_piece;
child_piece.set_subshape(&subshape);
SetPiece(subshape, &child_piece, allocate_arrays, leaf_array_value_state);
piece->emplace_back(std::move(child_piece));
}
} else if (shape.IsArray()) {
DCHECK(LayoutUtil::IsDenseArray(shape))
<< "literal array storage is currently only supported for dense "
"arrays: "
<< shape;
piece->set_array_value_state(leaf_array_value_state);
if (leaf_array_value_state == LiteralBase::ArrayValueState::kKnown &&
allocate_arrays) {
piece->AllocateBuffers();
}
}
}
Literal::Literal(const Shape& shape, bool allocate_arrays,
ArrayValueState leaf_array_value_state)
: MutableLiteralBase() {
SetShape(shape);
CHECK(leaf_array_value_state != ArrayValueState::kKnown ||
LayoutUtil::HasLayout(*shape_));
root_piece_.set_subshape(shape_.get());
CHECK(&root_piece_.subshape() == shape_.get());
SetPiece(*shape_, &root_piece_, allocate_arrays, leaf_array_value_state);
}
Literal::~Literal() { DeallocateBuffers(); }
void Literal::DeallocateBuffers() {
root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
piece->DeallocateBuffers();
});
}
Literal::Literal(Literal&& other) : MutableLiteralBase() {
*this = std::move(other);
}
Literal& Literal::operator=(Literal&& other) {
DCHECK(&other.root_piece_.subshape() == other.shape_.get());
using std::swap;
swap(shape_, other.shape_);
swap(root_piece_, other.root_piece_);
DCHECK(&root_piece_.subshape() == shape_.get());
return *this;
}
Literal LiteralBase::CreateFromShape(const Shape& shape) {
Literal literal(shape);
literal.root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (piece->subshape().IsArray()) {
memset(piece->untyped_data(), 0, piece->size_bytes_dense());
}
});
return literal;
}
Literal LiteralBase::CreateFromShapeWithUnknownLeafArrays(const Shape& shape) {
Literal literal(shape, false, ArrayValueState::kUnknown);
return literal;
}
Literal LiteralBase::CreateFromShapeWithUndeterminedLeafArrays(
const Shape& shape) {
Literal literal(shape, false,
ArrayValueState::kUndetermined);
return literal;
}
int32_t LiteralBase::GetDynamicSize(int64_t dim_index) const {
return GetDynamicSize(dim_index, {});
}
int32_t LiteralBase::GetDynamicSize(int64_t dim_index,
const ShapeIndex& shape_index) const {
return piece(shape_index).GetDynamicSize(dim_index);
}
std::optional<int64_t> LiteralBase::GetFirstInteger() const {
if (!primitive_util::IsIntegralType(shape().element_type())) {
return std::nullopt;
}
return primitive_util::IntegralTypeSwitch<std::optional<int64_t>>(
[&](auto primitive_type_constant) -> std::optional<int64_t> {
using NativeT = NativeTypeOf<primitive_type_constant>;
auto first_element = GetFirstElement<NativeT>();
if constexpr (std::is_same_v<NativeT, uint64_t>) {
int64_t v = static_cast<int64_t>(first_element);
if (v < 0) {
return std::nullopt;
}
}
return first_element;
},
shape().element_type());
}
void LiteralBase::BuildPieceSubtree(const Shape& shape, Piece* piece) {
CHECK(shape.IsTuple());
for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
Piece child_piece;
child_piece.set_subshape(&subshape);
if (subshape.IsTuple()) {
BuildPieceSubtree(subshape, &child_piece);
}
piece->emplace_back(std::move(child_piece));
}
}
absl::Status LiteralBase::SerializeToString(std::string* output) const {
ShapeProto shape_proto = shape().ToProto();
TF_ASSIGN_OR_RETURN(int64_t size,
ShapeUtil::SerializedSizeWithProto(shape(), shape_proto));
output->resize(size);
return SerializeWithShapeProto(shape_proto, output->data());
}
absl::StatusOr<std::string> LiteralBase::SerializeAsString() const {
std::string result;
TF_RETURN_IF_ERROR(SerializeToString(&result));
return std::move(result);
}
template <typename NativeT>
absl::Status MutableLiteralBase::CopySliceFromInternal(
const LiteralBase& src_literal, absl::Span<const int64_t> src_base,
absl::Span<const int64_t> dest_base, absl::Span<const int64_t> copy_size) {
auto linear_index = [](const Shape& shape,
absl::Span<const int64_t> multi_index) {
return IndexUtil::MultidimensionalIndexToLinearIndex(shape, multi_index);
};
NativeT* dest_data = this->data<NativeT>().data();
const NativeT* src_data = src_literal.data<NativeT>().data();
if (src_literal.shape().rank() == 0 || shape().rank() == 0) {
TF_RET_CHECK(copy_size.empty());
dest_data[linear_index(shape(), dest_base)] =
src_data[linear_index(src_literal.shape(), src_base)];
} else if (!ShapeUtil::IsZeroElementArray(shape()) &&
!ShapeUtil::IsZeroElementArray(src_literal.shape()) &&
absl::c_none_of(copy_size, [](auto d) { return d == 0; })) {
TF_RET_CHECK(src_base.size() == dest_base.size());
TF_RET_CHECK(src_base.size() == copy_size.size());
DimensionVector src_indexes(src_base.size(), 0);
DimensionVector dest_indexes(dest_base.size(), 0);
StrideConfig stride_config(src_literal.shape(), shape(), copy_size);
auto copy_proc = [&](absl::Span<const int64_t> indexes) {
std::transform(indexes.begin(), indexes.end(), src_base.begin(),
src_indexes.begin(), std::plus<int64_t>());
std::transform(indexes.begin(), indexes.end(), dest_base.begin(),
dest_indexes.begin(), std::plus<int64_t>());
int64_t src_index = linear_index(src_literal.shape(), src_indexes);
int64_t dest_index = linear_index(shape(), dest_indexes);
StridedCopy(dest_data + dest_index, stride_config.dest_stride,
src_data + src_index, stride_config.source_stride,
stride_config.minor_loop_size);
return true;
};
ShapeUtil::ForEachIndex(src_literal.shape(), stride_config.base,
stride_config.dimensions, stride_config.step,
copy_proc);
}
return absl::OkStatus();
}
void MutableLiteralBase::CopyElementFrom(const LiteralSlice& src_literal,
absl::Span<const int64_t> src_index,
absl::Span<const int64_t> dest_index) {
DCHECK(LayoutUtil::IsDenseArray(shape()));
DCHECK_EQ(shape().element_type(), src_literal.shape().element_type());
const int64_t src_linear_index =
IndexUtil::MultidimensionalIndexToLinearIndex(src_literal.shape(),
src_index);
const int64_t dest_linear_index =
IndexUtil::MultidimensionalIndexToLinearIndex(shape(), dest_index);
const int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
char* dest_address =
static_cast<char*>(untyped_data()) + dest_linear_index * primitive_size;
const char* source_address =
static_cast<const char*>(src_literal.untyped_data()) +
src_linear_index * primitive_size;
if (dest_address != source_address) {
memcpy(dest_address, source_address, primitive_size);
}
}
absl::StatusOr<Literal> MutableLiteralBase::CreateFromProto(
const LiteralProto& proto, bool prohibit_empty_literal) {
if (!proto.has_shape()) {
return InvalidArgument("LiteralProto has no shape");
}
Shape shape(proto.shape());
if (ShapeUtil::HasPrimitiveType(shape, OPAQUE_TYPE)) {
return InvalidArgument(
"Literal shape cannot include OPAQUE_TYPE sub-shape");
}
if (!LayoutUtil::HasLayout(shape)) {
return InvalidArgument("LiteralProto has no layout");
}
if (LayoutUtil::IsSparseArray(shape)) {
return Unimplemented("Sparse literals are not supported");
}
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
Literal literal(shape);
TF_RETURN_IF_ERROR(literal.root_piece_.ForEachMutableSubpieceWithStatus(
[&](const ShapeIndex& index, Piece* piece) -> absl::Status {
const LiteralProto* proto_element = &proto;
for (int64_t i : index) {
CHECK(i < proto_element->tuple_literals_size());
proto_element = &proto_element->tuple_literals(i);
}
if (piece->subshape().IsTuple()) {
if (proto_element->tuple_literals_size() !=
ShapeUtil::TupleElementCount(piece->subshape())) {
return InvalidArgument(
"Expected %d tuple elements in LiteralProto, has %d",
ShapeUtil::TupleElementCount(piece->subshape()),
proto_element->tuple_literals_size());
}
return absl::OkStatus();
}
if (piece->subshape().element_type() == TOKEN) {
return absl::OkStatus();
}
CHECK(piece->subshape().IsArray());
if (prohibit_empty_literal || LiteralProtoHasValues(*proto_element)) {
TF_RETURN_IF_ERROR(piece->CopyFromProto(*proto_element));
}
return absl::OkStatus();
}));
return std::move(literal);
}
Literal Literal::SubLiteral(ShapeIndexView shape_index) {
if (!shape_index.empty()) {
auto decomposed = this->DecomposeTuple();
return decomposed.at(shape_index.front())
.SubLiteral(shape_index.subspan(1));
} else {
return std::move(*this);
}
}
std::vector<Literal> Literal::DecomposeTuple() {
CHECK(shape().IsTuple());
std::vector<Literal> elements;
const auto tuple_element_count = ShapeUtil::TupleElementCount(shape());
elements.reserve(tuple_element_count);
for (int i = 0; i < tuple_element_count; ++i) {
elements.push_back(Literal(ShapeUtil::GetSubshape(shape(), {i}),
false));
Literal& element = elements.back();
element.root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* dest_piece) {
if (dest_piece->subshape().IsTuple()) {
return;
}
ShapeIndex src_index = {i};
for (int64_t j : index) {
src_index.push_back(j);
}
Piece& src_piece = piece(src_index);
dest_piece->MoveDataFrom(src_piece);
});
}
*this = Literal();
return elements;
}
namespace {
template <typename NativeT>
void CopyElementsBetween(absl::Span<NativeT> dest,
absl::Span<const NativeT> src, const Shape& dest_shape,
const Shape& src_shape) {
DCHECK(LayoutUtil::IsDenseArray(dest_shape));
DCHECK(LayoutUtil::IsDenseArray(src_shape));
DCHECK(ShapeUtil::Compatible(dest_shape, src_shape));
if (ShapeUtil::IsZeroElementArray(dest_shape)) {
return;
}
std::vector<int64_t> index(dest_shape.rank());
do {
dest[IndexUtil::MultidimensionalIndexToLinearIndex(dest_shape, index)] =
src[IndexUtil::MultidimensionalIndexToLinearIndex(src_shape, index)];
} while (IndexUtil::BumpIndices(dest_shape, absl::MakeSpan(index)));
}
}
int32_t LiteralBase::Piece::GetDynamicSize(int64_t dim_index) const {
CHECK(LayoutUtil::IsDenseArray(subshape()));
if (!subshape_->is_dynamic_dimension(dim_index)) {
return subshape_->dimensions(dim_index);
}
return dynamic_size_buffer()[dim_index];
}
void LiteralBase::Piece::SetDynamicSize(int64_t dim_index, int32_t size) {
CHECK(LayoutUtil::IsDenseArray(subshape()));
CHECK(subshape_->is_dynamic_dimension(dim_index));
dynamic_size_buffer()[dim_index] = size;
}
void LiteralBase::Piece::AllocateBuffers() {
const int64_t bytes = total_bytes_dense();
if (bytes > kMaxInlinedBytes) {
CHECK_EQ(buffer(), nullptr);
rep_.emplace<DenseRep>();
set_buffer(
static_cast<char*>(tsl::port::AlignedMalloc(bytes, kMinimumAlignment)));
} else {
rep_.emplace<DenseInlinedRep>();
}
}
void LiteralBase::Piece::DeallocateBuffers() {
if (auto* array_rep = GetDenseRep()) {
tsl::port::AlignedFree(array_rep->data);
rep_.emplace<Uninitialized>();
}
}
template <typename NativeT>
void LiteralBase::Piece::CopyElementsWithDynamicBound(
const LiteralBase::Piece& src) {
auto& dest_shape = subshape();
auto& src_shape = src.subshape();
CHECK(dest_shape.is_static() || src_shape.is_static());
auto& bound_shape = dest_shape.is_static() ? src_shape : dest_shape;
if (ShapeUtil::IsZeroElementArray(dest_shape)) {
return;
}
if (dest_shape.rank() == 1) {
int64_t count = std::min(GetDynamicSize(0), src.GetDynamicSize(0));
std::copy_n(src.data<NativeT>().begin(), count, data<NativeT>().begin());
return;
}
std::vector<int64_t> index(dest_shape.rank());
do {
bool out_of_bound = false;
for (int64_t i = 0; i < index.size(); ++i) {
if (index[i] >= GetDynamicSize(i) || index[i] >= src.GetDynamicSize(i)) {
out_of_bound = true;
}
}
if (out_of_bound) {
continue;
}
data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(dest_shape,
index)] =
src.data<NativeT>()[IndexUtil::MultidimensionalIndexToLinearIndex(
src_shape, index)];
} while (IndexUtil::BumpIndices(bound_shape, absl::MakeSpan(index)));
}
absl::Status LiteralBase::Piece::CopyFrom(const LiteralBase::Piece& src,
bool only_dynamic_bound) {
CHECK(subshape_ != nullptr);
CHECK(src.subshape_ != nullptr);
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK(LayoutUtil::IsDenseArray(src.subshape()))
<< __func__ << " is only supported for dense arrays: " << src.subshape();
if (!only_dynamic_bound) {
CHECK(ShapeUtil::Compatible(subshape(), src.subshape()));
}
if (src.array_value_state_ == ArrayValueState::kUnknown ||
src.array_value_state_ == ArrayValueState::kUndetermined) {
if (array_value_state_ == ArrayValueState::kKnown) {
DeallocateBuffers();
}
array_value_state_ = src.array_value_state_;
return absl::OkStatus();
} else {
CHECK(src.array_value_state_ == ArrayValueState::kKnown);
if (array_value_state_ == ArrayValueState::kUndetermined ||
array_value_state_ == ArrayValueState::kUnknown) {
AllocateBuffers();
}
array_value_state_ = src.array_value_state_;
}
if (ShapeUtil::Equal(subshape(), src.subshape())) {
memcpy(buffer(), src.buffer(), src.size_bytes_dense());
} else {
std::vector<int64_t> origin(subshape().rank(), 0);
primitive_util::ArrayTypeSwitch<void>(
[&](auto primitive_type_constant) {
using NativeT = NativeTypeOf<primitive_type_constant>;
if (only_dynamic_bound) {
CopyElementsWithDynamicBound<NativeT>(src);
} else {
CopyElementsBetween<NativeT>(this->data<NativeT>(),
src.data<NativeT>(), subshape(),
src.subshape());
}
},
subshape().element_type());
}
DCHECK_EQ(dynamic_size_buffer_bytes(), src.dynamic_size_buffer_bytes());
if (subshape().is_dynamic() && src.subshape().is_dynamic()) {
memcpy(dynamic_size_buffer(), src.dynamic_size_buffer(),
src.dynamic_size_buffer_bytes());
}
return absl::OkStatus();
}
void MutableLiteralBase::SetDynamicSize(int64_t dim_index, int32_t size) {
return SetDynamicSize(dim_index, {}, size);
}
void MutableLiteralBase::SetDynamicSize(int64_t dim_index,
const ShapeIndex& shape_index,
int32_t size) {
Shape* subshape =
ShapeUtil::GetMutableSubshape(mutable_shape_do_not_use(), shape_index);
CHECK(LayoutUtil::IsDenseArray(*subshape))
<< __func__ << " is only supported for dense arrays: " << *subshape;
CHECK_GE(subshape->dimensions(dim_index), size);
subshape->set_dynamic_dimension(dim_index, true);
CHECK_EQ(&piece(shape_index).subshape(), subshape);
piece(shape_index).SetDynamicSize(dim_index, size);
}
absl::Status MutableLiteralBase::CopyFrom(const LiteralSlice& src_literal,
const ShapeIndex& dest_shape_index,
const ShapeIndex& src_shape_index,
bool only_dynamic_bound) {
const Shape& dest_subshape =
ShapeUtil::GetSubshape(shape(), dest_shape_index);
const Shape& src_subshape =
ShapeUtil::GetSubshape(src_literal.shape(), src_shape_index);
if (only_dynamic_bound) {
auto& bound_shape =
dest_subshape.is_static() ? src_subshape : dest_subshape;
auto& compact_shape =
dest_subshape.is_static() ? dest_subshape : src_subshape;
CHECK(ShapeUtil::DynamicShapeIsCompatible(compact_shape, bound_shape))
<< compact_shape.ToString() << " vs " << bound_shape.ToString();
} else {
if (!ShapeUtil::Compatible(dest_subshape, src_subshape)) {
return InvalidArgument(
"Destination subshape incompatible with source subshape: %s vs %s",
ShapeUtil::HumanString(dest_subshape),
ShapeUtil::HumanString(src_subshape));
}
}
return mutable_root_piece().ForEachMutableSubpieceWithStatus(
[&](const ShapeIndex& index, Piece* piece) {
if (!piece->subshape().IsArray()) {
return absl::OkStatus();
}
bool in_subtree_to_copy = true;
for (int i = 0; i < dest_shape_index.size(); ++i) {
if (index[i] != dest_shape_index[i]) {
in_subtree_to_copy = false;
break;
}
}
if (!in_subtree_to_copy) {
return absl::OkStatus();
}
ShapeIndex src_piece_index = src_shape_index;
for (int64_t i = dest_shape_index.size(), end = index.size(); i < end;
++i) {
src_piece_index.push_back(index[i]);
}
TF_RETURN_IF_ERROR(
piece->CopyFrom(src_literal.piece(src_piece_index),
only_dynamic_bound));
return absl::OkStatus();
});
}
absl::Status Literal::MoveFrom(Literal&& src_literal,
const ShapeIndex& dest_shape_index) {
const Shape& dest_subshape =
ShapeUtil::GetSubshape(shape(), dest_shape_index);
if (!ShapeUtil::Equal(dest_subshape, src_literal.shape())) {
return InvalidArgument(
"Destination subshape not equal to source shape: %s vs %s",
ShapeUtil::HumanString(dest_subshape),
ShapeUtil::HumanString(src_literal.shape()));
}
src_literal.root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& src_index, Piece* src_piece) {
if (!src_piece->subshape().IsArray()) {
return;
}
ShapeIndex dest_index = dest_shape_index;
for (int64_t i : src_index) {
dest_index.push_back(i);
}
Piece& dest_piece = piece(dest_index);
dest_piece.DeallocateBuffers();
dest_piece.MoveDataFrom(*src_piece);
});
src_literal.shape_ = MaybeOwningShapePtr(&NilShape());
src_literal.root_piece_ = Piece();
src_literal.root_piece_.set_subshape(src_literal.shape_.get());
return absl::OkStatus();
}
absl::Status MutableLiteralBase::CopySliceFrom(
const LiteralSlice& src_literal, absl::Span<const int64_t> src_base,
absl::Span<const int64_t> dest_base, absl::Span<const int64_t> copy_size) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(shape())) << shape();
TF_RET_CHECK(LayoutUtil::IsDenseArray(src_literal.shape()))
<< src_literal.shape();
TF_RET_CHECK(ShapeUtil::SameElementType(src_literal.shape(), shape()));
TF_RET_CHECK(src_literal.shape().rank() == src_base.size());
TF_RET_CHECK(shape().rank() == dest_base.size());
return primitive_util::ArrayTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
using NativeT = NativeTypeOf<primitive_type_constant>;
return CopySliceFromInternal<NativeT>(src_literal, src_base, dest_base,
copy_size);
},
shape().element_type());
}
void MutableLiteralBase::PopulateR1(const tsl::core::Bitmap& values) {
CHECK(shape().IsArray());
CHECK_EQ(shape().rank(), 1);
CHECK_EQ(element_count(), values.bits());
CHECK_EQ(shape().element_type(), PRED);
for (int64_t i = 0; i < static_cast<int64_t>(values.bits()); ++i) {
Set({i}, values.get(i));
}
}
void MutableLiteralBase::PopulateInplaceInternal(
absl::FunctionRef<void(void*, absl::Span<const int64_t>, int)> populator,
bool parallel) {
const Shape& this_shape = shape();
const int64_t rank = this_shape.rank();
DCHECK(LayoutUtil::IsDenseArray(this_shape));
char* const dest_base = static_cast<char*>(untyped_data());
if (rank > 0) {
StrideConfig stride_config(this_shape, this_shape, this_shape.dimensions());
const int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
const int64_t num_elements = ShapeUtil::ElementsIn(shape());
if (parallel && this_shape.rank() == 1) {
const int64_t thread_count =
ShapeUtil::GetForEachIndexParallelThreadCount();
stride_config.dest_stride = stride_config.minor_loop_size =
num_elements > 32 ? std::max<int64_t>(num_elements / thread_count, 1)
: num_elements;
stride_config.step = {stride_config.minor_loop_size};
}
auto init_function = [&](absl::Span<const int64_t> indexes,
int thread_id) -> absl::StatusOr<bool> {
const int64_t index =
IndexUtil::MultidimensionalIndexToLinearIndex(shape(), indexes);
DimensionVector minor_scan_indexes(rank, 0);
std::copy(indexes.begin(), indexes.end(), minor_scan_indexes.begin());
char* dest_ptr = dest_base + index * primitive_size;
char* const dest_end =
dest_base +
std::min(index + stride_config.minor_loop_size, num_elements) *
primitive_size;
while (dest_ptr < dest_end) {
populator(dest_ptr, minor_scan_indexes, thread_id);
++minor_scan_indexes[stride_config.minor_dimension];
dest_ptr += primitive_size;
}
return true;
};
if (parallel) {
ShapeUtil::ForEachIndexParallel(this_shape, stride_config.base,
stride_config.dimensions,
stride_config.step, init_function);
} else {
ShapeUtil::ForEachIndex(
this_shape, stride_config.base, stride_config.dimensions,
stride_config.step,
[&init_function](
absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
auto result_ignored = init_function(indexes, -1);
return true;
});
}
} else {
populator(dest_base, {}, -1);
}
}
absl::Status MutableLiteralBase::PopulateInplace(
absl::FunctionRef<void(void*, absl::Span<const int64_t>)> populator) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
PopulateInplaceInternal(
[&](void* dest, absl::Span<const int64_t> indexes, int ) {
return populator(dest, indexes);
},
false);
return absl::OkStatus();
}
absl::Status MutableLiteralBase::PopulateInplaceParallel(
absl::FunctionRef<void(void*, absl::Span<const int64_t>, int)> populator) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
PopulateInplaceInternal(populator,
element_count() > 32);
return absl::OkStatus();
}
Literal LiteralBase::Relayout(const Layout& new_layout,
const ShapeIndex& shape_index) const {
Shape new_shape = shape();
Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);
TF_CHECK_OK(LayoutUtil::ValidateLayoutForShape(new_layout, *subshape));
*subshape->mutable_layout() = new_layout;
if (subshape->layout().element_size_in_bits() == 4) {
subshape->mutable_layout()->set_element_size_in_bits(0);
}
Literal result(new_shape);
TF_CHECK_OK(result.CopyFrom(*this));
return result;
}
Literal LiteralBase::Relayout(const Shape& shape_with_layout) const {
CHECK(ShapeUtil::Compatible(shape_with_layout, shape()))
<< "Given shape_with_layout " << ShapeUtil::HumanString(shape_with_layout)
<< " not compatible with literal shape "
<< ShapeUtil::HumanString(shape());
Literal result = CreateFromShape(shape_with_layout);
ShapeUtil::ForEachSubshape(
result.shape(),
[this, &result](const Shape& subshape, const ShapeIndex& index) {
if (subshape.IsArray()) {
TF_CHECK_OK(result.CopyFrom(*this,
index,
index));
}
});
return result;
}
Literal LiteralBase::ToBoundedDynamic(const Shape& bounded_shape) const {
CHECK(bounded_shape.is_dynamic());
Literal result(bounded_shape);
ShapeUtil::ForEachSubshape(
shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
if (bounded_shape.is_dynamic_dimension(i)) {
result.SetDynamicSize(i, subshape.dimensions(i));
}
}
});
TF_CHECK_OK(result.CopyFrom(*this, {}, {}, true));
return result;
}
Literal LiteralBase::ToStatic() const {
Shape new_shape = shape();
ShapeUtil::ForEachMutableSubshape(
&new_shape, [this](Shape* subshape, const ShapeIndex& index) {
if (!subshape->IsArray()) {
return;
}
for (int64_t i = 0; i < subshape->rank(); ++i) {
if (!subshape->is_dynamic_dimension(i)) continue;
subshape->set_dynamic_dimension(i, false);
subshape->set_dimensions(i, GetDynamicSize(i, index));
}
});
Literal result(new_shape);
TF_CHECK_OK(result.CopyFrom(*this, {}, {}, true));
return result;
}
namespace {
template <int64_t PRIMITIVE_SIZE>
absl::StatusOr<Literal> BroadcastHelper(const LiteralBase& src,
const Shape& src_shape,
const Shape& result_shape,
absl::Span<const int64_t> dimensions) {
for (int64_t i = 0, end = dimensions.size(); i < end; i++) {
TF_RET_CHECK(src_shape.dimensions(i) ==
result_shape.dimensions(dimensions[i]));
}
TF_RET_CHECK(result_shape.element_type() == src_shape.element_type());
Literal result(result_shape);
if (src_shape.is_dynamic()) {
for (int64_t i = 0; i < dimensions.size(); ++i) {
if (src_shape.is_dynamic_dimension(i)) {
int64_t dynamic_size = src.GetDynamicSize(i);
result.SetDynamicSize(dimensions[i], dynamic_size);
}
}
}
int src_shape_dims = src_shape.dimensions_size();
std::vector<int64_t> scratch_source_index(src_shape_dims);
absl::Span<int64_t> scratch_source_span(scratch_source_index);
int64_t* scratch_source_array = scratch_source_span.data();
const char* source_data = static_cast<const char*>(src.untyped_data());
char* dest_data = static_cast<char*>(result.untyped_data());
auto src_minor_to_major = LayoutUtil::MinorToMajor(src_shape);
auto result_minor_to_major = LayoutUtil::MinorToMajor(result_shape);
ShapeUtil::ForEachIndexNoStatus(
result_shape, [&](absl::Span<const int64_t> output_index) {
int64_t dest_index = IndexUtil::MultidimensionalIndexToLinearIndex(
result_shape, result_minor_to_major, output_index);
int64_t source_index;
for (int64_t i = 0, end = dimensions.size(); i < end; ++i) {
scratch_source_array[i] = output_index[dimensions[i]];
}
if (src_shape_dims == 1) {
source_index = scratch_source_array[0];
DCHECK_EQ(source_index,
IndexUtil::MultidimensionalIndexToLinearIndex(
src_shape, src_minor_to_major, scratch_source_span));
} else {
source_index = IndexUtil::MultidimensionalIndexToLinearIndex(
src_shape, src_minor_to_major, scratch_source_span);
}
memcpy(dest_data + PRIMITIVE_SIZE * dest_index,
source_data + PRIMITIVE_SIZE * source_index, PRIMITIVE_SIZE);
return true;
});
return std::move(result);
}
}
absl::StatusOr<Literal> LiteralBase::Broadcast(
const Shape& result_shape, absl::Span<const int64_t> dimensions) const {
const LiteralBase& src = *this;
const Shape& src_shape = shape();
if (!src_shape.IsArray()) {
return InvalidArgument("Broadcast only supports arrays.");
}
const int64_t primitive_size =
ShapeUtil::ByteSizeOfPrimitiveType(src_shape.element_type());
switch (primitive_size) {
case 0:
return BroadcastHelper<0>(src, src_shape, result_shape, dimensions);
case 1:
return BroadcastHelper<1>(src, src_shape, result_shape, dimensions);
case 2:
return BroadcastHelper<2>(src, src_shape, result_shape, dimensions);
case 4:
return BroadcastHelper<4>(src, src_shape, result_shape, dimensions);
case 8:
return BroadcastHelper<8>(src, src_shape, result_shape, dimensions);
case 16:
return BroadcastHelper<16>(src, src_shape, result_shape, dimensions);
default:
LOG(FATAL) << "Unhandled primitive size " << primitive_size;
return InvalidArgument("Unhandled primitive size");
break;
}
}
absl::StatusOr<Literal> LiteralBase::Reshape(
absl::Span<const int64_t> dimensions) const {
if (!LayoutUtil::IsDenseArray(shape())) {
return InvalidArgument("Reshape is only supported for dense arrays.");
}
if (shape().is_dynamic()) {
return Unimplemented("Dynamic reshape is not implemented.");
}
Literal output;
if (!LayoutUtil::IsMonotonicWithDim0Major(shape().layout())) {
output = Relayout(LayoutUtil::GetDefaultLayoutForRank(shape().rank()));
} else {
output = Clone();
}
*output.mutable_shape_do_not_use() =
ShapeUtil::MakeShape(shape().element_type(), dimensions);
int64_t elements_before = ShapeUtil::ElementsIn(shape());
int64_t elements_after = ShapeUtil::ElementsIn(output.shape());
if (elements_before != elements_after) {
return InvalidArgument(
"Shapes before and after Literal::Reshape have different numbers "
"of elements: %s vs %s.",
ShapeUtil::HumanString(shape()),
ShapeUtil::HumanString(output.shape()));
}
return std::move(output);
}
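// Transpose permutes the logical dimensions but rewrites the layout using the
// inverse permutation, so the physical element order is preserved and the data
// can be copied with a single memcpy. Dynamic sizes follow their dimensions to
// the permuted positions.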
Literal LiteralBase::Transpose(absl::Span<const int64_t> permutation) const {
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
CHECK(shape().rank() == permutation.size() && IsPermutation(permutation))
<< "Given permutation is not a permutation of dimension numbers";
Shape permuted_shape = ShapeUtil::PermuteDimensions(permutation, shape());
std::vector<int64_t> inverse_permutation = InversePermutation(permutation);
CHECK(LayoutUtil::IsDenseArray(permuted_shape));
Layout* layout = permuted_shape.mutable_layout();
layout->clear_minor_to_major();
for (auto index : LayoutUtil::MinorToMajor(shape())) {
layout->add_minor_to_major(inverse_permutation[index]);
}
Literal new_literal(permuted_shape);
if (shape().is_dynamic()) {
for (int64_t i = 0; i < shape().rank(); i++) {
if (shape().is_dynamic_dimension(i)) {
new_literal.SetDynamicSize(inverse_permutation[i], GetDynamicSize(i));
}
}
}
DCHECK_EQ(ShapeUtil::ByteSizeOf(new_literal.shape()),
ShapeUtil::ByteSizeOf(shape()));
std::memcpy(new_literal.untyped_data(), untyped_data(), size_bytes());
return new_literal;
}
namespace {
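// Populates `result_literal` by reading the source at `start_indices` plus the
// output index, and carries over dynamic dimension sizes clamped to the sliced
// extent.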
template <typename NativeT>
void SliceInternal(const LiteralBase& src_literal,
absl::Span<const int64_t> start_indices,
Literal& result_literal) {
const Shape& result_shape = result_literal.shape();
DimensionVector new_indices(result_shape.rank());
TF_CHECK_OK(
result_literal.Populate<NativeT>([&](absl::Span<const int64_t> indices) {
for (int64_t i = 0; i < result_shape.rank(); ++i) {
new_indices[i] = indices[i] + start_indices[i];
}
return src_literal.Get<NativeT>(new_indices);
}));
for (int64_t dnum = 0; dnum < src_literal.shape().rank(); ++dnum) {
if (src_literal.shape().is_dynamic_dimension(dnum)) {
int64_t dynamic_size =
src_literal.GetDynamicSize(dnum) - start_indices[dnum];
CHECK_GE(dynamic_size, 0) << src_literal.GetDynamicSize(dnum);
dynamic_size = std::min(dynamic_size, result_shape.dimensions(dnum));
result_literal.SetDynamicSize(dnum, dynamic_size);
}
}
}
}
Literal LiteralBase::Slice(absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices) const {
CHECK(shape().IsArray()) << "tuple is not supported for slice";
DimensionVector result_dimensions;
for (int64_t dnum = 0; dnum < shape().rank(); ++dnum) {
CHECK_GE(start_indices[dnum], 0);
CHECK_LE(limit_indices[dnum], shape().dimensions(dnum))
<< "dnum = " << dnum;
int64_t dimension = limit_indices[dnum] - start_indices[dnum];
CHECK_GE(dimension, 0) << "dnum = " << dnum;
result_dimensions.push_back(dimension);
}
auto result_shape = ShapeUtil::MakeShapeWithDenseLayout(
shape().element_type(), result_dimensions,
LayoutUtil::MinorToMajor(shape()));
ShapeUtil::CopyDynamicDimensions(&result_shape, shape());
Literal result_literal(result_shape);
primitive_util::ArrayTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
using NativeT = NativeTypeOf<primitive_type_constant>;
return SliceInternal<NativeT>(*this, start_indices, result_literal);
},
result_shape.element_type());
return result_literal;
}
Literal LiteralBase::Clone() const {
Literal result(shape());
TF_CHECK_OK(result.CopyFrom(*this));
return result;
}
std::unique_ptr<Literal> LiteralBase::CloneToUnique() const {
auto result = std::make_unique<Literal>(shape());
TF_CHECK_OK(result->CopyFrom(*this));
return result;
}
bool LiteralBase::IsDetermined(const ShapeIndex& shape_index) const {
return piece(shape_index).IsDetermined();
}
bool LiteralBase::IsKnown(const ShapeIndex& shape_index) const {
return piece(shape_index).IsKnown();
}
std::string LiteralBase::GetAsString(absl::Span<const int64_t> multi_index,
const ShapeIndex& shape_index) const {
const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);
CHECK(LayoutUtil::IsDenseArray(subshape));
return primitive_util::ArrayTypeSwitch<std::string>(
[&](auto primitive_type_constant) -> std::string {
using NativeT = NativeTypeOf<primitive_type_constant>;
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
return StrCat(Get<NativeT>(multi_index, shape_index));
}
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
return RoundTripFpToString(Get<NativeT>(multi_index, shape_index));
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
NativeT c = Get<NativeT>(multi_index, shape_index);
return StrCat("(", RoundTripFpToString(c.real()), ", ",
RoundTripFpToString(c.imag()), ")");
}
if constexpr (primitive_type_constant == PRED) {
return Get<bool>(multi_index, shape_index) ? "true" : "false";
}
LOG(FATAL) << PrimitiveType_Name(subshape.element_type());
},
subshape.element_type());
}
std::optional<int64_t> LiteralBase::GetIntegralAsS64(
absl::Span<const int64_t> multi_index) const {
CHECK(LayoutUtil::IsDenseArray(shape()));
return primitive_util::PrimitiveTypeSwitch<std::optional<int64_t>>(
[&](auto primitive_type_constant) -> std::optional<int64_t> {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant) ||
primitive_type_constant == PRED) {
using NativeT = NativeTypeOf<primitive_type_constant>;
return Get<NativeT>(multi_index);
}
return std::nullopt;
},
shape().element_type());
}
std::optional<double> LiteralBase::GetAsDouble(
absl::Span<const int64_t> multi_index) const {
const Shape& s = shape();
CHECK(LayoutUtil::IsDenseArray(s));
return primitive_util::PrimitiveTypeSwitch<std::optional<double>>(
[&](auto primitive_type_constant) -> std::optional<double> {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = NativeTypeOf<primitive_type_constant>;
return static_cast<double>(Get<NativeT>(multi_index));
}
return std::nullopt;
},
s.element_type());
}
std::optional<double> LiteralBase::GetSumAsDouble(
absl::Span<const int64_t> linear_indices) const {
const Shape& s = shape();
CHECK(LayoutUtil::IsDenseArray(s));
if (!primitive_util::IsFloatingPointType(s.element_type())) {
return std::nullopt;
}
return primitive_util::FloatingPointTypeSwitch<double>(
[&](auto primitive_type_constant) -> double {
using NativeT = NativeTypeOf<primitive_type_constant>;
double sum = 0.0;
auto d = root_piece().data<NativeT>();
for (const int64_t idx : linear_indices) {
sum += static_cast<double>(d[idx]);
}
return sum;
},
s.element_type());
}
std::optional<complex128> LiteralBase::GetAsComplex128(
absl::Span<const int64_t> multi_index) const {
return primitive_util::PrimitiveTypeSwitch<std::optional<complex128>>(
[&](auto primitive_type_constant) -> std::optional<complex128> {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
using NativeT = NativeTypeOf<primitive_type_constant>;
if constexpr (primitive_util::IsComplexType(
primitive_type_constant)) {
return {Get<NativeT>(multi_index)};
}
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
return {{static_cast<double>(Get<NativeT>(multi_index)), 0}};
}
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant) &&
primitive_type_constant != S64 &&
primitive_type_constant != U64) {
return {{static_cast<double>(Get<NativeT>(multi_index)), 0}};
}
}
return std::nullopt;
},
shape().element_type());
}
absl::Status MutableLiteralBase::SetIntegralAsS64(
absl::Span<const int64_t> multi_index, int64_t value) {
CHECK(LayoutUtil::IsDenseArray(shape()));
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant) ||
primitive_type_constant == PRED) {
using NativeT = NativeTypeOf<primitive_type_constant>;
Set<NativeT>(multi_index, static_cast<NativeT>(value));
return absl::OkStatus();
}
return FailedPrecondition("Array element type is not integral: %s",
PrimitiveType_Name(shape().element_type()));
},
shape().element_type());
}
absl::Status MutableLiteralBase::SetFromDouble(
absl::Span<const int64_t> multi_index, double value) {
CHECK(LayoutUtil::IsDenseArray(shape()));
if (!primitive_util::IsFloatingPointType(shape().element_type())) {
return FailedPrecondition("Array element type is not integral: %s",
PrimitiveType_Name(shape().element_type()));
}
primitive_util::FloatingPointTypeSwitch<void>(
[&](auto primitive_type_constant) -> void {
using NativeT = NativeTypeOf<primitive_type_constant>;
Set<NativeT>(multi_index, static_cast<NativeT>(value));
},
shape().element_type());
return absl::OkStatus();
}
namespace {
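// Pretty-printing helpers. PrintHelper recurses into tuples via
// TuplePrintHelper and prints array pieces with DenseArrayPrintHelper, which
// nests braces per dimension and, for dynamic shapes, appends the dynamic
// extents in parentheses after the shape.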
void PrintShape(bool print_layout, const Shape& shape, Printer* printer) {
if (print_layout) {
ShapeUtil::PrintHumanStringWithLayout(printer, shape);
} else {
ShapeUtil::PrintHumanString(printer, shape);
}
}
void PrintHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
bool print_shape, bool print_layout, bool oneline,
Printer* printer);
void TuplePrintHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
bool print_shape, bool print_layout, bool oneline,
Printer* printer) {
const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
printer->Append(oneline ? "( " : "(\n");
for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) {
ShapeIndex element_index = shape_index;
element_index.push_back(i);
if (i > 0) printer->Append(oneline ? ", " : ",\n");
PrintHelper(literal, element_index, print_shape, print_layout, oneline,
printer);
}
printer->Append(oneline ? " )" : "\n)");
}
void DenseArrayPrintHelper(const LiteralBase& literal,
const ShapeIndex& shape_index, bool print_shape,
bool print_layout, bool oneline, Printer* printer) {
const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
int64_t rank = subshape.rank();
const absl::string_view linebreak = oneline ? " " : "\n";
std::function<void(absl::Span<const int64_t> dimensions,
std::vector<int64_t>*)>
print_recursive = [&](absl::Span<const int64_t> dimensions,
std::vector<int64_t>* accum_indices) {
CHECK_EQ(rank, dimensions.size() + accum_indices->size());
auto brace_to_string = [&](std::string brace) -> std::string {
if (rank == 1) {
return brace;
}
if (dimensions.size() == 1 && brace == "{") {
return StrCat(oneline ? "" : " ", brace,
dimensions[0] <= 1 ? "" : " ");
}
if (dimensions.size() == 1 && brace == "}") {
return StrCat(dimensions[0] <= 1 ? "" : " ", brace);
}
if (brace == "{") {
const int64_t accum_indices_size = accum_indices->size();
if (rank > 3 && !accum_indices->empty() &&
accum_indices_size < rank) {
              int size = dimensions.front();
              return StrCat(brace, " ", size > 0 ? linebreak : "");
}
return StrCat(brace, linebreak);
}
return StrCat(linebreak, brace);
};
if (dimensions.empty()) {
std::string elem;
if (subshape.element_type() == PRED && rank > 0) {
elem = literal.Get<bool>(*accum_indices, shape_index) ? "1" : "0";
} else {
elem = literal.GetAsString(*accum_indices, shape_index);
}
printer->Append(elem);
} else {
printer->Append(brace_to_string("{"));
for (int i = 0; i < dimensions[0]; ++i) {
accum_indices->push_back(i);
print_recursive(dimensions.subspan(1), accum_indices);
accum_indices->pop_back();
if (i < dimensions[0] - 1) {
printer->Append(",");
printer->Append(dimensions.size() > 1 ? linebreak : " ");
}
}
printer->Append(brace_to_string("}"));
}
};
if (print_shape) {
PrintShape(print_layout, subshape, printer);
if (subshape.is_dynamic()) {
printer->Append("(");
for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
printer->Append(literal.GetDynamicSize(i, shape_index));
if (i < subshape.dimensions_size() - 1) {
printer->Append(",");
}
}
printer->Append(")");
}
printer->Append(" ");
}
std::vector<int64_t> indices = {};
std::vector<int64_t> dimensions;
dimensions.reserve(subshape.rank());
for (int64_t i = 0; i < subshape.rank(); ++i) {
dimensions.push_back(literal.GetDynamicSize(i, shape_index));
}
print_recursive(dimensions, &indices);
}
void PrintHelper(const LiteralBase& literal, const ShapeIndex& shape_index,
bool print_shape, bool print_layout, bool oneline,
Printer* printer) {
const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);
CHECK(LayoutUtil::HasLayout(literal.shape()));
CHECK(LayoutUtil::HasLayout(subshape));
if (subshape.IsTuple()) {
TuplePrintHelper(literal, shape_index, print_shape, print_layout, oneline,
printer);
} else if (subshape.IsToken()) {
printer->Append("token");
} else {
CHECK(LayoutUtil::IsDenseArray(subshape));
if (literal.IsKnown(shape_index)) {
DenseArrayPrintHelper(literal, shape_index, print_shape, print_layout,
oneline, printer);
} else {
PrintShape(print_layout, subshape, printer);
printer->Append(" ");
if (literal.IsDetermined(shape_index)) {
printer->Append("unknown");
} else {
printer->Append("undetermined");
}
}
}
}
}
void LiteralBase::Print(Printer* printer) const {
CHECK(LayoutUtil::HasLayout(this->shape()));
PrintHelper(*this, {}, true, false,
false, printer);
}
void LiteralBase::PrintOneline(Printer* printer) const {
CHECK(LayoutUtil::HasLayout(this->shape()));
PrintHelper(*this, {}, true, false,
true, printer);
}
void LiteralBase::PrintWithoutShape(Printer* printer) const {
CHECK(LayoutUtil::HasLayout(this->shape()));
PrintHelper(*this, {}, false, false,
false, printer);
}
void LiteralBase::PrintWithoutShapeOneline(Printer* printer) const {
CHECK(LayoutUtil::HasLayout(this->shape()));
PrintHelper(*this, {}, false, false,
true, printer);
}
void LiteralBase::PrintWithLayout(Printer* printer) const {
CHECK(LayoutUtil::HasLayout(this->shape()));
PrintHelper(*this, {}, true, true,
false, printer);
}
void LiteralBase::PrintWithLayoutOneline(Printer* printer) const {
CHECK(LayoutUtil::HasLayout(this->shape()));
PrintHelper(*this, {}, true, true,
true, printer);
}
std::string LiteralBase::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringOneline() const {
StringPrinter printer;
PrintOneline(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithoutShape() const {
StringPrinter printer;
PrintWithoutShape(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithoutShapeOneline() const {
StringPrinter printer;
PrintWithoutShapeOneline(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithLayout() const {
StringPrinter printer;
PrintWithLayout(&printer);
return std::move(printer).ToString();
}
std::string LiteralBase::ToStringWithLayoutOneline() const {
StringPrinter printer;
PrintWithLayoutOneline(&printer);
return std::move(printer).ToString();
}
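// Invokes `per_cell` for every element of the array, advancing a
// multidimensional index with IndexUtil::BumpIndices and formatting each value
// with GetAsString. Zero-element arrays produce no calls.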
void LiteralBase::EachCellAsString(
absl::FunctionRef<void(absl::Span<const int64_t> indices,
const std::string& value)>
per_cell) const {
if (ShapeUtil::IsZeroElementArray(shape())) {
return;
}
auto indices = IndexUtil::LinearIndexToMultidimensionalIndex(
shape(), 0);
do {
per_cell(indices, GetAsString(indices));
} while (IndexUtil::BumpIndices(shape(), absl::MakeSpan(indices)));
}
namespace {
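// Element-wise conversion between native types. Floating-point to integer
// conversions saturate at the destination limits and map NaN to zero; 1-byte
// sources are widened to half before converting to float8_e3m4.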
template <typename NativeSrcT, typename NativeDestT>
void ConvertBetweenNativeTypes(absl::Span<const NativeSrcT> src_data,
void* dst_base) {
static_assert(!std::is_same_v<NativeSrcT, NativeDestT>);
auto converter = [](NativeSrcT src) -> NativeDestT {
if constexpr (!std::is_same_v<NativeDestT, bool> &&
!std::numeric_limits<NativeSrcT>::is_integer &&
std::numeric_limits<NativeDestT>::is_integer) {
if (src != src) {
return NativeDestT{0};
}
if (src >=
static_cast<NativeSrcT>(std::numeric_limits<NativeDestT>::max())) {
return std::numeric_limits<NativeDestT>::max();
}
if (src <=
static_cast<NativeSrcT>(std::numeric_limits<NativeDestT>::lowest())) {
return std::numeric_limits<NativeDestT>::lowest();
}
}
if constexpr (sizeof(src) == 1 &&
std::is_same_v<NativeDestT, tsl::float8_e3m4>) {
return static_cast<NativeDestT>(static_cast<half>(src));
} else {
return static_cast<NativeDestT>(src);
}
};
NativeDestT* dest_data = static_cast<NativeDestT*>(dst_base);
for (const NativeSrcT& src : src_data) {
*(dest_data++) = converter(src);
}
}
template <PrimitiveType kSrcType>
absl::Status ConvertIfDestTypeMatches(const LiteralBase& src_literal,
MutableLiteralBase& dst_literal) {
DCHECK(dst_literal.shape().IsArray());
using NativeSrcT = NativeTypeOf<kSrcType>;
auto src_data = src_literal.data<NativeSrcT>();
void* dst_base = dst_literal.untyped_data();
DCHECK_EQ(src_data.size(), dst_literal.element_count());
return primitive_util::ArrayTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsComplexType(kSrcType) &&
!primitive_util::IsComplexType(primitive_type_constant)) {
return Unimplemented("%s from type %s to type %s is not implemented.",
"Converting", PrimitiveType_Name(kSrcType),
PrimitiveType_Name(primitive_type_constant()));
} else if constexpr (kSrcType != primitive_type_constant) {
using NativeDestT = NativeTypeOf<primitive_type_constant>;
ConvertBetweenNativeTypes<NativeSrcT, NativeDestT>(src_data,
dst_base);
}
return absl::OkStatus();
},
dst_literal.shape().element_type());
}
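// Converts `literal` to `primitive_dest_type`. Returns a clone when the type
// already matches, rejects non-array types, and otherwise dispatches on the
// source element type to ConvertIfDestTypeMatches.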
absl::StatusOr<Literal> ConvertSwitch(const LiteralBase& literal,
PrimitiveType primitive_dest_type) {
TF_RET_CHECK(LayoutUtil::IsDenseArray(literal.shape()));
if (literal.shape().element_type() == primitive_dest_type) {
return literal.Clone();
}
if (!primitive_util::IsArrayType(primitive_dest_type) ||
!primitive_util::IsArrayType(literal.shape().element_type())) {
return Unimplemented("%s from type %s to type %s is not implemented.",
"Converting",
PrimitiveType_Name(literal.shape().element_type()),
PrimitiveType_Name(primitive_dest_type));
}
Literal result(
ShapeUtil::ChangeElementType(literal.shape(), primitive_dest_type));
TF_RETURN_IF_ERROR(primitive_util::ArrayTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
return ConvertIfDestTypeMatches<primitive_type_constant>(literal,
result);
},
literal.shape().element_type()));
return result;
}
}
absl::StatusOr<Literal> LiteralBase::Convert(
PrimitiveType primitive_dest_type) const {
return ConvertSwitch(*this, primitive_dest_type);
}
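// Reinterprets the raw bytes of this literal as `dest_shape`. Requires equal
// byte sizes and static, non-tuple shapes. On big-endian hosts the buffer is
// byte-swapped out of the source element size and back into the destination
// element size.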
absl::StatusOr<Literal> LiteralBase::BitcastConvert(
const Shape& dest_shape) const {
if (ShapeUtil::ByteSizeOf(dest_shape) != ShapeUtil::ByteSizeOf(shape())) {
return InvalidArgument(
"Can not bitcast-convert from shape %s to a shape of different size %s",
shape().ToString(), dest_shape.ToString());
}
if (dest_shape.IsTuple() || shape().IsTuple()) {
return InvalidArgument(
"bitcast-convert is not valid for tuple shapes %s->%s",
shape().ToString(), dest_shape.ToString());
}
if (shape().is_dynamic() || dest_shape.is_dynamic()) {
return InvalidArgument(
"bitcast-convert is not valid for dynamic shape %s->%s",
shape().ToString(), dest_shape.ToString());
}
Literal out(dest_shape);
std::memcpy(out.root_piece_.buffer(), root_piece().buffer(),
root_piece().size_bytes_dense());
if constexpr (!kLittleEndian) {
size_t input_elem_size =
ShapeUtil::ByteSizeOfPrimitiveType(shape().element_type());
TF_RETURN_IF_ERROR(tsl::ByteSwapArray(
const_cast<char*>(out.root_piece().buffer()), input_elem_size,
out.root_piece().size_bytes_dense() / input_elem_size));
size_t output_elem_size =
ShapeUtil::ByteSizeOfPrimitiveType(dest_shape.element_type());
TF_RETURN_IF_ERROR(tsl::ByteSwapArray(
const_cast<char*>(out.root_piece().buffer()), output_elem_size,
out.root_piece().size_bytes_dense() / output_elem_size));
}
return out;
}
absl::StatusOr<Literal> LiteralBase::ConvertToShape(
const Shape& dest_shape) const {
if (!dest_shape.IsTuple()) {
return Convert(dest_shape.element_type());
}
std::vector<Literal> elements;
const auto tuple_element_count = ShapeUtil::TupleElementCount(shape());
elements.reserve(tuple_element_count);
for (int i = 0; i < tuple_element_count; ++i) {
auto element = LiteralSlice(*this, {i});
TF_ASSIGN_OR_RETURN(
auto new_element,
element.ConvertToShape(ShapeUtil::GetSubshape(dest_shape, {i})));
elements.push_back(std::move(new_element));
}
return MutableLiteralBase::MoveIntoTuple(absl::MakeSpan(elements));
}
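// Builds a tuple literal that takes ownership of each element's buffers via
// MoveFrom, leaving the inputs in a moved-from state.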
Literal MutableLiteralBase::MoveIntoTuple(
absl::Span<Literal> elements) {
std::vector<const Shape*> element_shapes;
element_shapes.reserve(elements.size());
for (const Literal& element : elements) {
element_shapes.push_back(&element.shape());
}
Literal literal(ShapeUtil::MakeTupleShapeWithPtrs(element_shapes),
false);
for (int i = 0, end = elements.size(); i < end; ++i) {
TF_CHECK_OK(
literal.MoveFrom(std::move(elements[i]), {i}));
}
return literal;
}
template <typename NativeT>
bool LiteralBase::Piece::EqualElementsInternal(
const LiteralBase::Piece& other, std::vector<int64_t>* multi_index) const {
if (multi_index->size() == subshape().rank()) {
return (Get<NativeT>(*multi_index) == other.Get<NativeT>(*multi_index));
}
for (int64_t i = 0; i < GetDynamicSize(multi_index->size()); ++i) {
multi_index->push_back(i);
if (!EqualElementsInternal<NativeT>(other, multi_index)) {
return false;
}
multi_index->pop_back();
}
return true;
}
bool LiteralBase::Piece::EqualDynamicSize(
const LiteralBase::Piece& other) const {
DCHECK(ShapeUtil::Compatible(subshape(), other.subshape()));
if (subshape().is_static()) {
return true;
}
for (int64_t i = 0; i < subshape().rank(); ++i) {
if (GetDynamicSize(i) != other.GetDynamicSize(i)) {
return false;
}
}
return true;
}
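// Compares element data. For static shapes that are equal including layout,
// this is a memcmp (masking out padding bits for sub-byte types); otherwise
// elements are compared one by one, honoring dynamic dimension sizes.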
bool LiteralBase::Piece::EqualElements(const LiteralBase::Piece& other) const {
if (subshape().is_static() &&
ShapeUtil::Equal(subshape(), other.subshape()) && subshape().IsArray()) {
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK_EQ(size_bytes_dense(), other.size_bytes_dense());
if (primitive_util::IsSubByteNonPredType(subshape().element_type())) {
CHECK(!primitive_util::IsFloatingPointType(subshape().element_type()));
auto one_array = buffer();
auto two_array = other.buffer();
const int bits_per_element =
primitive_util::BitWidth(subshape().element_type());
const uint8_t mask = LsbMask<uint8_t>(bits_per_element);
for (int64_t i = 0; i < size_bytes_dense(); ++i) {
if ((one_array[i] & mask) != (two_array[i] & mask)) return false;
}
return true;
}
return memcmp(buffer(), other.buffer(), size_bytes_dense()) == 0;
}
std::vector<int64_t> multi_index;
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeSrcT = NativeTypeOf<primitive_type_constant>;
return EqualElementsInternal<NativeSrcT>(other, &multi_index);
},
subshape().element_type());
}
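// Two literals are equal if their shapes have the same structure and every
// array piece agrees on element type, rank, dynamic sizes, element values,
// and (when layout_sensitive) layout.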
bool LiteralBase::Equal(const LiteralBase& other, bool layout_sensitive) const {
if (!ShapeUtil::EqualStructure(shape(), other.shape())) {
return false;
}
return root_piece().ForEachSubpieceWithBool([&](const ShapeIndex& index,
const Piece& piece) {
const Piece& other_piece = other.piece(index);
const Shape& subshape = piece.subshape();
const Shape& other_subshape = other_piece.subshape();
if (subshape.element_type() != other_subshape.element_type()) {
return false;
}
if (!piece.subshape().IsArray()) {
return true;
}
if (subshape.rank() != other_subshape.rank()) {
return false;
}
if (layout_sensitive && (subshape.layout() != other_subshape.layout())) {
return false;
}
for (int64_t i = 0; i < subshape.rank(); ++i) {
if (piece.GetDynamicSize(i) != other_piece.GetDynamicSize(i)) {
return false;
}
}
if (!piece.EqualElements(other_piece)) {
return false;
}
return true;
});
}
template <typename NativeT>
static bool EqualIncludingNan(NativeT a, NativeT b) {
if constexpr (std::numeric_limits<NativeT>::has_quiet_NaN ||
std::numeric_limits<NativeT>::has_signaling_NaN) {
if (Eigen::numext::isnan(a) && Eigen::numext::isnan(b)) {
return true;
}
}
return a == b;
}
template <typename T>
static bool EqualIncludingNan(std::complex<T> a, std::complex<T> b) {
return EqualIncludingNan(a.real(), b.real()) &&
EqualIncludingNan(a.imag(), b.imag());
}
template <typename NativeT>
static bool AllElementsEqualValue(absl::Span<const NativeT> data,
NativeT value) {
for (int64_t i = 0; i < data.size(); ++i) {
if (memcmp(&data[i], &value, sizeof value)) {
return false;
}
}
return true;
}
bool Literal::Piece::IsAll(const Literal& scalar) const {
CHECK(ShapeUtil::IsScalar(scalar.shape())) << scalar.shape().ToString();
if (!subshape().IsArray()) {
return false;
}
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK_EQ(subshape().element_type(), scalar.shape().element_type());
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
return AllElementsEqualValue(this->data<NativeT>(),
scalar.GetFirstElement<NativeT>());
},
subshape().element_type());
}
int64_t Literal::Piece::CountAll(const Literal& scalar) const {
CHECK(ShapeUtil::IsScalar(scalar.shape())) << scalar.shape().ToString();
if (!subshape().IsArray()) {
return 0;
}
CHECK(LayoutUtil::IsDenseArray(subshape()))
<< __func__ << " is only supported for dense arrays: " << subshape();
CHECK_EQ(subshape().element_type(), scalar.shape().element_type());
return primitive_util::ArrayTypeSwitch<int64_t>(
[&](auto primitive_type_constant) -> int64_t {
using NativeT = NativeTypeOf<primitive_type_constant>;
return absl::c_count_if(
this->data<NativeT>(), [&](NativeT elem) -> bool {
return EqualIncludingNan(elem, scalar.GetFirstElement<NativeT>());
});
},
subshape().element_type());
}
bool LiteralBase::IsAll(const Literal& scalar) const {
return root_piece().IsAll(scalar);
}
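// Returns true if every element equals `value`. Floating-point literals are
// routed through IsAllFloatImpl without rounding; unsigned literals can never
// equal a negative value; other types require the value to round-trip through
// the element type unchanged.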
bool LiteralBase::IsAll(int8_t value) const {
if (!shape().IsArray()) {
return false;
}
PrimitiveType ty = shape().element_type();
if (primitive_util::IsFloatingPointType(ty)) {
return IsAllFloatImpl(value, false);
}
if (primitive_util::IsUnsignedIntegralType(ty) && value < 0) {
return false;
}
Literal scalar(ShapeUtil::MakeScalarShape(ty));
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
NativeT converted(value);
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
if (!Eigen::numext::isfinite(converted)) {
return false;
}
}
if constexpr (!primitive_util::IsComplexType(primitive_type_constant)) {
if (static_cast<int8_t>(converted) != value) {
return false;
}
}
scalar.Set<NativeT>({}, converted);
return root_piece().IsAll(scalar);
},
ty);
}
bool LiteralBase::IsAllFloat(float value) const {
return IsAllFloatImpl(value, true);
}
bool LiteralBase::IsAllFloatImpl(float value, bool round_value) const {
PrimitiveType ty = shape().element_type();
if (!primitive_util::IsFloatingPointType(ty)) {
return false;
}
Literal scalar(ShapeUtil::MakeScalarShape(ty));
return primitive_util::FloatingPointTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
scalar.Set<NativeT>({}, static_cast<NativeT>(value));
if (!round_value && scalar.GetAsDouble({}) != value) {
return false;
}
return root_piece().IsAll(scalar);
},
ty);
}
bool LiteralBase::IsAllComplex(complex64 value) const {
PrimitiveType ty = shape().element_type();
if (!primitive_util::IsComplexType(ty)) {
return false;
}
Literal scalar(ShapeUtil::MakeScalarShape(ty));
return primitive_util::ComplexTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
scalar.Set<NativeT>({}, static_cast<NativeT>(value));
return root_piece().IsAll(scalar);
},
ty);
}
bool LiteralBase::IsAllFirst() const {
if (!shape().IsArray()) {
return false;
}
if (ShapeUtil::IsZeroElementArray(shape())) {
return false;
}
absl::InlinedVector<int64_t, 4> start_indices(shape().rank(), 0);
absl::InlinedVector<int64_t, 4> end_indices(shape().rank(), 1);
Literal first = Slice(start_indices, end_indices);
return IsAll(first.Reshape({}).value());
}
bool LiteralBase::IsR1Iota() const {
if (!shape().IsArray()) {
return false;
}
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
if (shape().rank() != 1) {
return false;
}
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
const int64_t elements = ShapeUtil::ElementsIn(shape());
for (int64_t idx = 0; idx < elements; ++idx) {
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant)) {
if (static_cast<int64_t>(Get<NativeT>({idx})) != idx) {
return false;
}
} else if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
if (Get<NativeT>({idx}) != static_cast<NativeT>(idx)) {
return false;
}
} else if constexpr (primitive_util::IsComplexType(
primitive_type_constant)) {
if (Get<NativeT>({idx}) != NativeT(idx, 0.0f)) {
return false;
}
} else {
return false;
}
}
return true;
},
shape().element_type());
}
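// If this is a rank-1 integral literal of the form {0, s, 2s, ...} for some
// nonzero stride s, returns s; otherwise returns nullopt.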
std::optional<int64_t> LiteralBase::IsR1StridedIota() const {
if (!shape().IsArray() || shape().rank() != 1) {
return std::nullopt;
}
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
const int64_t elements = ShapeUtil::ElementsIn(shape());
const PrimitiveType type = shape().element_type();
if (elements <= 1 || !primitive_util::IsIntegralType(type)) {
return std::nullopt;
}
return primitive_util::IntegralTypeSwitch<std::optional<int64_t>>(
[&](auto primitive_type_constant) -> std::optional<int64_t> {
using NativeT = NativeTypeOf<primitive_type_constant>;
const int64_t stride = static_cast<int64_t>(Get<NativeT>({1}));
if (stride == 0) {
return std::nullopt;
}
for (int64_t idx = 0; idx < elements; ++idx) {
if (static_cast<int64_t>(Get<NativeT>({idx})) != idx * stride) {
return std::nullopt;
}
}
return stride;
},
shape().element_type());
}
bool LiteralBase::IsZero(absl::Span<const int64_t> indices) const {
CHECK(LayoutUtil::IsDenseArray(shape()))
<< __func__ << " is only supported for dense arrays: " << shape();
return primitive_util::ArrayTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
using NativeT = NativeTypeOf<primitive_type_constant>;
return Get<NativeT>(indices) == NativeT{0};
},
shape().element_type());
}
namespace {
template <typename RepeatedFieldT, typename NativeT>
void CopyToRepeatedField(RepeatedFieldT* dest,
const absl::Span<const NativeT> src) {
*dest = RepeatedFieldT(src.begin(), src.end());
}
}
void LiteralBase::Piece::set_array_value_state(ArrayValueState state) {
array_value_state_ = state;
}
LiteralBase::ArrayValueState LiteralBase::Piece::get_array_value_state() const {
return array_value_state_;
}
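// Serializes this piece into the type-specific field of LiteralProto:
// sub-byte, 8-bit-float, and 16-bit types as packed byte strings (16-bit
// fields are byte-swapped on big-endian hosts), wider types as repeated
// fields, and complex types as interleaved real/imaginary pairs. Tuples and
// tokens carry no data.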
void LiteralBase::Piece::WriteToProto(LiteralProto* proto) const {
*proto->mutable_shape() = subshape().ToProto();
switch (subshape().element_type()) {
case PRED:
CopyToRepeatedField(proto->mutable_preds(), data<bool>());
break;
case U2:
*proto->mutable_u2s() = std::string(
reinterpret_cast<const char*>(data<u2>().data()), size_bytes_dense());
break;
case U4:
*proto->mutable_u4s() = std::string(
reinterpret_cast<const char*>(data<u4>().data()), size_bytes_dense());
break;
case U8:
proto->set_u8s(static_cast<const unsigned char*>(data<uint8_t>().data()),
element_count());
break;
case U16:
*proto->mutable_u16s() =
std::string(reinterpret_cast<const char*>(data<uint16_t>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_u16s());
}
break;
case U32:
CopyToRepeatedField(proto->mutable_u32s(), data<uint32_t>());
break;
case U64:
CopyToRepeatedField(proto->mutable_u64s(), data<uint64_t>());
break;
case S2:
*proto->mutable_s2s() = std::string(
reinterpret_cast<const char*>(data<s2>().data()), size_bytes_dense());
break;
case S4:
*proto->mutable_s4s() = std::string(
reinterpret_cast<const char*>(data<s4>().data()), size_bytes_dense());
break;
case S8:
proto->set_s8s(static_cast<const signed char*>(data<int8_t>().data()),
element_count());
break;
case S16:
*proto->mutable_s16s() =
std::string(reinterpret_cast<const char*>(data<int16_t>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_s16s());
}
break;
case S32:
CopyToRepeatedField(proto->mutable_s32s(), data<int32_t>());
break;
case S64:
CopyToRepeatedField(proto->mutable_s64s(), data<int64_t>());
break;
case F8E5M2:
*proto->mutable_f8e5m2s() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e5m2>().data()),
size_bytes_dense());
break;
case F8E4M3:
*proto->mutable_f8e4m3s() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3>().data()),
size_bytes_dense());
break;
case F8E4M3FN:
*proto->mutable_f8e4m3fns() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3fn>().data()),
size_bytes_dense());
break;
case F8E4M3B11FNUZ:
*proto->mutable_f8e4m3b11fnuzs() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3b11fnuz>().data()),
size_bytes_dense());
break;
case F8E5M2FNUZ:
*proto->mutable_f8e5m2fnuzs() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e5m2fnuz>().data()),
size_bytes_dense());
break;
case F8E4M3FNUZ:
*proto->mutable_f8e4m3fnuzs() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e4m3fnuz>().data()),
size_bytes_dense());
break;
case F8E3M4:
*proto->mutable_f8e3m4s() = std::string(
reinterpret_cast<const char*>(data<tsl::float8_e3m4>().data()),
size_bytes_dense());
break;
case F16:
*proto->mutable_f16s() =
std::string(reinterpret_cast<const char*>(data<half>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_f16s());
}
break;
case BF16:
*proto->mutable_bf16s() =
std::string(reinterpret_cast<const char*>(data<bfloat16>().data()),
size_bytes_dense());
if (!kLittleEndian) {
ConvertEndianShort(proto->mutable_bf16s());
}
break;
case F32:
CopyToRepeatedField(proto->mutable_f32s(), data<float>());
break;
case F64:
CopyToRepeatedField(proto->mutable_f64s(), data<double>());
break;
case C64:
for (complex64 value : data<complex64>()) {
proto->add_c64s(value.real());
proto->add_c64s(value.imag());
}
break;
case C128:
for (complex128 value : data<complex128>()) {
proto->add_c128s(value.real());
proto->add_c128s(value.imag());
}
break;
case TUPLE:
case TOKEN:
return;
default:
LOG(FATAL) << "Unhandled primitive type "
<< PrimitiveType_Name(subshape().element_type());
}
}
const void* LiteralBase::Piece::untyped_data() const {
DCHECK(LayoutUtil::IsDenseArray(subshape()))
<< ShapeUtil::HumanString(subshape());
return buffer();
}
void* LiteralBase::Piece::untyped_data() {
DCHECK(LayoutUtil::IsDenseArray(subshape()))
<< ShapeUtil::HumanString(subshape());
return buffer();
}
namespace {
template <typename RepeatedFieldT, typename NativeT>
absl::Status CopyFromRepeatedField(absl::Span<NativeT> dest,
const RepeatedFieldT& src) {
if (dest.size() != src.size()) {
return InvalidArgument(
"Expected %lu elements in LiteralProto repeated field, has %d",
dest.size(), src.size());
}
std::copy(src.begin(), src.end(), dest.begin());
return absl::OkStatus();
}
}
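// Inverse of Piece::WriteToProto: checks that the proto's shape matches this
// piece's subshape, then copies the type-specific field back into the buffer,
// byte-swapping 16-bit types on big-endian hosts.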
absl::Status LiteralBase::Piece::CopyFromProto(const LiteralProto& proto) {
TF_RET_CHECK(proto.has_shape());
Shape shape(proto.shape());
TF_RET_CHECK(LayoutUtil::HasLayout(shape));
TF_RET_CHECK(ShapeUtil::Equal(shape, subshape()));
switch (subshape().element_type()) {
case PRED:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<bool>(), proto.preds()));
break;
case S2: {
const std::string& s(proto.s2s());
TF_RET_CHECK(data<s2>().size() * sizeof(s2) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case S4: {
const std::string& s(proto.s4s());
TF_RET_CHECK(data<s4>().size() * sizeof(s4) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case S8: {
auto s8_data = data<int8_t>();
TF_RET_CHECK(proto.s8s().size() == s8_data.size());
std::copy(proto.s8s().begin(), proto.s8s().end(), s8_data.begin());
break;
}
case S16: {
const std::string& s(proto.s16s());
TF_RET_CHECK(data<int16_t>().size() * sizeof(int16_t) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case S32:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int32_t>(), proto.s32s()));
break;
case S64:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<int64_t>(), proto.s64s()));
break;
case U2: {
const std::string& s(proto.u2s());
TF_RET_CHECK(data<u2>().size() * sizeof(u2) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case U4: {
const std::string& s(proto.u4s());
TF_RET_CHECK(data<u4>().size() * sizeof(u4) == s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case U8: {
auto u8_data = data<uint8_t>();
TF_RET_CHECK(proto.u8s().size() == u8_data.size());
std::copy(proto.u8s().begin(), proto.u8s().end(), u8_data.begin());
break;
}
case U16: {
const std::string& s(proto.u16s());
TF_RET_CHECK(data<uint16_t>().size() * sizeof(uint16_t) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case U32:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint32_t>(), proto.u32s()));
break;
case U64:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<uint64_t>(), proto.u64s()));
break;
case F8E5M2: {
const std::string& s(proto.f8e5m2s());
TF_RET_CHECK(data<tsl::float8_e5m2>().size() * sizeof(tsl::float8_e5m2) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3: {
const std::string& s(proto.f8e4m3s());
TF_RET_CHECK(data<tsl::float8_e4m3>().size() * sizeof(tsl::float8_e4m3) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3FN: {
const std::string& s(proto.f8e4m3fns());
TF_RET_CHECK(data<tsl::float8_e4m3fn>().size() *
sizeof(tsl::float8_e4m3fn) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3B11FNUZ: {
const std::string& s(proto.f8e4m3b11fnuzs());
TF_RET_CHECK(data<tsl::float8_e4m3b11fnuz>().size() *
sizeof(tsl::float8_e4m3b11fnuz) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E5M2FNUZ: {
const std::string& s(proto.f8e5m2fnuzs());
TF_RET_CHECK(data<tsl::float8_e5m2fnuz>().size() *
sizeof(tsl::float8_e5m2fnuz) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E4M3FNUZ: {
const std::string& s(proto.f8e4m3fnuzs());
TF_RET_CHECK(data<tsl::float8_e4m3fnuz>().size() *
sizeof(tsl::float8_e4m3fnuz) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F8E3M4: {
const std::string& s(proto.f8e3m4s());
TF_RET_CHECK(data<tsl::float8_e3m4>().size() * sizeof(tsl::float8_e3m4) ==
s.size());
memcpy(untyped_data(), s.data(), s.size());
break;
}
case F16: {
const std::string& s(proto.f16s());
TF_RET_CHECK(data<half>().size() * sizeof(half) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case BF16: {
const std::string& s(proto.bf16s());
TF_RET_CHECK(data<bfloat16>().size() * sizeof(bfloat16) == s.size());
memcpy(untyped_data(), s.data(), s.size());
if (!kLittleEndian) {
ConvertEndianShort(reinterpret_cast<char*>(untyped_data()), s.size());
}
break;
}
case F32:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<float>(), proto.f32s()));
break;
case F64:
TF_RETURN_IF_ERROR(CopyFromRepeatedField(data<double>(), proto.f64s()));
break;
case C64: {
auto complex_data = data<complex64>();
TF_RET_CHECK(proto.c64s_size() == complex_data.size() * 2);
for (int64_t i = 0; i < complex_data.size(); ++i) {
complex_data[i] = complex64{proto.c64s(i * 2), proto.c64s(i * 2 + 1)};
}
break;
}
case C128: {
auto complex_data = data<complex128>();
const int64_t complex_data_size_doubled = complex_data.size() * 2;
TF_RET_CHECK(proto.c128s_size() == complex_data_size_doubled);
for (int64_t i = 0, end = complex_data.size(); i < end; ++i) {
complex_data[i] =
complex128{proto.c128s(i * 2), proto.c128s(i * 2 + 1)};
}
break;
}
case TUPLE:
return InvalidArgument("Should not be called on tuple shapes: %s",
ShapeUtil::HumanString(subshape()));
default:
return InvalidArgument("Is called on unsupported shape: %s",
ShapeUtil::HumanString(subshape()));
}
return absl::OkStatus();
}
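// A piece is known if its own state is kKnown and, for tuples, every leaf
// array piece below it is also known.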
bool LiteralBase::Piece::IsKnown() const {
if (array_value_state_ != ArrayValueState::kKnown) {
return false;
}
if (subshape().IsTuple()) {
bool are_all_leaf_arrays_known = true;
ForEachSubpiece([&are_all_leaf_arrays_known](const ShapeIndex& index,
const Piece& piece) {
if (!piece.subshape().IsArray()) {
return;
}
are_all_leaf_arrays_known &= piece.IsKnown();
});
return are_all_leaf_arrays_known;
}
return true;
}
bool LiteralBase::Piece::IsDetermined() const {
if (array_value_state_ == ArrayValueState::kUndetermined) {
return false;
}
if (subshape().IsTuple()) {
bool are_all_leaf_arrays_determined = true;
ForEachSubpiece([&are_all_leaf_arrays_determined](const ShapeIndex& index,
const Piece& piece) {
if (!piece.subshape().IsArray()) {
return;
}
are_all_leaf_arrays_determined &= piece.IsDetermined();
});
return are_all_leaf_arrays_determined;
}
return true;
}
LiteralProto LiteralBase::ToProto() const {
LiteralProto proto;
root_piece().ForEachSubpiece(
[&](const ShapeIndex& index, const Piece& piece) {
LiteralProto* proto_piece = &proto;
for (int64_t i : index) {
while (proto_piece->tuple_literals_size() <= i) {
proto_piece->add_tuple_literals();
}
proto_piece = proto_piece->mutable_tuple_literals(i);
}
piece.WriteToProto(proto_piece);
});
return proto;
}
const void* LiteralBase::untyped_data(const ShapeIndex& shape_index) const {
return piece(shape_index).untyped_data();
}
void* MutableLiteralBase::untyped_data(const ShapeIndex& shape_index) {
return piece(shape_index).untyped_data();
}
int64_t LiteralBase::size_bytes(const ShapeIndex& shape_index) const {
return piece(shape_index).size_bytes_dense();
}
std::string LiteralBase::GetR1U8AsString() const {
CHECK(shape().IsArray());
CHECK_EQ(shape().rank(), 1);
CHECK_EQ(shape().element_type(), U8);
return std::string(absl::bit_cast<const char*>(data<uint8_t>().data()),
ShapeUtil::ElementsIn(shape()));
}
void MutableBorrowingLiteral::CopyPieceSubtree(const Shape& shape,
const Piece* src_piece,
Piece* dest_piece) {
DCHECK(ShapeUtil::Equal(src_piece->subshape(), dest_piece->subshape()))
<< "src_piece has shape: "
<< ShapeUtil::HumanString(src_piece->subshape())
<< "dest_piece has shape: "
<< ShapeUtil::HumanString(dest_piece->subshape());
dest_piece->set_array_value_state(src_piece->get_array_value_state());
if (shape.IsTuple()) {
for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
const Shape& subshape = shape.tuple_shapes(i);
Piece child_piece;
child_piece.set_subshape(&subshape);
CopyPieceSubtree(subshape, &src_piece->child(i), &child_piece);
dest_piece->emplace_back(std::move(child_piece));
}
} else if (shape.IsArray()) {
dest_piece->set_buffer(const_cast<char*>(src_piece->buffer()));
}
}
MutableLiteralBase::~MutableLiteralBase() = default;
MutableBorrowingLiteral::MutableBorrowingLiteral(
const MutableBorrowingLiteral& literal)
: MutableLiteralBase() {
shape_ = literal.shape_.Clone();
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal.root_piece(), root_piece_);
}
MutableBorrowingLiteral& MutableBorrowingLiteral::operator=(
const MutableBorrowingLiteral& literal) {
shape_ = literal.shape_.Clone();
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal.root_piece(), root_piece_);
return *this;
}
MutableBorrowingLiteral::MutableBorrowingLiteral(MutableLiteralBase* literal)
: MutableLiteralBase() {
shape_ = literal->shape_.Clone();
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal->root_piece(), root_piece_);
}
MutableBorrowingLiteral::MutableBorrowingLiteral(
MutableBorrowingLiteral literal, const ShapeIndex& view_root)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(literal.piece(view_root).subshape());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
CopyPieceSubtree(*shape_, &literal.piece(view_root), root_piece_);
}
MutableBorrowingLiteral::MutableBorrowingLiteral(const char* src_buf_ptr,
const Shape& shape)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(shape);
CHECK(LayoutUtil::HasLayout(*shape_));
CHECK(!shape_->IsTuple());
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
root_piece_->set_buffer(const_cast<char*>(src_buf_ptr));
}
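// Borrows externally owned buffers: a single buffer for an array shape, or one
// buffer per element of a flat (non-nested) tuple.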
MutableBorrowingLiteral::MutableBorrowingLiteral(absl::Span<char*> src_buf_ptrs,
const Shape& shape)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(shape);
if (!shape_->IsTuple()) {
CHECK_EQ(src_buf_ptrs.size(), 1);
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
root_piece_->set_buffer(const_cast<char*>(src_buf_ptrs[0]));
} else {
CHECK(!ShapeUtil::IsNestedTuple(*shape_));
CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
for (int i = 0; i < src_buf_ptrs.size(); ++i) {
Piece child_piece;
const auto& src_shape = shape_->tuple_shapes(i);
CHECK(src_shape.IsArray());
child_piece.set_subshape(&src_shape);
child_piece.set_buffer(src_buf_ptrs[i]);
root_piece_->emplace_back(std::move(child_piece));
}
}
}
MutableBorrowingLiteral::MutableBorrowingLiteral(ShapeTree<char*> src_buf_ptrs)
: MutableLiteralBase() {
shape_ = std::make_unique<Shape>(src_buf_ptrs.shape());
root_piece_ = new Piece();
root_piece_->set_subshape(shape_.get());
BuildPieceSubtree(*shape_, root_piece_);
root_piece_->ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (ShapeUtil::GetSubshape(*shape_, index).IsTuple()) {
DCHECK_EQ(src_buf_ptrs.element(index), nullptr)
<< "Tuples should not have buffer pointers";
return;
}
piece->set_buffer(const_cast<char*>(src_buf_ptrs.element(index)));
});
}
MutableBorrowingLiteral::~MutableBorrowingLiteral() {
if (root_piece_ != nullptr) {
delete root_piece_;
}
}
LiteralSlice::LiteralSlice(const LiteralBase& literal)
: LiteralBase(), root_piece_(&literal.root_piece()) {}
LiteralSlice::LiteralSlice(const LiteralBase& literal,
const ShapeIndex& view_root)
: LiteralBase(), root_piece_(&literal.piece(view_root)) {}
BorrowingLiteral::BorrowingLiteral(const char* src_buf_ptr, const Shape& shape)
: LiteralBase(), shape_(std::make_unique<Shape>(shape)) {
CHECK(shape_->IsArray());
CHECK(LayoutUtil::HasLayout(*shape_));
root_piece_ = Piece();
root_piece_.set_subshape(shape_.get());
root_piece_.set_buffer(const_cast<char*>(src_buf_ptr));
}
BorrowingLiteral::BorrowingLiteral(absl::Span<const char* const> src_buf_ptrs,
const Shape& shape)
: LiteralBase(), shape_(std::make_unique<Shape>(shape)) {
CHECK(shape_->IsTuple());
CHECK(!ShapeUtil::IsNestedTuple(*shape_));
CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));
root_piece_ = Piece();
root_piece_.set_subshape(shape_.get());
BuildPieceSubtree(*shape_, &root_piece_);
for (int i = 0, end = src_buf_ptrs.size(); i < end; ++i) {
const auto& src_shape = shape_->tuple_shapes(i);
CHECK(src_shape.IsArray());
root_piece_.child(i).set_buffer(const_cast<char*>(src_buf_ptrs[i]));
}
}
BorrowingLiteral::BorrowingLiteral(ShapeTree<const char*> src_buf_ptrs)
: LiteralBase(), shape_(std::make_unique<Shape>(src_buf_ptrs.shape())) {
root_piece_ = Piece();
root_piece_.set_subshape(shape_.get());
BuildPieceSubtree(*shape_, &root_piece_);
root_piece_.ForEachMutableSubpiece(
[&](const ShapeIndex& index, Piece* piece) {
if (ShapeUtil::GetSubshape(*shape_, index).IsTuple()) {
DCHECK_EQ(src_buf_ptrs.element(index), nullptr)
<< "Tuples should not have buffer pointers";
return;
}
piece->set_buffer(const_cast<char*>(src_buf_ptrs.element(index)));
});
}
} | #include "xla/literal.h"
#include <algorithm>
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
class LiteralUtilTest : public ::testing::Test {
protected:
LiteralUtilTest() {
Array4D<float> arr4d({
{
{
{1, 2, 3},
{4, 5, 6},
{7, 8, 9},
},
{
{11, 12, 13},
{14, 15, 16},
{17, 18, 19},
},
},
{
{
{101, 102, 103},
{104, 105, 106},
{107, 108, 109},
},
{
{201, 202, 203},
{204, 205, 206},
{207, 208, 209},
},
},
});
layout_r2_dim0major_ = LayoutUtil::MakeLayout({1, 0});
layout_r2_dim0minor_ = LayoutUtil::MakeLayout({0, 1});
layout_r3_dim0major_ = LayoutUtil::MakeLayout({2, 1, 0});
layout_r3_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2});
layout_r4_dim0major_ = LayoutUtil::MakeLayout({3, 2, 1, 0});
layout_r4_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2, 3});
literal_r4_2x2x3x3_dim0major_ =
LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
layout_r4_dim0major_);
literal_r4_2x2x3x3_dim0minor_ =
LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
layout_r4_dim0minor_);
}
Layout layout_r2_dim0major_;
Layout layout_r2_dim0minor_;
Layout layout_r3_dim0major_;
Layout layout_r3_dim0minor_;
Layout layout_r4_dim0major_;
Layout layout_r4_dim0minor_;
Literal literal_r4_2x2x3x3_dim0major_;
Literal literal_r4_2x2x3x3_dim0minor_;
};
template <typename T>
class LiteralUtilFloatTest : public LiteralUtilTest {};
using FloatTypes =
::testing::Types<float, half, bfloat16, tsl::float8_e3m4, tsl::float8_e4m3,
tsl::float8_e4m3fn, tsl::float8_e4m3fnuz,
tsl::float8_e4m3b11fnuz, tsl::float8_e5m2,
tsl::float8_e5m2fnuz>;
TYPED_TEST_SUITE(LiteralUtilFloatTest, FloatTypes);
TEST_F(LiteralUtilTest, LiteralScalarToString) {
auto true_lit = LiteralUtil::CreateR0<bool>(true);
EXPECT_EQ("pred[] true", true_lit.ToString());
auto false_lit = LiteralUtil::CreateR0<bool>(false);
EXPECT_EQ("pred[] false", false_lit.ToString());
auto u4_lit = LiteralUtil::CreateR0<u4>(u4(5));
EXPECT_EQ("u4[] 5", u4_lit.ToString());
auto u32_lit = LiteralUtil::CreateR0<uint32_t>(42);
EXPECT_EQ("u32[] 42", u32_lit.ToString());
auto s4_lit = LiteralUtil::CreateR0<s4>(s4(-3));
EXPECT_EQ("s4[] -3", s4_lit.ToString());
auto s32_lit = LiteralUtil::CreateR0<int32_t>(-999);
EXPECT_EQ("s32[] -999", s32_lit.ToString());
auto f32_lit = LiteralUtil::CreateR0<float>(3.14f);
EXPECT_EQ("f32[] 3.14", f32_lit.ToString());
auto f16_lit = LiteralUtil::CreateR0<half>(static_cast<half>(0.5f));
EXPECT_EQ("f16[] 0.5", f16_lit.ToString());
auto c64_lit = LiteralUtil::CreateR0<complex64>({3.14f, 2.78f});
EXPECT_EQ("c64[] (3.14, 2.78)", c64_lit.ToString());
auto c128_lit = LiteralUtil::CreateR0<complex128>({3.14, 2.78});
EXPECT_EQ("c128[] (3.14, 2.78)", c128_lit.ToString());
auto bf16_lit = LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(0.5f));
EXPECT_EQ("bf16[] 0.5", bf16_lit.ToString());
auto bf16_lit_truncated =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(3.14f));
ASSERT_EQ("bf16[] 3.141", bf16_lit_truncated.ToString());
auto bf16_lit_truncated2 =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(9.001f));
EXPECT_EQ("bf16[] 9", bf16_lit_truncated2.ToString());
auto f8e5m2_lit =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2(0.5));
EXPECT_EQ("f8e5m2[] 0.5", f8e5m2_lit.ToString());
auto f8e5m2_lit_truncated =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2(3.141));
EXPECT_EQ("f8e5m2[] 3", f8e5m2_lit_truncated.ToString());
auto f8e4m3_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3>(tsl::float8_e4m3(0.5));
EXPECT_EQ("f8e4m3[] 0.5", f8e4m3_lit.ToString());
auto f8e4m3fn_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3fn>(tsl::float8_e4m3fn(0.5));
EXPECT_EQ("f8e4m3fn[] 0.5", f8e4m3fn_lit.ToString());
auto f8e4m3b11fnuz_lit = LiteralUtil::CreateR0<tsl::float8_e4m3b11fnuz>(
tsl::float8_e4m3b11fnuz(0.5));
EXPECT_EQ("f8e4m3b11fnuz[] 0.5", f8e4m3b11fnuz_lit.ToString());
auto f8e4m3fnuz_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3fnuz>(tsl::float8_e4m3fnuz(0.5));
EXPECT_EQ("f8e4m3fnuz[] 0.5", f8e4m3fnuz_lit.ToString());
auto f8e5m2fnuz_lit =
LiteralUtil::CreateR0<tsl::float8_e5m2fnuz>(tsl::float8_e5m2fnuz(0.5));
EXPECT_EQ("f8e5m2fnuz[] 0.5", f8e5m2fnuz_lit.ToString());
auto f8e3m4_lit =
LiteralUtil::CreateR0<tsl::float8_e3m4>(tsl::float8_e3m4(0.5));
EXPECT_EQ("f8e3m4[] 0.5", f8e3m4_lit.ToString());
}
TEST_F(LiteralUtilTest, LiteralVectorToString) {
auto pred_vec = LiteralUtil::CreateR1<bool>({true, false, true});
EXPECT_EQ("pred[3] {1, 0, 1}", pred_vec.ToString());
}
TEST_F(LiteralUtilTest, R2ToString) {
const auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
const std::string expected = R"(s32[3,2] {
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R2DynamicToString) {
auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
literal.SetDynamicSize(0, {}, 2);
const std::string expected = R"(s32[<=3,2](2,2) {
{ 1, 2 },
{ 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
auto literal2 = LiteralUtil::CreateR2({{1, 2, 3}, {4, 5, 6}});
literal2.SetDynamicSize(1, {}, 2);
const std::string expected2 = R"(s32[2,<=3](2,2) {
{ 1, 2 },
{ 4, 5 }
})";
EXPECT_EQ(expected2, literal2.ToString());
}
TEST_F(LiteralUtilTest, R2BoolDynamicToString) {
auto literal = LiteralUtil::CreateR2<bool>(
{{true, true, true}, {true, true, true}, {true, true, true}});
literal.SetDynamicSize(0, {}, 2);
const std::string expected = R"(pred[<=3,3](2,3) {
{ 1, 1, 1 },
{ 1, 1, 1 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R3ToString) {
const auto literal =
LiteralUtil::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
const std::string expected = R"(s32[3,2,1] {
{
{1},
{2}
},
{
{3},
{4}
},
{
{5},
{6}
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R6ToString) {
const auto literal =
LiteralUtil::CreateFromDimensions(S32, {2, 2, 1, 1, 1, 2});
const std::string expected = R"(s32[2,2,1,1,1,2] {
{
{
{
{
{ 0, 0 }
}
}
},
{
{
{
{ 0, 0 }
}
}
}
},
{
{
{
{
{ 0, 0 }
}
}
},
{
{
{
{ 0, 0 }
}
}
}
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, TupleToString) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
const std::string expected = R"((
f32[] 1,
f32[2,2] {
{ 1, 2 },
{ 3, 4 }
}
))";
EXPECT_EQ(expected, tuple.ToString());
}
TEST_F(LiteralUtilTest, CreateR3FromArray3d) {
Array3D<float> array_3d({
{{1.0f, 2.0f},
{3.0f, 4.0f},
{5.0f, 6.0f}},
{{7.0f, 8.0f},
{9.0f, 10.0f},
{11.0f, 12.0f}},
});
auto literal = LiteralUtil::CreateR3FromArray3D(array_3d);
EXPECT_THAT(literal.shape().dimensions(), ElementsAre(2, 3, 2));
std::string result = literal.ToString();
const std::string expected = R"(f32[2,3,2] {
{
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
},
{
{ 7, 8 },
{ 9, 10 },
{ 11, 12 }
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) {
auto literal = LiteralUtil::CreateR4Projected<float>({
{1, 2},
{1001, 1002},
{2001, 2002},
}, 1, 2);
EXPECT_THAT(literal.shape().dimensions(), ElementsAre(1, 2, 3, 2));
std::string result = literal.ToString();
const std::string expected = R"(f32[1,2,3,2] {
{
{
{ 1, 2 },
{ 1001, 1002 },
{ 2001, 2002 }
},
{
{ 1, 2 },
{ 1001, 1002 },
{ 2001, 2002 }
}
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, LiteralR4F32Stringifies) {
EXPECT_THAT(literal_r4_2x2x3x3_dim0major_.shape().dimensions(),
ElementsAre(2, 2, 3, 3));
std::string result = literal_r4_2x2x3x3_dim0major_.ToString();
const std::string expected = R"(f32[2,2,3,3] {
{
{
{ 1, 2, 3 },
{ 4, 5, 6 },
{ 7, 8, 9 }
},
{
{ 11, 12, 13 },
{ 14, 15, 16 },
{ 17, 18, 19 }
}
},
{
{
{ 101, 102, 103 },
{ 104, 105, 106 },
{ 107, 108, 109 }
},
{
{ 201, 202, 203 },
{ 204, 205, 206 },
{ 207, 208, 209 }
}
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, EachCellR2F32) {
auto literal = LiteralUtil::CreateR2<float>({
{3.1f, 4.2f},
{9.3f, 12.4f},
});
std::vector<std::tuple<int64_t, int64_t, std::string>> seen;
literal.EachCellAsString(
[&seen](absl::Span<const int64_t> indices, const std::string& value) {
seen.emplace_back(indices[0], indices[1], value);
});
using Elem = std::tuple<int64_t, int64_t, std::string>;
std::vector<Elem> expected = {Elem(0, 0, "3.1"), Elem(0, 1, "4.2"),
Elem(1, 0, "9.3"), Elem(1, 1, "12.4")};
EXPECT_EQ(expected, seen);
}
TEST_F(LiteralUtilTest, ScalarEquality) {
auto f32_42 = LiteralUtil::CreateR0<float>(42.0);
auto f32_42_clone = LiteralUtil::CreateR0<float>(42.0);
EXPECT_EQ(f32_42, f32_42);
EXPECT_EQ(f32_42, f32_42_clone);
auto f32_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(f32_42, f32_123);
auto f64_42 = LiteralUtil::CreateR0<double>(42.0);
EXPECT_NE(f32_42, f64_42);
}
TEST_F(LiteralUtilTest, NonScalarEquality) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_different =
LiteralUtil::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}});
auto vector_literal = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
auto scalar = LiteralUtil::CreateR0<float>(1.0);
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(matrix, matrix);
EXPECT_EQ(matrix, matrix_clone);
EXPECT_NE(matrix, matrix_different);
EXPECT_NE(matrix, vector_literal);
EXPECT_NE(matrix, scalar);
EXPECT_NE(matrix, nil);
EXPECT_EQ(nil, nil);
}
TEST_F(LiteralUtilTest, TokenEquality) {
auto token0 = LiteralUtil::CreateToken();
auto token1 = LiteralUtil::CreateToken();
auto scalar = LiteralUtil::CreateR0<float>(1.0);
EXPECT_EQ(token0, token1);
EXPECT_NE(token0, scalar);
EXPECT_EQ(LiteralUtil::MakeTuple({&token0}),
LiteralUtil::MakeTuple({&token0}));
EXPECT_EQ(LiteralUtil::MakeTuple({&token0, &scalar}),
LiteralUtil::MakeTuple({&token1, &scalar}));
EXPECT_NE(LiteralUtil::MakeTuple({&token0, &scalar}),
LiteralUtil::MakeTuple({&scalar, &token1}));
}
TEST_F(LiteralUtilTest, DifferentLayoutEquality) {
Literal colmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1}));
colmajor.Set<float>({0, 0}, 1.0);
colmajor.Set<float>({0, 1}, 2.0);
colmajor.Set<float>({1, 0}, 3.0);
colmajor.Set<float>({1, 1}, 4.0);
Literal rowmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0}));
rowmajor.Set<float>({0, 0}, 1.0);
rowmajor.Set<float>({0, 1}, 2.0);
rowmajor.Set<float>({1, 0}, 3.0);
rowmajor.Set<float>({1, 1}, 4.0);
EXPECT_EQ(rowmajor, colmajor);
}
TEST_F(LiteralUtilTest, DifferentLayoutInEquality) {
Literal colmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1}));
colmajor.Set<float>({0, 0}, 1.0);
colmajor.Set<float>({0, 1}, 2.0);
colmajor.Set<float>({1, 0}, 3.0);
colmajor.Set<float>({1, 1}, 4.0);
Literal rowmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0}));
rowmajor.Set<float>({0, 0}, 1.0);
rowmajor.Set<float>({0, 1}, 2.0);
rowmajor.Set<float>({1, 0}, 3.0);
rowmajor.Set<float>({1, 1}, 4.0);
  EXPECT_FALSE(rowmajor.Equal(colmajor, /*layout_sensitive=*/true));
  EXPECT_FALSE(colmajor.Equal(rowmajor, /*layout_sensitive=*/true));
}
TEST_F(LiteralUtilTest, TupleEquality) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple1 = LiteralUtil::MakeTuple({&scalar, &matrix});
auto scalar_clone = LiteralUtil::CreateR0<float>(1.0);
auto tuple2 = LiteralUtil::MakeTuple({&scalar_clone, &matrix});
EXPECT_EQ(tuple1, tuple2);
auto reversed_tuple = LiteralUtil::MakeTuple({&matrix, &scalar});
EXPECT_NE(tuple1, reversed_tuple);
auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
auto different_tuple = LiteralUtil::MakeTuple({&scalar_42, &matrix});
EXPECT_NE(tuple1, different_tuple);
}
TEST_F(LiteralUtilTest, DynamicShapeEquality) {
auto r1 = LiteralUtil::CreateR1<float>({1.0, 2.0});
r1.SetDynamicSize(0, {}, 1);
auto r2 = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
r2.SetDynamicSize(0, {}, 1);
auto tuple1 = LiteralUtil::MakeTuple({&r1, &r2});
auto r1_clone = LiteralUtil::CreateR1<float>({1.0, 3.0});
r1_clone.SetDynamicSize(0, {}, 1);
auto tuple2 = LiteralUtil::MakeTuple({&r1_clone, &r2});
EXPECT_EQ(tuple1, tuple2);
auto r2_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
r2_clone.SetDynamicSize(0, {}, 2);
auto tuple_3 = LiteralUtil::MakeTuple({&r1_clone, &r2_clone});
EXPECT_NE(tuple1, tuple_3);
}
TEST_F(LiteralUtilTest, C64Equality) {
auto vector = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_clone =
LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(vector, vector_clone);
auto vector_reversed =
LiteralUtil::CreateR1<complex64>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(vector, vector_reversed);
}
TEST_F(LiteralUtilTest, C128Equality) {
auto vector = LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_clone =
LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(vector, vector_clone);
auto vector_reversed =
LiteralUtil::CreateR1<complex128>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(vector, vector_reversed);
}
TEST_F(LiteralUtilTest, IsAllTuple) {
auto element1 = LiteralUtil::CreateR0<float>(0.0);
auto element2 = LiteralUtil::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}});
auto tuple = LiteralUtil::MakeTuple({&element1, &element1});
EXPECT_FALSE(tuple.IsAll(0));
EXPECT_FALSE(tuple.IsAll(1));
}
TEST_F(LiteralUtilTest, CreateFromShapeTuple) {
auto scalar = LiteralUtil::CreateR0<float>(0.0);
auto matrix = LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto x = Literal::CreateFromShape(tuple.shape());
EXPECT_EQ(tuple, x);
}
TEST_F(LiteralUtilTest, IsAll) {
EXPECT_TRUE(LiteralUtil::CreateR0<bool>(false).IsAll(0));
EXPECT_TRUE(LiteralUtil::CreateR0<bool>(true).IsAll(1));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAll(1));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAll(2));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(0));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(2));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(-1));
auto int8_min = std::numeric_limits<int8_t>::min();
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(255).IsAll(int8_min));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(42.0).IsAll(42));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(42.0001).IsAll(42));
EXPECT_TRUE(LiteralUtil::CreateR1<int>({100, 100, 100}).IsAll(100));
EXPECT_FALSE(LiteralUtil::CreateR1<double>({100, 100, 100.001}).IsAll(100));
EXPECT_TRUE(LiteralUtil::CreateR2<uint64_t>({{8, 8}, {8, 8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>({{8, 8}, {8, 9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>({{9, 8}, {8, 8}}).IsAll(8));
half h8(8.0f);
half h9(9.0f);
EXPECT_TRUE(LiteralUtil::CreateR2<half>({{h8}, {h8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h8}, {h9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h9}, {h8}}).IsAll(8));
bfloat16 b8(8.0f);
bfloat16 b9(9.0f);
EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b9}, {b8}}).IsAll(8));
bfloat16 b91(9.001f);
bfloat16 b90(9.00f);
EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b91}, {b90}}).IsAll(9.0));
tsl::float8_e5m2 p16(8);
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e5m2>({p16}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e5m2>({p16}).IsAll(9));
tsl::float8_e4m3 q16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3>({q16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3>({q16}).IsAll(9));
tsl::float8_e4m3fn r16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3fn>({r16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3fn>({r16}).IsAll(9));
tsl::float8_e4m3b11fnuz s16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3b11fnuz>({s16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3b11fnuz>({s16}).IsAll(9));
tsl::float8_e4m3fnuz t16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3fnuz>({t16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3fnuz>({t16}).IsAll(9));
tsl::float8_e5m2fnuz u16(8);
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e5m2fnuz>({u16}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e5m2fnuz>({u16}).IsAll(9));
tsl::float8_e3m4 v16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e3m4>({v16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e3m4>({v16}).IsAll(9));
complex64 c8_9 = {8, 9};
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}}).IsAll(8));
auto uint64_max = std::numeric_limits<uint64_t>::max();
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>(
{{uint64_max, uint64_max}, {uint64_max, uint64_max}})
.IsAll(-1));
}
TEST_F(LiteralUtilTest, IsAllFloat) {
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int8_t>(0).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(0).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(.5).IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(-.5).IsAllFloat(-.5));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(-.5).IsAllFloat(-.49));
EXPECT_FALSE(
LiteralUtil::CreateR2<float>({{0, 0, 0}, {0, .1, 0}}).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}})
.IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(.5).IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(-.5).IsAllFloat(-.5));
EXPECT_FALSE(LiteralUtil::CreateR0<double>(-.5).IsAllFloat(-.49));
EXPECT_FALSE(
LiteralUtil::CreateR2<double>({{0, 0, 0}, {0, .1, 0}}).IsAllFloat(0));
EXPECT_TRUE(
LiteralUtil::CreateR0<bfloat16>(bfloat16(128.)).IsAllFloat(128.5));
}
TEST_F(LiteralUtilTest, IsAllComplex) {
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int8_t>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<double>(0).IsAllComplex(0));
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}})
.IsAllComplex({8.0f, 9.0f}));
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}})
.IsAllComplex({8.0f, 9.0f}));
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c7_9}})
.IsAllComplex({8.0f, 9.0f}));
}
TEST_F(LiteralUtilTest, IsAllFirst) {
EXPECT_FALSE(LiteralUtil::CreateR1<bool>({false, true}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<bool>({false, false}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<int8_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<int8_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<uint8_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<int32_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<int32_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<uint32_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<uint32_t>({1, 1, 2}).IsAllFirst());
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}}).IsAllFirst());
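  // With denormals-are-zero enabled in the MXCSR, a plain floating-point
  // compare would treat the subnormal values below as equal to 0.0;
  // IsAllFirst should still report them as distinct.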
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
int old_csr = _mm_getcsr();
_mm_setcsr(old_csr | _MM_DENORMALS_ZERO_ON);
#endif
bool eq0 = LiteralUtil::CreateR1<float>({0.0, 1.401298e-45}).IsAllFirst();
bool eq1 = LiteralUtil::CreateR1<float>({0.0, 2.802597e-45}).IsAllFirst();
bool eq2 =
LiteralUtil::CreateR1<float>({4.203895e-45, 7.006492e-45}).IsAllFirst();
#if defined(__x86_64__) && defined(_MM_DENORMALS_ZERO_ON)
_mm_setcsr(old_csr);
#endif
EXPECT_FALSE(eq0);
EXPECT_FALSE(eq1);
EXPECT_FALSE(eq2);
}
TEST_F(LiteralUtilTest, CountEqualInt) {
EXPECT_EQ(LiteralUtil::CreateR1<int8_t>({}).CountEqual<int8_t>(1), 0);
EXPECT_EQ(
LiteralUtil::CreateR1<int8_t>({1, 2, 3, 4, 5, 100}).CountEqual<int8_t>(2),
1);
EXPECT_EQ(LiteralUtil::CreateR1<int8_t>({0, 3, 6, 0, 9, 18, 0})
.CountEqual<int8_t>(0),
3);
EXPECT_EQ(LiteralUtil::CreateR1<int32_t>({234, 345, 4, 45, 5467, 5467, 5467})
.CountEqual<int32_t>(5467),
3);
}
TEST_F(LiteralUtilTest, CountEqualFloat) {
EXPECT_EQ(LiteralUtil::CreateR1<float>({}).CountEqual<float>(0), 0);
EXPECT_EQ(LiteralUtil::CreateR1<float>({1.1, 2.2, 3.3, 4.4, 5.5, 100.6})
.CountEqual<float>(3.3),
1);
EXPECT_EQ(LiteralUtil::CreateR1<float>({7.62, 3, 7.75, 7.62, 7.3, 2, 7.62})
.CountEqual<float>(7.62),
3);
EXPECT_EQ(LiteralUtil::CreateR1<float>(
{NAN, 0, 6.8, NAN, NAN, NAN, 63.12, 24.6, NAN})
.CountEqual<float>(NAN),
5);
}
TEST_F(LiteralUtilTest, CountEqualBool) {
EXPECT_EQ(LiteralUtil::CreateR1<bool>({false, true}).CountEqual<bool>(false),
1);
}
TEST_F(LiteralUtilTest, CountEqualComplex) {
EXPECT_EQ(LiteralUtil::CreateR1<std::complex<double>>(
{std::complex<float>(1, 2), std::complex<float>(3, 4),
std::complex<float>(5, 6), std::complex<float>(6, 7)})
.CountEqual<float>(std::complex<float>(5, 6)),
1);
}
TEST_F(LiteralUtilTest, CountEqualMismatched) {
EXPECT_EQ(LiteralUtil::CreateR1<float>({13, 10.5, 15.6, 22.7})
.CountEqual<int8_t>(13),
1);
EXPECT_EQ(
LiteralUtil::CreateR1<float>({10.5, 15.6, 22.7}).CountEqual<int8_t>(1),
0);
EXPECT_EQ(LiteralUtil::CreateR1<std::complex<float>>(
{std::complex<float>(1, 2), std::complex<float>(3, 4),
std::complex<float>(5, 6), std::complex<float>(6, 7)})
.CountEqual<float>(1),
0);
}
TEST_F(LiteralUtilTest, IsZero) {
auto scalar_zero = LiteralUtil::CreateR0<float>(0.0f);
auto scalar_one = LiteralUtil::CreateR0<float>(1.0f);
EXPECT_TRUE(scalar_zero.IsZero({}));
EXPECT_FALSE(scalar_one.IsZero({}));
auto array = LiteralUtil::CreateR2<uint32_t>({{1, 2, 0, 3}, {1, 0, 1, 2}});
EXPECT_FALSE(array.IsZero({0, 1}));
EXPECT_TRUE(array.IsZero({0, 2}));
EXPECT_TRUE(array.IsZero({1, 1}));
EXPECT_FALSE(array.IsZero({1, 2}));
auto complex_zero = LiteralUtil::CreateR0<complex64>(0.0f);
auto complex_nonzero = LiteralUtil::CreateR0<complex64>(0.5f);
EXPECT_TRUE(complex_zero.IsZero({}));
EXPECT_FALSE(complex_nonzero.IsZero({}));
}
template <typename T>
class LiteralUtilTestTemplated : public ::testing::Test {};
using TestedTypes = ::testing::Types<float, int32_t, uint32_t, complex64>;
class TestNamer {
public:
template <typename TypeParam>
static std::string GetName(int) {
return ::testing::internal::GetTypeName<TypeParam>();
}
};
TYPED_TEST_SUITE(LiteralUtilTestTemplated, TestedTypes, TestNamer);
TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
TypeParam half = TypeParam(1) / TypeParam(2);
auto data = LiteralUtil::CreateR2<TypeParam>({{half, 2}, {3, 4}});
const Layout layout01 = LayoutUtil::MakeLayout({0, 1});
const Layout layout10 = LayoutUtil::MakeLayout({1, 0});
auto data01 = data.Relayout(layout01);
EXPECT_TRUE(LayoutUtil::Equal(data01.shape().layout(), layout01));
EXPECT_EQ(data, data01);
auto data10 = data.Relayout(layout10);
EXPECT_TRUE(LayoutUtil::Equal(data10.shape().layout(), layout10));
EXPECT_EQ(data, data10);
}
TEST_F(LiteralUtilTest, ReshapeR0) {
auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original.Reshape({}).value();
EXPECT_EQ(original, reshape);
}
TEST_F(LiteralUtilTest, ReshapeR4) {
auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
}, layout_r3_dim0major_);
auto reshape = original.Reshape({3, 4, 2}).value();
EXPECT_EQ(expected, reshape);
}
TEST_F(LiteralUtilTest, ReshapeR4Dim0Minor) {
auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0minor_);
auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
}, layout_r3_dim0major_);
auto reshape = original.Reshape({3, 4, 2}).value();
EXPECT_EQ(expected, reshape);
}
TEST_F(LiteralUtilTest, TransposeR0) {
auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original.Transpose({});
EXPECT_EQ(original, reshape);
}
TEST_F(LiteralUtilTest, TransposeR4) {
auto original = LiteralUtil::CreateR4<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}});
auto reshape = original.Transpose({2, 3, 0, 1});
reshape.EachCell<float>([&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>(
{indices[2], indices[3], indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, TransposeDynamicR2) {
auto original = LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}});
original.SetDynamicSize(1, 1);
auto reshape = original.Transpose({1, 0});
reshape.EachCell<float>([&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[1], indices[0]}));
});
}
TEST_F(LiteralUtilTest, ToStaticR2) {
auto original = LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}});
original.SetDynamicSize(1, 1);
auto static_literal = original.ToStatic();
EXPECT_EQ(static_literal.shape(), ShapeUtil::MakeShape(F32, {2, 1}));
EXPECT_TRUE(static_literal.shape().is_static());
static_literal.EachCell<float>(
[&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, ToBoundedDynamicR2) {
auto original = LiteralUtil::CreateR2<float>({{1}, {4}});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3}, {false, true});
auto dynamic_literal = original.ToBoundedDynamic(dynamic_shape);
EXPECT_EQ(dynamic_literal.shape(), dynamic_shape);
dynamic_literal.EachCell<float>(
[&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, TestR4RelayoutEquivalence) {
auto dim0minor_relaid_to_dim0major =
literal_r4_2x2x3x3_dim0minor_.Relayout(layout_r4_dim0major_);
EXPECT_EQ(literal_r4_2x2x3x3_dim0major_, dim0minor_relaid_to_dim0major);
auto dim0major_relaid_to_dim0minor =
literal_r4_2x2x3x3_dim0major_.Relayout(layout_r4_dim0minor_);
EXPECT_EQ(literal_r4_2x2x3x3_dim0minor_, dim0major_relaid_to_dim0minor);
}
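// Hash adapter that forwards AbslHashValue to Literal::Hash so the tests
// below can compare layout-sensitive and layout-insensitive hashes.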
template <bool kIsLayoutSensitive>
struct HashTester {
template <typename H>
friend H AbslHashValue(H h, const HashTester& key) {
return Literal::Hash<H, kIsLayoutSensitive, 64>(
std::move(h), *key.literal);
}
const Literal* literal;
};
TEST_F(LiteralUtilTest, TestR2LinearLayout) {
auto mat_dim0minor = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0minor_);
EXPECT_EQ(mat_dim0minor.element_count(), 6);
EXPECT_THAT(mat_dim0minor.data<int32_t>(), ElementsAre(1, 4, 2, 5, 3, 6));
auto relaid_mat_to_dim0major = mat_dim0minor.Relayout(layout_r2_dim0major_);
EXPECT_THAT(relaid_mat_to_dim0major.data<int32_t>(),
ElementsAre(1, 2, 3, 4, 5, 6));
EXPECT_EQ(absl::HashOf(HashTester<false>{&mat_dim0minor}),
absl::HashOf(HashTester<false>{&relaid_mat_to_dim0major}));
auto mat_dim0major = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0major_);
EXPECT_EQ(mat_dim0major.element_count(), 6);
EXPECT_THAT(mat_dim0major.data<int32_t>(), ElementsAre(1, 2, 3, 4, 5, 6));
auto relaid_mat_to_dim0minor = mat_dim0major.Relayout(layout_r2_dim0minor_);
EXPECT_THAT(relaid_mat_to_dim0minor.data<int32_t>(),
ElementsAre(1, 4, 2, 5, 3, 6));
EXPECT_EQ(absl::HashOf(HashTester<false>{&mat_dim0major}),
absl::HashOf(HashTester<false>{&relaid_mat_to_dim0minor}));
EXPECT_EQ(absl::HashOf(HashTester<true>{&mat_dim0minor}),
absl::HashOf(HashTester<true>{&relaid_mat_to_dim0minor}));
EXPECT_EQ(absl::HashOf(HashTester<true>{&mat_dim0major}),
absl::HashOf(HashTester<true>{&relaid_mat_to_dim0major}));
}
TEST_F(LiteralUtilTest, TestR3LinearLayout) {
Array3D<int> arr3d(
{
{
{1, 2, 3},
{4, 5, 6},
},
{
{7, 8, 9},
{10, 11, 12},
},
});
auto lit_dim0minor = LiteralUtil::CreateR3FromArray3DWithLayout<int>(
arr3d, layout_r3_dim0minor_);
EXPECT_EQ(lit_dim0minor.element_count(), 12);
std::vector<int> expected_dim0minor{1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12};
EXPECT_THAT(lit_dim0minor.data<int32_t>(),
testing::ElementsAreArray(expected_dim0minor));
auto relaid_lit_to_dim0major = lit_dim0minor.Relayout(layout_r3_dim0major_);
std::vector<int> expected_dim0major{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
EXPECT_THAT(relaid_lit_to_dim0major.data<int32_t>(),
testing::ElementsAreArray(expected_dim0major));
auto lit_dim0major = LiteralUtil::CreateR3FromArray3DWithLayout<int>(
arr3d, layout_r3_dim0major_);
EXPECT_EQ(lit_dim0major.element_count(), 12);
EXPECT_THAT(lit_dim0major.data<int32_t>(),
testing::ElementsAreArray(expected_dim0major));
auto relaid_lit_to_dim0minor = lit_dim0major.Relayout(layout_r3_dim0minor_);
EXPECT_THAT(relaid_lit_to_dim0minor.data<int32_t>(),
testing::ElementsAreArray(expected_dim0minor));
}
TEST_F(LiteralUtilTest, SliceR0S32) {
auto input = LiteralUtil::CreateR0<int32_t>(1);
auto result = input.Slice({}, {});
EXPECT_EQ(input, result);
}
TEST_F(LiteralUtilTest, SliceR1F32) {
auto input = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0, 5.0});
auto result = input.Slice({3}, {4});
auto expected = LiteralUtil::CreateR1<float>({4.0});
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, SliceR2U32) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
auto result = input_3x4.Slice({0, 2}, {2, 4});
auto expected = LiteralUtil::CreateR2<uint32_t>({{3, 4}, {7, 8}});
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, SliceR3U32Full) {
auto input_2x3x2 = LiteralUtil::CreateR3<uint32_t>(
{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}});
auto result = input_2x3x2.Slice({0, 0, 0}, {2, 3, 2});
EXPECT_EQ(input_2x3x2, result);
}
TEST_F(LiteralUtilTest, SliceR2Dynamic) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
input_3x4.SetDynamicSize(1, 3);
auto result = input_3x4.Slice({0, 1}, {2, 2});
auto expected = LiteralUtil::CreateR2<uint32_t>({{2}, {6}});
EXPECT_EQ(expected, result);
EXPECT_EQ(result.GetDynamicSize(1), 1);
}
TEST_F(LiteralUtilTest, SliceR2DynamicInBound) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
input_3x4.SetDynamicSize(1, 1);
auto result = input_3x4.Slice({0, 0}, {2, 2});
auto expected = LiteralUtil::CreateR2<uint32_t>({{1}, {5}});
EXPECT_EQ(expected, result);
EXPECT_EQ(result.GetDynamicSize(1), 1);
}
TEST_F(LiteralUtilTest, SliceR2DynamicOutOfBound) {
auto input_3x4 = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
input_3x4.SetDynamicSize(1, 1);
auto result = input_3x4.Slice({0, 1}, {2, 3});
auto expected = LiteralUtil::CreateR2<uint32_t>({{}, {}});
EXPECT_EQ(expected, result);
EXPECT_EQ(result.GetDynamicSize(1), 0);
}
TEST_F(LiteralUtilTest, PopulateR1S64) {
Literal output(ShapeUtil::MakeShape(S64, {1}));
output.PopulateR1<int64_t>({77});
auto expected = LiteralUtil::CreateR1<int64_t>({77});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR1U64) {
Literal output(ShapeUtil::MakeShape(U64, {2}));
output.PopulateR1<uint64_t>({{77, 88}});
auto expected = LiteralUtil::CreateR1<uint64_t>({{77, 88}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR1C64) {
Literal output(ShapeUtil::MakeShape(C64, {1}));
output.PopulateR1<complex64>({{77, 88}});
auto expected = LiteralUtil::CreateR1<complex64>({{77, 88}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR1C128) {
Literal output(ShapeUtil::MakeShape(C128, {1}));
output.PopulateR1<complex128>({{77, 88}});
auto expected = LiteralUtil::CreateR1<complex128>({{77, 88}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateR2C64) {
Literal output(ShapeUtil::MakeShape(C64, {2, 2}));
output.PopulateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
auto expected =
LiteralUtil::CreateR2<complex64>({{{7, 8}, {9, 10}}, {{1, 2}, {3, 4}}});
EXPECT_EQ(output, expected);
}
TYPED_TEST(LiteralUtilFloatTest, PopulateWithValueR0Float) {
Literal output(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<TypeParam>(), {}));
TypeParam h(0.25f);
output.PopulateWithValue<TypeParam>(h);
auto expected = LiteralUtil::CreateR0<TypeParam>(h);
EXPECT_EQ(output, expected);
}
TYPED_TEST(LiteralUtilFloatTest, PopulateWithValueR1Float) {
Literal output(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<TypeParam>(), {3}));
TypeParam h(0.5f);
output.PopulateWithValue<TypeParam>(h);
auto expected = LiteralUtil::CreateR1<TypeParam>({h, h, h});
EXPECT_EQ(output, expected);
}
TYPED_TEST(LiteralUtilFloatTest, PopulateWithValueR2Float) {
Literal output(ShapeUtil::MakeShape(
primitive_util::NativeToPrimitiveType<TypeParam>(), {2, 2}));
TypeParam h(2.0f);
output.PopulateWithValue<TypeParam>(h);
auto expected = LiteralUtil::CreateR2<TypeParam>({{h, h}, {h, h}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR1S64) {
Literal output(ShapeUtil::MakeShape(S64, {3}));
output.PopulateWithValue<int64_t>(-7);
auto expected = LiteralUtil::CreateR1<int64_t>({-7, -7, -7});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2U64) {
Literal output(ShapeUtil::MakeShape(U64, {2, 2}));
output.PopulateWithValue<uint64_t>(42);
auto expected = LiteralUtil::CreateR2<uint64_t>({{42, 42}, {42, 42}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2C64) {
Literal output(ShapeUtil::MakeShape(C64, {2, 2}));
output.PopulateWithValue<complex64>({4, 2});
auto expected =
LiteralUtil::CreateR2<complex64>({{{4, 2}, {4, 2}}, {{4, 2}, {4, 2}}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, PopulateWithValueR2C128) {
Literal output(ShapeUtil::MakeShape(C128, {2, 2}));
output.PopulateWithValue<complex128>({4, 2});
auto expected =
LiteralUtil::CreateR2<complex128>({{{4, 2}, {4, 2}}, {{4, 2}, {4, 2}}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, ReplicateR2U32) {
auto input = LiteralUtil::CreateR2<uint32_t>(
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}});
auto output = input.Replicate<uint32_t>(3);
auto expected = LiteralUtil::CreateR3<uint32_t>(
{{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}},
{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}});
EXPECT_EQ(output, expected);
}
TEST_F(LiteralUtilTest, CopySliceFrom) {
const int64_t dimensions[] = {17, 15, 34, 21};
const int64_t layouts[][4] = {
{3, 2, 1, 0}, {0, 2, 1, 3}, {0, 1, 2, 3}, {2, 0, 3, 1}, {1, 3, 0, 2}};
for (const auto& layout : layouts) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<uint32_t>(), dimensions, layout);
auto source = Literal::CreateFromShape(shape);
const int64_t zero_base[] = {0, 0, 0, 0};
const int64_t step[] = {1, 1, 1, 1};
uint32_t seqnr = 0;
auto init_proc = [&](absl::Span<const int64_t> indexes) {
source.Set(indexes, ++seqnr);
return true;
};
ShapeUtil::ForEachIndex(source.shape(), zero_base, dimensions, step,
init_proc);
auto blank = Literal::CreateFromShape(shape);
const int64_t src_base[] = {3, 1, 5, 7};
const int64_t dest_base[] = {6, 4, 12, 2};
const int64_t copy_size[] = {7, 8, 11, 9};
TF_EXPECT_OK(blank.CopySliceFrom(source, src_base, dest_base, copy_size));
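    // Walk the copied window and check that every destination element is
    // non-zero and equal to its counterpart in the source (offset by
    // src_base/dest_base).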
std::vector<int64_t> source_indexes(TF_ARRAYSIZE(dimensions), 0);
std::vector<int64_t> blank_indexes(TF_ARRAYSIZE(dimensions), 0);
bool matched = true;
auto check_proc = [&](absl::Span<const int64_t> indexes) {
std::copy(indexes.begin(), indexes.end(), source_indexes.begin());
std::transform(source_indexes.begin(), source_indexes.end(), src_base,
source_indexes.begin(), std::plus<int64_t>());
std::copy(indexes.begin(), indexes.end(), blank_indexes.begin());
std::transform(blank_indexes.begin(), blank_indexes.end(), dest_base,
blank_indexes.begin(), std::plus<int64_t>());
auto bval = blank.Get<uint32_t>(blank_indexes);
matched = (bval != 0 && bval == source.Get<uint32_t>(source_indexes));
return matched;
};
ShapeUtil::ForEachIndex(source.shape(), zero_base, copy_size, step,
check_proc);
EXPECT_TRUE(matched);
}
}
TEST_F(LiteralUtilTest, CopyFromScalars) {
auto zero = LiteralUtil::CreateR0<uint32_t>(0);
auto nine = LiteralUtil::CreateR0<uint32_t>(9);
TF_EXPECT_OK(zero.CopyFrom(nine));
EXPECT_EQ(zero, nine);
auto vect = LiteralUtil::CreateR1<uint32_t>({3, 4, 9, 12, 5, 17, 21});
TF_EXPECT_OK(zero.CopySliceFrom(vect, {5}, {}, {}));
EXPECT_EQ(zero.Get<uint32_t>({}), 17);
TF_EXPECT_OK(vect.CopySliceFrom(zero, {}, {4}, {}));
EXPECT_EQ(vect.Get<uint32_t>({4}), 17);
}
TEST_F(LiteralUtilTest, CopyFromAndToZeroElement) {
const Shape empty_r1_shape = ShapeUtil::MakeShape(F32, {0});
const auto const_nine = LiteralUtil::CreateR1<float>({9});
const auto const_empty = Literal::CreateFromShape(empty_r1_shape);
{
const auto empty = Literal::CreateFromShape(empty_r1_shape);
auto nine = LiteralUtil::CreateR1<float>({9});
TF_EXPECT_OK(nine.CopySliceFrom(empty, {0}, {0}, {0}));
EXPECT_EQ(nine, const_nine);
}
{
auto empty = Literal::CreateFromShape(empty_r1_shape);
auto nine = LiteralUtil::CreateR1<float>({9});
TF_EXPECT_OK(empty.CopySliceFrom(nine, {0}, {0}, {0}));
EXPECT_EQ(empty, const_empty);
}
}
TEST_F(LiteralUtilTest, CopyFromNilShape) {
Literal nil_literal0(ShapeUtil::MakeNil());
Literal nil_literal1(ShapeUtil::MakeNil());
TF_ASSERT_OK(nil_literal0.CopyFrom(nil_literal1));
}
TEST_F(LiteralUtilTest, CopyFromArrays) {
auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
auto scalar_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(scalar_42, scalar_123);
  TF_ASSERT_OK(scalar_42.CopyFrom(scalar_123, /*dest_shape_index=*/{},
                                  /*src_shape_index=*/{}));
EXPECT_EQ(scalar_42, scalar_123);
EXPECT_EQ(scalar_42.Get<float>({}), 123.0f);
auto matrix_1234 = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_5678 = LiteralUtil::CreateR2<float>({{5.0, 6.0}, {7.0, 8.0}});
EXPECT_NE(matrix_1234, matrix_5678);
EXPECT_EQ(matrix_1234.Get<float>({0, 0}), 1.0f);
  TF_ASSERT_OK(matrix_1234.CopyFrom(matrix_5678, /*dest_shape_index=*/{},
                                    /*src_shape_index=*/{}));
EXPECT_EQ(matrix_1234, matrix_5678);
EXPECT_EQ(matrix_1234.Get<float>({0, 0}), 5.0f);
}
TEST_F(LiteralUtilTest, CopyFromTuples) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Literal nil_literal(ShapeUtil::MakeNil());
Literal inner_elements[] = {LiteralUtil::CreateR0<int32_t>(42),
LiteralUtil::CreateR1<double>({23.0, 44.0})};
Literal inner_tuple = LiteralUtil::MakeTuple(
{&inner_elements[0], &inner_elements[1], &nil_literal});
Literal nested_tuple = LiteralUtil::MakeTuple({&matrix, &inner_tuple});
Literal int32_minus5 = LiteralUtil::CreateR0<int32_t>(-5);
Literal double_2_4 = LiteralUtil::CreateR1<double>({2.0, 4.0});
Literal tuple =
LiteralUtil::MakeTuple({&int32_minus5, &double_2_4, &nil_literal});
EXPECT_EQ(matrix, LiteralSlice(nested_tuple, {0}));
EXPECT_EQ(nested_tuple.Get<int32_t>({}, {1, 0}), 42);
EXPECT_EQ(nested_tuple.Get<double>({0}, {1, 1}), 23.0);
EXPECT_EQ(nested_tuple.Get<double>({1}, {1, 1}), 44.0);
  TF_ASSERT_OK(nested_tuple.CopyFrom(tuple, /*dest_shape_index=*/{1},
                                     /*src_shape_index=*/{}));
EXPECT_EQ(matrix, LiteralSlice(nested_tuple, {0}));
EXPECT_EQ(nested_tuple.Get<int32_t>({}, {1, 0}), -5);
EXPECT_EQ(nested_tuple.Get<double>({0}, {1, 1}), 2.0);
EXPECT_EQ(nested_tuple.Get<double>({1}, {1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, CopyBetweenSameTuple) {
Literal elements[] = {LiteralUtil::CreateR0<int32_t>(-2),
LiteralUtil::CreateR0<int32_t>(4)};
Literal tuple = LiteralUtil::MakeTuple({&elements[0], &elements[1]});
EXPECT_EQ(tuple.Get<int32_t>({}, {0}), -2);
EXPECT_EQ(tuple.Get<int32_t>({}, {1}), 4);
  TF_ASSERT_OK(tuple.CopyFrom(tuple, /*dest_shape_index=*/{1},
                              /*src_shape_index=*/{0}));
EXPECT_EQ(tuple.Get<int32_t>({}, {0}), -2);
EXPECT_EQ(tuple.Get<int32_t>({}, {1}), -2);
}
TEST_F(LiteralUtilTest, CopyFromDifferentShapes) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto vector = LiteralUtil::CreateR1<float>({5.0, 7.0});
absl::Status status = matrix.CopyFrom(vector);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Destination subshape incompatible"));
}
TEST_F(LiteralUtilTest, F16) {
Literal m1 = Literal::CreateFromShape(ShapeUtil::MakeShape(F16, {2, 2}));
const char* d1 = reinterpret_cast<const char*>(m1.data<half>().data());
EXPECT_EQ(d1[0], 0);
EXPECT_EQ(d1[1], 0);
EXPECT_EQ(d1[2], 0);
EXPECT_EQ(d1[3], 0);
EXPECT_EQ(d1[4], 0);
EXPECT_EQ(d1[5], 0);
EXPECT_EQ(d1[6], 0);
EXPECT_EQ(d1[7], 0);
half h1(1.0f);
half h2(2.0f);
auto m2 = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}});
const uint16_t* d2 =
reinterpret_cast<const uint16_t*>(m2.data<half>().data());
EXPECT_EQ(d2[0], 0x3C00);
EXPECT_EQ(d2[1], 0x4000);
EXPECT_EQ(d2[2], 0x4000);
EXPECT_EQ(d2[3], 0x3C00);
}
TEST_F(LiteralUtilTest, Populate) {
struct PopulateData {
std::vector<int64_t> dimensions;
std::vector<int64_t> layout;
} populate_data[] = {
{{}, {}},
{{0}, {0}},
{{16}, {0}},
{{2, 0}, {1, 0}},
{{4, 16}, {1, 0}},
{{21, 12}, {0, 1}},
{{6, 11, 17}, {2, 0, 1}},
{{6, 11, 5, 17}, {3, 2, 0, 1}},
};
for (const auto& data : populate_data) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<uint32_t>(), data.dimensions,
data.layout);
Literal literal(shape);
auto generator = [&](absl::Span<const int64_t> indexes) -> uint32_t {
return IndexUtil::MultidimensionalIndexToLinearIndex(literal.shape(),
indexes) +
17;
};
TF_EXPECT_OK(literal.Populate<uint32_t>(generator));
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
bool matched = true;
auto check_function = [&](absl::Span<const int64_t> indexes) {
auto value = literal.Get<uint32_t>(indexes);
matched = matched && (value == generator(indexes));
return matched;
};
ShapeUtil::ForEachIndex(literal.shape(), zero_base, data.dimensions, step,
check_function);
EXPECT_TRUE(matched);
}
}
TEST_F(LiteralUtilTest, PopulateParallel) {
struct PopulateData {
std::vector<int64_t> dimensions;
std::vector<int64_t> layout;
} populate_data[] = {
{{}, {}},
{{0}, {0}},
{{16}, {0}},
{{2, 0}, {1, 0}},
{{4, 16}, {1, 0}},
{{21, 12}, {0, 1}},
{{6, 11, 17}, {2, 0, 1}},
{{6, 11, 5, 17}, {3, 2, 0, 1}},
};
for (const auto& data : populate_data) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(
primitive_util::NativeToPrimitiveType<uint32_t>(), data.dimensions,
data.layout);
Literal literal(shape);
    auto generator = [&](absl::Span<const int64_t> indexes,
                         int /*thread_id*/) -> uint32_t {
return IndexUtil::MultidimensionalIndexToLinearIndex(literal.shape(),
indexes) +
17;
};
TF_EXPECT_OK(literal.PopulateParallel<uint32_t>(generator));
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
bool matched = true;
auto check_function = [&](absl::Span<const int64_t> indexes) {
auto value = literal.Get<uint32_t>(indexes);
matched = matched && (value == generator(indexes, -1));
return matched;
};
ShapeUtil::ForEachIndex(literal.shape(), zero_base, data.dimensions, step,
check_function);
EXPECT_TRUE(matched);
}
}
TEST_F(LiteralUtilTest, ConvertR4) {
auto original = LiteralUtil::CreateR4WithLayout<int8_t>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
auto expected = LiteralUtil::CreateR4WithLayout<uint32_t>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
TF_ASSERT_OK_AND_ASSIGN(Literal converted, original.Convert(U32));
EXPECT_EQ(expected, converted);
}
TEST_F(LiteralUtilTest, ConvertIfTypesMatch) {
auto s8 = LiteralUtil::CreateR4WithLayout<int8_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto s16 = LiteralUtil::CreateR4WithLayout<int16_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto s32 = LiteralUtil::CreateR4WithLayout<int32_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto u16 = LiteralUtil::CreateR4WithLayout<uint16_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto u32 = LiteralUtil::CreateR4WithLayout<uint32_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto s64 = LiteralUtil::CreateR4WithLayout<int64_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto u64 = LiteralUtil::CreateR4WithLayout<uint64_t>({{
{{10, 0, 12, 0}, {0, 15, 0, 17}},
{{0, 19, 0, 21}, {22, 0, 24, 0}},
{{26, 0, 28, 0}, {0, 31, 0, 33}},
}}, layout_r4_dim0major_);
auto pred = LiteralUtil::CreateR4WithLayout<bool>({{
{{true, false, true, false}, {false, true, false, true}},
{{false, true, false, true}, {true, false, true, false}},
{{true, false, true, false}, {false, true, false, true}},
}}, layout_r4_dim0major_);
auto int32_pred = LiteralUtil::CreateR4WithLayout<int32_t>({{
{{1, 0, 1, 0}, {0, 1, 0, 1}},
{{0, 1, 0, 1}, {1, 0, 1, 0}},
{{1, 0, 1, 0}, {0, 1, 0, 1}},
}}, layout_r4_dim0major_);
auto s4nums = LiteralUtil::CreateR4WithLayout<s4>({{
{{s4(1), s4(0), s4(2), s4(0)}, {s4(0), s4(5), s4(0), s4(7)}},
{{s4(0), s4(1), s4(0), s4(1)}, {s4(2), s4(0), s4(4), s4(0)}},
{{s4(2), s4(0), s4(2), s4(0)}, {s4(0), s4(3), s4(0), s4(3)}},
}}, layout_r4_dim0major_);
auto int32_s4nums = LiteralUtil::CreateR4WithLayout<int32_t>({{
{{1, 0, 2, 0}, {0, 5, 0, 7}},
{{0, 1, 0, 1}, {2, 0, 4, 0}},
{{2, 0, 2, 0}, {0, 3, 0, 3}},
}}, layout_r4_dim0major_);
auto f16 = LiteralUtil::CreateR4WithLayout<half>({{
{{half(10.0), half(0.0), half(12.0), half(0.0)},
{half(0.0), half(15.0), half(0.0), half(17.0)}},
{{half(0.0), half(19.0), half(0.0), half(21.0)},
{half(22.0), half(0.0), half(24.0), half(0.0)}},
{{half(26.0), half(0.0), half(28.0), half(0.0)},
{half(0.0), half(31.0), half(0.0), half(33.0)}},
}}, layout_r4_dim0major_);
auto bf16 = LiteralUtil::CreateR4WithLayout<bfloat16>({{
{{bfloat16(10.0), bfloat16(0.0), bfloat16(12.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(15.0), bfloat16(0.0), bfloat16(17.0)}},
{{bfloat16(0.0), bfloat16(19.0), bfloat16(0.0), bfloat16(21.0)},
{bfloat16(22.0), bfloat16(0.0), bfloat16(24.0), bfloat16(0.0)}},
{{bfloat16(26.0), bfloat16(0.0), bfloat16(28.0), bfloat16(0.0)},
{bfloat16(0.0), bfloat16(31.0), bfloat16(0.0), bfloat16(33.0)}},
}}, layout_r4_dim0major_);
auto f32 = LiteralUtil::CreateR4WithLayout<float>({{
{{10.0f, 0.0f, 12.0f, 0.0f}, {0.0f, 15.0f, 0.0f, 17.0f}},
{{0.0f, 19.0f, 0.0f, 21.0f}, {22.0f, 0.0f, 24.0f, 0.0f}},
{{26.0f, 0.0f, 28.0f, 0.0f}, {0.0f, 31.0f, 0.0f, 33.0f}},
}}, layout_r4_dim0major_);
auto f64 = LiteralUtil::CreateR4WithLayout<double>({{
{{10.0, 0.0, 12.0, 0.0}, {0.0, 15.0, 0.0, 17.0}},
{{0.0, 19.0, 0.0, 21.0}, {22.0, 0.0, 24.0, 0.0}},
{{26.0, 0.0, 28.0, 0.0}, {0.0, 31.0, 0.0, 33.0}},
}}, layout_r4_dim0major_);
auto c64 = LiteralUtil::CreateR4WithLayout<complex64>({{
{{10.0f, 0.0f, 12.0f, 0.0f}, {0.0f, 15.0f, 0.0f, 17.0f}},
{{0.0f, 19.0f, 0.0f, 21.0f}, {22.0f, 0.0f, 24.0f, 0.0f}},
{{26.0f, 0.0f, 28.0f, 0.0f}, {0.0f, 31.0f, 0.0f, 33.0f}},
}}, layout_r4_dim0major_);
auto c128 = LiteralUtil::CreateR4WithLayout<complex128>({{
{{10.0, 0.0, 12.0, 0.0}, {0.0, 15.0, 0.0, 17.0}},
{{0.0, 19.0, 0.0, 21.0}, {22.0, 0.0, 24.0, 0.0}},
{{26.0, 0.0, 28.0, 0.0}, {0.0, 31.0, 0.0, 33.0}},
}}, layout_r4_dim0major_);
Literal conv;
conv = s8.Convert(U16).value();
EXPECT_EQ(conv, u16);
conv = s8.Convert(S16).value();
EXPECT_EQ(conv, s16);
conv = s8.Convert(U32).value();
EXPECT_EQ(conv, u32);
conv = s8.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = s8.Convert(U64).value();
EXPECT_EQ(conv, u64);
conv = s8.Convert(S64).value();
EXPECT_EQ(conv, s64);
conv = s8.Convert(PRED).value();
EXPECT_EQ(conv, pred);
conv = bf16.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = bf16.Convert(F32).value();
EXPECT_EQ(conv, f32);
conv = pred.Convert(S32).value();
EXPECT_EQ(conv, int32_pred);
conv = s4nums.Convert(S32).value();
EXPECT_EQ(conv, int32_s4nums);
conv = f32.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = f64.Convert(S32).value();
EXPECT_EQ(conv, s32);
conv = s32.Convert(F32).value();
EXPECT_EQ(conv, f32);
conv = f32.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = f64.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = s32.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = u32.Convert(F16).value();
EXPECT_EQ(conv, f16);
conv = s32.Convert(C64).value();
EXPECT_EQ(conv, c64);
conv = f16.Convert(C64).value();
EXPECT_EQ(conv, c64);
conv = s32.Convert(S16).value();
EXPECT_EQ(conv, s16);
conv = s32.Convert(U16).value();
EXPECT_EQ(conv, u16);
conv = s32.Convert(C128).value();
EXPECT_EQ(conv, c128);
conv = f16.Convert(C128).value();
EXPECT_EQ(conv, c128);
EXPECT_EQ(s32.Convert(TUPLE).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c64.Convert(F32).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c64.Convert(S32).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c128.Convert(F32).status().code(), tsl::error::UNIMPLEMENTED);
EXPECT_EQ(c128.Convert(S32).status().code(), tsl::error::UNIMPLEMENTED);
}
TYPED_TEST(LiteralUtilFloatTest, ConvertIfTypesMatchF8) {
constexpr auto ptype = primitive_util::NativeToPrimitiveType<TypeParam>();
if (!primitive_util::IsF8Type(ptype)) {
GTEST_SKIP() << "Skipping test for non F8 types";
}
auto s8 = LiteralUtil::CreateR2WithLayout<int8_t>(
{{0, 1}, {2, 3}}, LiteralUtilTest::layout_r2_dim0major_);
auto bf16 = LiteralUtil::CreateR2WithLayout<bfloat16>(
{{bfloat16(0.), bfloat16(1.)}, {bfloat16(2.), bfloat16(3.)}},
LiteralUtilTest::layout_r2_dim0major_);
auto f32 = LiteralUtil::CreateR2WithLayout<float>(
{{0., 1.}, {2., 3.}}, LiteralUtilTest::layout_r2_dim0major_);
auto c128 = LiteralUtil::CreateR2WithLayout<complex128>(
{{0., 1.}, {2., 3.}}, LiteralUtilTest::layout_r2_dim0major_);
using f8e5m2_t = tsl::float8_e5m2;
auto f8e5m2 = LiteralUtil::CreateR2WithLayout<f8e5m2_t>(
{{f8e5m2_t{0.}, f8e5m2_t{1.}}, {f8e5m2_t{2.}, f8e5m2_t{3.}}},
LiteralUtilTest::layout_r2_dim0major_);
using e4m3fn_t = tsl::float8_e4m3fn;
auto f8e4m3fn = LiteralUtil::CreateR2WithLayout<e4m3fn_t>(
{{e4m3fn_t{0.}, e4m3fn_t{1.}}, {e4m3fn_t{2.}, e4m3fn_t{3.}}},
LiteralUtilTest::layout_r2_dim0major_);
auto f8 = LiteralUtil::CreateR2WithLayout<TypeParam>(
{{TypeParam{0.}, TypeParam{1.}}, {TypeParam{2.}, TypeParam{3.}}},
LiteralUtilTest::layout_r2_dim0major_);
Literal conv;
conv = s8.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = bf16.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f32.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f8e5m2.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f8e4m3fn.Convert(ptype).value();
EXPECT_EQ(conv, f8);
conv = f8.Convert(S8).value();
EXPECT_EQ(conv, s8);
conv = f8.Convert(BF16).value();
EXPECT_EQ(conv, bf16);
conv = f8.Convert(F32).value();
EXPECT_EQ(conv, f32);
conv = f8.Convert(C128).value();
EXPECT_EQ(conv, c128);
conv = f8.Convert(F8E5M2).value();
EXPECT_EQ(conv, f8e5m2);
conv = f8.Convert(F8E4M3FN).value();
EXPECT_EQ(conv, f8e4m3fn);
}
TEST_F(LiteralUtilTest, BitcastConvert) {
Literal original = LiteralUtil::CreateR1<uint32_t>(
{absl::bit_cast<uint32_t>(2.5f), absl::bit_cast<uint32_t>(-42.25f),
absl::bit_cast<uint32_t>(100.f), 0xbeef});
Literal expected = LiteralUtil::CreateR1<float>(
{2.5f, -42.25f, 100.0f, absl::bit_cast<float>(0xbeef)});
TF_ASSERT_OK_AND_ASSIGN(Literal converted,
original.BitcastConvert(ShapeUtil::ChangeElementType(
original.shape(), F32)));
  EXPECT_EQ(expected, converted);
}
TEST_F(LiteralUtilTest, BitcastConvertBetweenInvalidTypes) {
Literal literal = LiteralUtil::CreateR0<uint32_t>(1234);
absl::Status status =
literal.BitcastConvert(ShapeUtil::ChangeElementType(literal.shape(), F64))
.status();
EXPECT_NE(absl::OkStatus(), status);
EXPECT_TRUE(
absl::StrContains(status.message(), "to a shape of different size"));
}
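// Writes a default descending minor-to-major layout onto the proto, matching
// the number of dimensions already present.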
void SetDefaultLayoutOnProto(ShapeProto* shape_proto) {
CHECK(ShapeUtil::IsArrayPrimitiveType(shape_proto->element_type()));
auto* minor_to_major =
shape_proto->mutable_layout()->mutable_minor_to_major();
minor_to_major->Resize(shape_proto->dimensions_size(), 0);
const int64_t size = minor_to_major->size();
for (int64_t i = 0; i < size; ++i) {
minor_to_major->Set(i, size - 1 - i);
}
}
TEST_F(LiteralUtilTest, CopyFromProto_Bool) {
LiteralProto p;
p.mutable_shape()->set_element_type(PRED);
for (int len = 0; len < 25; ++len) {
p.mutable_shape()->clear_dimensions();
p.mutable_shape()->add_dimensions(len);
SetDefaultLayoutOnProto(p.mutable_shape());
p.clear_preds();
for (int i = 0; i < len; ++i) {
p.add_preds((i % 2) == (len % 2));
}
TF_ASSERT_OK_AND_ASSIGN(Literal literal, Literal::CreateFromProto(p));
ASSERT_EQ(len, literal.data<bool>().size());
int i = 0;
for (bool value : literal.data<bool>()) {
EXPECT_EQ((i % 2) == (len % 2), value);
++i;
}
}
}
TEST_F(LiteralUtilTest, ToProto_f16) {
half h1(1.0f);
half h2(2.0f);
auto m = LiteralUtil::CreateR2<half>({{h1, h2}, {h2, h1}});
EXPECT_EQ(4, ShapeUtil::ElementsIn(m.shape()));
EXPECT_EQ(4, m.data<half>().size());
LiteralProto p = m.ToProto();
EXPECT_EQ(4, ShapeUtil::ElementsIn(Shape(p.shape())));
EXPECT_EQ(8, p.f16s().size());
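  // The f16s field stores each half as two little-endian bytes:
  // 0x3C00 is 1.0 and 0x4000 is 2.0.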
const char* d = p.f16s().data();
EXPECT_EQ(d[0], 0);
EXPECT_EQ(d[1], 0x3C);
EXPECT_EQ(d[2], 0);
EXPECT_EQ(d[3], 0x40);
EXPECT_EQ(d[4], 0);
EXPECT_EQ(d[5], 0x40);
EXPECT_EQ(d[6], 0);
EXPECT_EQ(d[7], 0x3C);
}
TEST_F(LiteralUtilTest, CopyFromProto_f16) {
half h1(1.0f);
half h2(2.0f);
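  // half_vals is the little-endian byte encoding of {h1, h2, h2, h1}.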
const char half_vals[8] = {0x00, 0x3C, 0x00, 0x40, 0x00, 0x40, 0x00, 0x3C};
LiteralProto p;
p.mutable_shape()->set_element_type(F16);
p.mutable_shape()->clear_dimensions();
p.mutable_shape()->add_dimensions(4);
SetDefaultLayoutOnProto(p.mutable_shape());
p.clear_f16s();
p.set_f16s(half_vals, 8);
TF_ASSERT_OK_AND_ASSIGN(Literal literal, Literal::CreateFromProto(p));
auto r = literal.data<half>();
ASSERT_EQ(4, r.size());
EXPECT_EQ(h1, r[0]);
EXPECT_EQ(h2, r[1]);
EXPECT_EQ(h2, r[2]);
EXPECT_EQ(h1, r[3]);
}
TEST_F(LiteralUtilTest, CopyFromProto_u16) {
uint16_t u1(0xabcd);
uint16_t u2(0x1234);
const unsigned char uint16_vals[8] = {0xcd, 0xab, 0x34, 0x12,
0x34, 0x12, 0xcd, 0xab};
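  // uint16_vals is the little-endian byte encoding of {u1, u2, u2, u1}.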
LiteralProto p;
p.mutable_shape()->set_element_type(U16);
p.mutable_shape()->clear_dimensions();
p.mutable_shape()->add_dimensions(4);
SetDefaultLayoutOnProto(p.mutable_shape());
p.clear_u16s();
p.set_u16s(uint16_vals, 8);
TF_ASSERT_OK_AND_ASSIGN(Literal literal, Literal::CreateFromProto(p));
auto r = literal.data<uint16_t>();
ASSERT_EQ(4, r.size());
EXPECT_EQ(u1, r[0]);
EXPECT_EQ(u2, r[1]);
EXPECT_EQ(u2, r[2]);
EXPECT_EQ(u1, r[3]);
}
TEST_F(LiteralUtilTest, LiteralDynamicSliceTest) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto nested_tuple = LiteralUtil::MakeTuple({&tuple, &scalar});
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(LiteralSlice(scalar, {}), scalar);
EXPECT_EQ(LiteralSlice(matrix, {}), matrix);
EXPECT_EQ(LiteralSlice(tuple, {}), tuple);
EXPECT_EQ(LiteralSlice(nested_tuple, {}), nested_tuple);
EXPECT_EQ(LiteralSlice(nil, {}), nil);
EXPECT_EQ(LiteralSlice(tuple, {0}), scalar);
EXPECT_EQ(LiteralSlice(tuple, {1}), matrix);
EXPECT_EQ(LiteralSlice(nested_tuple, {0}), tuple);
EXPECT_EQ(LiteralSlice(nested_tuple, {0, 0}), scalar);
EXPECT_EQ(LiteralSlice(nested_tuple, {0, 1}), matrix);
EXPECT_EQ(LiteralSlice(nested_tuple, {1}), scalar);
}
TEST_F(LiteralUtilTest, MutatingLiteralSlice) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto nested_tuple = LiteralUtil::MakeTuple({&tuple, &scalar});
const auto nested_tuple_view = LiteralSlice(nested_tuple);
  EXPECT_EQ(nested_tuple.Get<float>({}, {0, 0}), 1.0f);
  EXPECT_EQ(nested_tuple_view.Get<float>({}, {0, 0}), 1.0f);
  nested_tuple.Set<float>({}, {0, 0}, 555.0f);
  EXPECT_EQ(nested_tuple.Get<float>({}, {0, 0}), 555.0f);
  EXPECT_EQ(nested_tuple_view.Get<float>({}, {0, 0}), 555.0f);
}
TEST_F(LiteralUtilTest, LiteralSliceOfALiteralSlice) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto nested_tuple = LiteralUtil::MakeTuple({&tuple, &scalar});
const auto nested_tuple_view = LiteralSlice(nested_tuple);
const auto tuple_view = LiteralSlice(nested_tuple_view, {0});
const auto matrix_view = LiteralSlice(tuple_view, {1});
EXPECT_EQ(matrix_view,
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
}
TEST_F(LiteralUtilTest, BorrowingLiteralFromOneBufferPtr) {
std::vector<int64_t> int64_values = {1, 2, 3};
const Shape literal_shape = ShapeUtil::MakeShape(S64, {3});
BorrowingLiteral literal(reinterpret_cast<const char*>(int64_values.data()),
literal_shape);
EXPECT_EQ(literal.Get<int64_t>({0}), 1);
EXPECT_EQ(literal.Get<int64_t>({1}), 2);
EXPECT_EQ(literal.Get<int64_t>({2}), 3);
}
TEST_F(LiteralUtilTest, BorrowingLiteralFromMultipleBufferPtrs) {
std::vector<int64_t> one_two_three = {1, 2, 3};
const Shape one_two_three_shape = ShapeUtil::MakeShape(S64, {3});
std::vector<int64_t> hundred = {100};
const Shape hundred_shape = ShapeUtil::MakeShape(S64, {1});
std::vector<const char*> src_buf_ptrs;
src_buf_ptrs.emplace_back(
reinterpret_cast<const char*>(one_two_three.data()));
src_buf_ptrs.emplace_back(reinterpret_cast<const char*>(hundred.data()));
auto literal_tuple = BorrowingLiteral(
src_buf_ptrs,
ShapeUtil::MakeTupleShape({one_two_three_shape, hundred_shape}));
  EXPECT_EQ(literal_tuple.Get<int64_t>({0}, {0}), 1);
  EXPECT_EQ(literal_tuple.Get<int64_t>({0}, {1}), 100);
  EXPECT_EQ(literal_tuple.Get<int64_t>({1}, {0}), 2);
  EXPECT_EQ(literal_tuple.Get<int64_t>({2}, {0}), 3);
}
TEST_F(LiteralUtilTest, BorrowingLiteralFromShapeTree) {
std::vector<float> data = {1.0, 2.0, 3.0};
Shape shape = ShapeUtil::MakeShape(PrimitiveType::F32, {3});
Shape tuple = ShapeUtil::MakeTupleShape({shape, shape});
Shape nested_tuple = ShapeUtil::MakeTupleShape({tuple, shape});
ShapeTree<const char*> ptr_tree(nested_tuple);
*ptr_tree.mutable_element({0, 0}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({0, 1}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({1}) = reinterpret_cast<char*>(data.data());
BorrowingLiteral literal(ptr_tree);
EXPECT_THAT(literal.data<float>({0, 0}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({0, 1}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({1}), ElementsAre(1.0, 2.0, 3.0));
}
TEST_F(LiteralUtilTest, MutableBorrowingLiteralFromShapeTree) {
std::vector<float> data = {1.0, 2.0, 3.0};
Shape shape = ShapeUtil::MakeShape(PrimitiveType::F32, {3});
Shape tuple = ShapeUtil::MakeTupleShape({shape, shape});
Shape nested_tuple = ShapeUtil::MakeTupleShape({tuple, shape});
ShapeTree<char*> ptr_tree(nested_tuple);
*ptr_tree.mutable_element({0, 0}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({0, 1}) = reinterpret_cast<char*>(data.data());
*ptr_tree.mutable_element({1}) = reinterpret_cast<char*>(data.data());
MutableBorrowingLiteral literal(ptr_tree);
EXPECT_THAT(literal.data<float>({0, 0}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({0, 1}), ElementsAre(1.0, 2.0, 3.0));
EXPECT_THAT(literal.data<float>({1}), ElementsAre(1.0, 2.0, 3.0));
}
TEST_F(LiteralUtilTest, LiteralMove) {
Literal matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Literal literal(std::move(matrix));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {2, 2}), literal.shape()));
EXPECT_EQ(literal.Get<float>({0, 0}), 1.0);
EXPECT_EQ(literal.Get<float>({0, 1}), 2.0);
EXPECT_EQ(literal.Get<float>({1, 0}), 3.0);
EXPECT_EQ(literal.Get<float>({1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, DecomposeTuple) {
Literal nil_literal(ShapeUtil::MakeNil());
Literal inner_elements[] = {
LiteralUtil::CreateR0<int32_t>(42),
LiteralUtil::CreateR1<double>({23.0, 44.0}),
};
Literal tuple_elements[] = {
LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}}),
LiteralUtil::MakeTuple(
{&inner_elements[0], &inner_elements[1], &nil_literal}),
};
Literal nested_tuple = LiteralUtil::MakeTuple(
{&tuple_elements[0], &tuple_elements[1], &nil_literal});
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(nested_tuple.shape()));
std::vector<Literal> elements = nested_tuple.DecomposeTuple();
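  // DecomposeTuple moves the elements out, leaving the original literal
  // behind as an empty tuple.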
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(nested_tuple.shape()));
ASSERT_EQ(elements.size(), 3);
EXPECT_TRUE(ShapeUtil::Compatible(elements[0].shape(),
ShapeUtil::MakeShape(S32, {2, 2})));
EXPECT_EQ(elements[0].Get<int32_t>({0, 0}), 1);
EXPECT_EQ(elements[0].Get<int32_t>({0, 1}), 2);
EXPECT_EQ(elements[0].Get<int32_t>({1, 0}), 3);
EXPECT_EQ(elements[0].Get<int32_t>({1, 1}), 4);
EXPECT_TRUE(ShapeUtil::Compatible(
elements[1].shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F64, {2}),
ShapeUtil::MakeNil()})));
EXPECT_EQ(elements[1].Get<int32_t>({}, {0}), 42);
EXPECT_EQ(elements[1].Get<double>({0}, {1}), 23.0);
EXPECT_EQ(elements[1].Get<double>({1}, {1}), 44.0);
EXPECT_TRUE(ShapeUtil::Compatible(elements[2].shape(), ShapeUtil::MakeNil()));
}
TEST_F(LiteralUtilTest, DecomposeEmptyTuple) {
Literal nil_literal(ShapeUtil::MakeNil());
std::vector<Literal> elements = nil_literal.DecomposeTuple();
EXPECT_EQ(elements.size(), 0);
}
TEST_F(LiteralUtilTest, MoveIntoTuple) {
std::vector<Literal> elements;
elements.push_back(LiteralUtil::CreateR0<float>(1.0));
elements.push_back(LiteralUtil::CreateR1<int32_t>({4, 8}));
std::vector<Literal> inner_elements;
inner_elements.push_back(LiteralUtil::CreateR0<int32_t>(42));
inner_elements.push_back(LiteralUtil::CreateR1<double>({23.0, 44.0}));
elements.push_back(
LiteralUtil::MakeTuple({&inner_elements[0], &inner_elements[1]}));
Literal literal = Literal::MoveIntoTuple(absl::MakeSpan(elements));
ASSERT_TRUE(literal.shape().IsTuple());
ASSERT_EQ(ShapeUtil::TupleElementCount(literal.shape()), 3);
EXPECT_EQ(literal.Get<float>({}, {0}), 1.0);
EXPECT_EQ(literal.Get<int32_t>({0}, {1}), 4);
EXPECT_EQ(literal.Get<int32_t>({1}, {1}), 8);
EXPECT_EQ(literal.Get<int32_t>({}, {2, 0}), 42);
EXPECT_EQ(literal.Get<double>({0}, {2, 1}), 23.0);
EXPECT_EQ(literal.Get<double>({1}, {2, 1}), 44.0);
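  // Moving the elements into the tuple should leave the source literals
  // behind as empty tuples.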
for (const Literal& element : elements) {
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(element.shape()));
}
}
TEST_F(LiteralUtilTest, MoveIntoEmptyTuple) {
Literal literal = Literal::MoveIntoTuple({});
ASSERT_TRUE(literal.shape().IsTuple());
EXPECT_EQ(ShapeUtil::TupleElementCount(literal.shape()), 0);
}
TEST_F(LiteralUtilTest, LiteralMoveAssignment) {
Literal literal;
EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeNil(), literal.shape()));
Literal matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
literal = std::move(matrix);
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {2, 2}), literal.shape()));
EXPECT_EQ(literal.Get<float>({0, 0}), 1.0);
EXPECT_EQ(literal.Get<float>({0, 1}), 2.0);
EXPECT_EQ(literal.Get<float>({1, 0}), 3.0);
EXPECT_EQ(literal.Get<float>({1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, LiteralSliceCopy) {
Literal matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
const auto matrix_view = LiteralSlice(matrix);
LiteralSlice matrix_view_copy(matrix_view);
EXPECT_EQ(matrix_view_copy.Get<float>({0, 0}), 1.0);
EXPECT_EQ(matrix_view_copy.Get<float>({0, 1}), 2.0);
EXPECT_EQ(matrix_view_copy.Get<float>({1, 0}), 3.0);
EXPECT_EQ(matrix_view_copy.Get<float>({1, 1}), 4.0);
}
TEST_F(LiteralUtilTest, GetSetTuple) {
Literal elements[] = {
LiteralUtil::CreateR0<float>(42.0),
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}),
};
auto tuple = LiteralUtil::MakeTuple({&elements[0], &elements[1]});
EXPECT_EQ(tuple.Get<float>({}, {0}), 42.0);
tuple.Set<float>({}, {0}, -5.0);
EXPECT_EQ(tuple.Get<float>({}, {0}), -5.0);
EXPECT_EQ(tuple.Get<float>({1, 0}, {1}), 3.0);
tuple.Set<float>({1, 0}, {1}, -4.0);
  EXPECT_EQ(tuple.Get<float>({1, 0}, {1}), -4.0);
}
TEST_F(LiteralUtilTest, CreateFromShapeZeroInitialized) {
Literal scalar_f32 = Literal::CreateFromShape(ShapeUtil::MakeShape(F32, {}));
EXPECT_EQ(scalar_f32.Get<float>({}), 0.0);
EXPECT_TRUE(scalar_f32.IsAll(0));
Literal vector_s32 = Literal::CreateFromShape(ShapeUtil::MakeShape(S32, {3}));
EXPECT_EQ(vector_s32.Get<int32_t>({0}), 0);
EXPECT_EQ(vector_s32.Get<int32_t>({1}), 0);
EXPECT_EQ(vector_s32.Get<int32_t>({2}), 0);
EXPECT_TRUE(vector_s32.IsAll(0));
Literal tuple = Literal::CreateFromShape(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F64, {}), ShapeUtil::MakeShape(PRED, {2}),
ShapeUtil::MakeShape(U64, {2, 1}), ShapeUtil::MakeShape(C64, {}),
ShapeUtil::MakeShape(C128, {})}));
EXPECT_EQ(tuple.Get<double>({}, {0}), 0.0);
EXPECT_EQ(tuple.Get<bool>({0}, {1}), false);
EXPECT_EQ(tuple.Get<bool>({1}, {1}), false);
EXPECT_EQ(tuple.Get<uint64_t>({0, 0}, {2}), 0);
EXPECT_EQ(tuple.Get<uint64_t>({1, 0}, {2}), 0);
EXPECT_EQ(tuple.Get<complex64>({}, {3}), complex64(0.0f, 0.0f));
EXPECT_EQ(tuple.Get<complex128>({}, {4}), complex128(0.0, 0.0));
}
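// Round-trips literals of many element types (4-bit ints, FP8 variants, bf16,
// complex, nested tuples, and the nil shape) through LiteralProto and checks
// that the reconstructed literal compares equal to the original.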
TEST_F(LiteralUtilTest, ProtoRoundTrip) {
auto one_f32 = LiteralUtil::CreateR0<float>(1.0);
auto two_f32 = LiteralUtil::CreateR0<float>(2.0);
auto vector_int8 = LiteralUtil::CreateR1<int8_t>({-128, 0, 2, 4, 7, 56, 127});
auto vector_uint8 = LiteralUtil::CreateR1<uint8_t>({128, 0, 2, 56, 127, 255});
auto vector_c64 = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_c128 =
LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_bfloat16 = LiteralUtil::CreateR1<bfloat16>(
{bfloat16{-1.0}, bfloat16{2.0}, bfloat16{-3.0}});
auto vector_half =
LiteralUtil::CreateR1<half>({half{10.0}, half{20.0}, half{-30.0}});
using e5 = tsl::float8_e5m2;
auto vector_f8e5m2 =
LiteralUtil::CreateR1<e5>({e5{10.0}, e5{20.0}, e5{-32.0}});
using e4 = tsl::float8_e4m3;
auto vector_f8e4m3 =
LiteralUtil::CreateR1<e4>({e4{10.0}, e4{20.0}, e4{-32.0}});
using e4fn = tsl::float8_e4m3fn;
auto vector_f8e4m3fn =
LiteralUtil::CreateR1<e4fn>({e4fn{10.0}, e4fn{20.0}, e4fn{-32.0}});
using b11 = tsl::float8_e4m3b11fnuz;
auto vector_f8e4m3b11 =
LiteralUtil::CreateR1<b11>({b11{10.0}, b11{20.0}, b11{-30.0}});
using e5f = tsl::float8_e5m2fnuz;
auto vector_f8e5m2fnuz =
LiteralUtil::CreateR1<e5f>({e5f{10.0}, e5f{20.0}, e5f{-30.0}});
using e4f = tsl::float8_e4m3fnuz;
auto vector_f8e4m3fnuz =
LiteralUtil::CreateR1<e4f>({e4f{10.0}, e4f{20.0}, e4f{-30.0}});
using e3 = tsl::float8_e3m4;
auto vector_f8e3m4 = LiteralUtil::CreateR1<e3>({e3{2.5}, e3{5.0}, e3{-8.0}});
auto matrix_pred =
LiteralUtil::CreateR2<bool>({{true, false, true}, {false, false, true}});
auto vector_s4 = LiteralUtil::CreateR1<s4>({s4{-1}, s4{3}, s4{7}});
auto vector_u4 = LiteralUtil::CreateR1<u4>({u4{1}, u4{3}, u4{15}});
auto tuple = LiteralUtil::MakeTuple(
{&one_f32, &vector_half, &matrix_pred, &matrix_pred});
Literal nil_literal(ShapeUtil::MakeNil());
auto nested_tuple =
LiteralUtil::MakeTuple({&tuple, &vector_bfloat16, &tuple, &nil_literal});
auto to_from_proto = [](const Literal& literal) -> Literal {
return Literal::CreateFromProto(literal.ToProto()).value();
};
EXPECT_EQ(one_f32, to_from_proto(one_f32));
EXPECT_EQ(vector_int8, to_from_proto(vector_int8));
EXPECT_EQ(vector_uint8, to_from_proto(vector_uint8));
EXPECT_EQ(vector_c64, to_from_proto(vector_c64));
EXPECT_EQ(vector_c128, to_from_proto(vector_c128));
EXPECT_EQ(vector_bfloat16, to_from_proto(vector_bfloat16));
EXPECT_EQ(vector_f8e5m2, to_from_proto(vector_f8e5m2));
EXPECT_EQ(vector_f8e4m3, to_from_proto(vector_f8e4m3));
EXPECT_EQ(vector_f8e4m3fn, to_from_proto(vector_f8e4m3fn));
EXPECT_EQ(vector_f8e4m3b11, to_from_proto(vector_f8e4m3b11));
EXPECT_EQ(vector_f8e5m2fnuz, to_from_proto(vector_f8e5m2fnuz));
EXPECT_EQ(vector_f8e4m3fnuz, to_from_proto(vector_f8e4m3fnuz));
EXPECT_EQ(vector_f8e3m4, to_from_proto(vector_f8e3m4));
EXPECT_EQ(matrix_pred, to_from_proto(matrix_pred));
EXPECT_EQ(vector_s4, to_from_proto(vector_s4));
EXPECT_EQ(vector_u4, to_from_proto(vector_u4));
EXPECT_EQ(tuple, to_from_proto(tuple));
EXPECT_EQ(nested_tuple, to_from_proto(nested_tuple));
EXPECT_EQ(nil_literal, to_from_proto(nil_literal));
EXPECT_NE(one_f32, two_f32);
EXPECT_NE(one_f32, to_from_proto(two_f32));
}
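// The tests below feed malformed LiteralProtos to Literal::CreateFromProto and
// check that the expected validation errors are reported.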
TEST_F(LiteralUtilTest, InvalidProtoNoValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {3}).ToProto();
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 3 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, ValidProtoNoValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {3}).ToProto();
absl::Status status =
      Literal::CreateFromProto(proto, /*prohibit_empty_literal=*/false)
          .status();
EXPECT_TRUE(status.ok());
}
TEST_F(LiteralUtilTest, ValidProtoWithClearedValues) {
auto literal = LiteralUtil::CreateR1<bool>({true, false, true});
LiteralProto proto = literal.ToProto();
EXPECT_EQ(proto.preds_size(), 3);
proto.clear_preds();
EXPECT_EQ(proto.preds_size(), 0);
absl::Status status =
      Literal::CreateFromProto(proto, /*prohibit_empty_literal=*/false)
          .status();
EXPECT_TRUE(status.ok());
}
TEST_F(LiteralUtilTest, InvalidProtoNoShape) {
LiteralProto proto;
proto.add_preds(false);
proto.add_preds(true);
proto.add_preds(false);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("LiteralProto has no shape"));
}
TEST_F(LiteralUtilTest, InvalidProtoWrongContainer) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {3}).ToProto();
proto.add_preds(false);
proto.add_preds(true);
proto.add_preds(false);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 3 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooFewValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(F32, {42, 2}).ToProto();
proto.add_f32s(1.0);
proto.add_f32s(2.0);
proto.add_f32s(3.0);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 84 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooManyValues) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(S32, {2}).ToProto();
proto.add_s32s(42);
proto.add_s32s(-10);
proto.add_s32s(100);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Expected 2 elements in LiteralProto"));
}
TEST_F(LiteralUtilTest, InvalidProtoMissingLayout) {
LiteralProto proto;
*proto.mutable_shape() = ShapeUtil::MakeShape(PRED, {2, 2}).ToProto();
proto.mutable_shape()->clear_layout();
proto.add_preds(true);
proto.add_preds(false);
proto.add_preds(true);
proto.add_preds(false);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("LiteralProto has no layout"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooFewTupleElements) {
LiteralProto proto;
*proto.mutable_shape() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {2}), ShapeUtil::MakeShape(F32, {})})
.ToProto();
LiteralProto* element0 = proto.add_tuple_literals();
*element0->mutable_shape() =
ShapeUtil::GetTupleElementShape(Shape(proto.shape()), 0).ToProto();
element0->add_preds(false);
element0->add_preds(true);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected 2 tuple elements"));
}
TEST_F(LiteralUtilTest, InvalidProtoTooManyTupleElements) {
LiteralProto proto;
*proto.mutable_shape() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {2}), ShapeUtil::MakeShape(F32, {})})
.ToProto();
LiteralProto* element0 = proto.add_tuple_literals();
*element0->mutable_shape() =
ShapeUtil::GetTupleElementShape(Shape(proto.shape()), 0).ToProto();
element0->add_preds(false);
element0->add_preds(true);
LiteralProto* element1 = proto.add_tuple_literals();
*element1->mutable_shape() =
ShapeUtil::GetTupleElementShape(Shape(proto.shape()), 1).ToProto();
element1->add_f32s(42.0);
LiteralProto* element2 = proto.add_tuple_literals();
*element2->mutable_shape() = ShapeUtil::MakeShape(F32, {}).ToProto();
element2->add_f32s(123.0);
absl::Status status = Literal::CreateFromProto(proto).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected 2 tuple elements"));
}
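// Literal::Broadcast takes the result shape plus a list that maps each input
// dimension to an output dimension; the remaining output dimensions are filled
// by replication, as the expected matrices below illustrate.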
TEST_F(LiteralUtilTest, BroadcastVectorToMatrix0) {
Literal literal = LiteralUtil::CreateR1<int64_t>({1, 2});
TF_ASSERT_OK_AND_ASSIGN(
Literal broadcasted_literal,
literal.Broadcast(ShapeUtil::MakeShape(S64, {2, 2}),
{0}));
EXPECT_EQ(broadcasted_literal,
LiteralUtil::CreateR2<int64_t>({{1, 1}, {2, 2}}));
}
TEST_F(LiteralUtilTest, BroadcastVectorToMatrix1) {
Literal literal = LiteralUtil::CreateR1<int64_t>({1, 2});
TF_ASSERT_OK_AND_ASSIGN(
Literal broadcasted_literal,
literal.Broadcast(ShapeUtil::MakeShape(S64, {2, 2}),
{1}));
EXPECT_EQ(broadcasted_literal,
LiteralUtil::CreateR2<int64_t>({{1, 2}, {1, 2}}));
}
TEST_F(LiteralUtilTest, BroadcastScalarToMatrix) {
Literal literal = LiteralUtil::CreateR0<int32_t>(9);
TF_ASSERT_OK_AND_ASSIGN(
Literal broadcasted_literal,
literal.Broadcast(ShapeUtil::MakeShape(S32, {2, 2}),
{}));
EXPECT_EQ(broadcasted_literal,
LiteralUtil::CreateR2<int32_t>({{9, 9}, {9, 9}}));
}
TEST_F(LiteralUtilTest, DynamicBroadcast) {
Literal literal = LiteralUtil::CreateR1<int64_t>({1, 2});
literal.SetDynamicSize(0, 1);
TF_ASSERT_OK_AND_ASSIGN(
Literal broadcasted_literal,
literal.Broadcast(ShapeUtil::MakeShape(S64, {2, 2}),
{1}));
EXPECT_EQ(broadcasted_literal, LiteralUtil::CreateR2<int64_t>({{1}, {1}}));
EXPECT_EQ(broadcasted_literal.GetDynamicSize(1), 1);
}
TEST_F(LiteralUtilTest, GetAsScalarInt64) {
auto scalar1 = LiteralUtil::CreateR0<int32_t>(12);
EXPECT_EQ(LiteralUtil::LiteralAsScalarInt64(scalar1).value(), (int64_t)12);
auto scalar2 = LiteralUtil::CreateR0<int8_t>(12);
EXPECT_EQ(LiteralUtil::LiteralAsScalarInt64(scalar2).value(), (int64_t)12);
auto non_scalar1 = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}});
EXPECT_FALSE(LiteralUtil::LiteralAsScalarInt64(non_scalar1).has_value());
auto non_scalar2 = LiteralUtil::CreateR1<int32_t>({{1, 2}});
EXPECT_FALSE(LiteralUtil::LiteralAsScalarInt64(non_scalar2).has_value());
}
TEST_F(LiteralUtilTest, GetAsDouble) {
auto m = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*m.GetAsDouble({0, 0}), 1.0);
EXPECT_EQ(*m.GetAsDouble({1, 0}), 3.0);
}
TEST_F(LiteralUtilTest, GetSumAsDouble) {
auto m = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*m.GetSumAsDouble({0, 3}), 1.0 + 4.0);
EXPECT_EQ(*m.GetSumAsDouble({0, 1, 2, 3}), 1.0 + 2.0 + 3.0 + 4.0);
auto md = LiteralUtil::CreateR2<double>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(*md.GetSumAsDouble({0, 3}), 1.0 + 4.0);
EXPECT_EQ(*md.GetSumAsDouble({0, 1, 2, 3}), 1.0 + 2.0 + 3.0 + 4.0);
std::vector<float> vals(1024, 1.0);
auto v = LiteralUtil::CreateR1<float>(vals);
std::vector<int64_t> indices;
for (int i = 0; i < 1024; i += 2) {
indices.push_back(i);
EXPECT_EQ(*v.GetSumAsDouble(indices), (i + 2) / 2.0);
}
}
TEST_F(LiteralUtilTest, GetAsComplex128) {
complex128 value = {1, 0};
Literal c1 = LiteralUtil::CreateR0<complex128>(value);
EXPECT_EQ(*c1.GetAsComplex128({}), value);
Literal c2 = LiteralUtil::CreateR0<double>(1);
EXPECT_EQ(*c2.GetAsComplex128({}), value);
complex64 float_value = {1, 0};
Literal c4 = LiteralUtil::CreateR0<complex64>(float_value);
EXPECT_EQ(*c4.GetAsComplex128({}), value);
complex128 other_value = {1, 2};
Literal c5 = LiteralUtil::CreateR0<complex128>(other_value);
EXPECT_EQ(*c5.GetAsComplex128({}), other_value);
Literal c6 = LiteralUtil::CreateR0<int64_t>(1);
EXPECT_FALSE(c6.GetAsComplex128({}).has_value());
}
TEST_F(LiteralUtilTest, SliceOnBool) {
Literal c1 = LiteralUtil::CreateR1<bool>({true, true, false});
EXPECT_EQ(c1, c1.Slice({0}, {3}));
}
TEST_F(LiteralUtilTest, IsEqualAt) {
double val_double = 10.0;
int val_integral = 10;
Literal c1 = LiteralUtil::CreateR0<int>(10);
EXPECT_TRUE(c1.IsEqualAt({}, val_double));
EXPECT_TRUE(c1.IsEqualAt({}, val_integral));
Literal c2 = LiteralUtil::CreateR0<double>(10);
EXPECT_TRUE(c2.IsEqualAt({}, val_double));
EXPECT_TRUE(c2.IsEqualAt({}, val_integral));
Literal c3 =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2{val_double});
EXPECT_TRUE(c3.IsEqualAt({}, val_double));
EXPECT_TRUE(c3.IsEqualAt({}, val_integral));
complex128 val_complex = {10, 0};
EXPECT_TRUE(c1.IsEqualAt({}, val_complex));
EXPECT_TRUE(c2.IsEqualAt({}, val_complex));
EXPECT_TRUE(c3.IsEqualAt({}, val_complex));
Literal c4 = LiteralUtil::CreateR0<complex128>(val_complex);
EXPECT_TRUE(c4.IsEqualAt({}, val_double));
EXPECT_TRUE(c4.IsEqualAt({}, val_integral));
EXPECT_TRUE(c4.IsEqualAt({}, val_complex));
EXPECT_FALSE(c4.IsEqualAt({}, std::numeric_limits<double>::infinity()));
complex128 val_true_complex = {10, 3};
complex64 val_smaller_complex = {10, 3};
Literal c5 = LiteralUtil::CreateR0<complex128>(val_true_complex);
EXPECT_TRUE(c5.IsEqualAt({}, val_true_complex));
EXPECT_TRUE(c5.IsEqualAt({}, val_smaller_complex));
Literal c6 = LiteralUtil::CreateR0<tsl::float8_e5m2fnuz>(
tsl::float8_e5m2fnuz{val_double});
EXPECT_TRUE(c6.IsEqualAt({}, val_double));
EXPECT_TRUE(c6.IsEqualAt({}, val_integral));
Literal c7 = LiteralUtil::CreateR0<tsl::float8_e4m3fnuz>(
tsl::float8_e4m3fnuz{val_double});
  EXPECT_TRUE(c7.IsEqualAt({}, val_double));
  EXPECT_TRUE(c7.IsEqualAt({}, val_integral));
Literal c8 =
LiteralUtil::CreateR0<tsl::float8_e4m3>(tsl::float8_e4m3{val_double});
EXPECT_TRUE(c8.IsEqualAt({}, val_double));
EXPECT_TRUE(c8.IsEqualAt({}, val_integral));
Literal c9 =
LiteralUtil::CreateR0<tsl::float8_e4m3fn>(tsl::float8_e4m3fn{val_double});
EXPECT_TRUE(c9.IsEqualAt({}, val_double));
EXPECT_TRUE(c9.IsEqualAt({}, val_integral));
Literal c10 =
LiteralUtil::CreateR0<tsl::float8_e3m4>(tsl::float8_e3m4{val_double});
EXPECT_TRUE(c10.IsEqualAt({}, val_double));
EXPECT_TRUE(c10.IsEqualAt({}, val_integral));
}
TEST_F(LiteralUtilTest, CreateFromShapeWithUnknownLeafArrays) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeShape(F32, {4, 4}));
EXPECT_FALSE(c1.IsKnown());
}
TEST_F(LiteralUtilTest, CreateFromShapeWithUnknownLeafArraysS4Tuple) {
auto inner_shape = ShapeUtil::MakeShape(S4, {4, 4});
inner_shape.mutable_layout()->set_element_size_in_bits(4);
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeTupleShape({inner_shape}));
EXPECT_FALSE(c1.IsKnown());
}
TEST_F(LiteralUtilTest, CreatePartiallyKnownTuple) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeShape(F32, {4, 4}));
Literal c2 = LiteralUtil::CreateR0<int>(10);
Literal c3 = LiteralUtil::MakeTuple({&c1, &c2});
Literal c4 = LiteralUtil::CreateR0<int>(100);
Literal c5 = LiteralUtil::MakeTuple({&c4, &c3});
EXPECT_FALSE(c5.IsKnown());
}
TEST_F(LiteralUtilTest, CopyFromPartiallyKnownTuple) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeShape(F32, {4, 4}));
Literal c2 = LiteralUtil::CreateR0<int>(10);
Literal c3 = LiteralUtil::MakeTuple({&c1, &c2});
Literal c4 = LiteralUtil::CreateR0<int>(100);
Literal c5 = LiteralUtil::MakeTuple({&c4, &c3});
Literal c6 = Literal::CreateFromShape(c5.shape());
TF_ASSERT_OK(
      c6.CopyFrom(c5, /*dest_shape_index=*/{1}, /*src_shape_index=*/{1}));
EXPECT_FALSE(c6.IsKnown());
}
TEST_F(LiteralUtilTest, CopyFromPartiallyKnownTupleUnknownTupleElement) {
Literal c1 = Literal::CreateFromShapeWithUnknownLeafArrays(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4, 4}),
ShapeUtil::MakeShape(F32, {4, 4})}));
Literal c2 = LiteralUtil::CreateR0<int>(10);
Literal c3 = LiteralUtil::MakeTuple({&c1, &c2});
Literal c4 = LiteralUtil::CreateR0<int>(100);
Literal c5 = LiteralUtil::MakeTuple({&c4, &c3});
Literal c6 = Literal::CreateFromShape(c5.shape());
Literal c1_copy = Literal::CreateFromShape(c1.shape());
Literal c2_copy = Literal::CreateFromShape(c2.shape());
TF_ASSERT_OK(
      c6.CopyFrom(c5, /*dest_shape_index=*/{1}, /*src_shape_index=*/{1}));
  TF_ASSERT_OK(c1_copy.CopyFrom(c6, /*dest_shape_index=*/{},
                                /*src_shape_index=*/{1, 0}));
  TF_ASSERT_OK(c2_copy.CopyFrom(c6, /*dest_shape_index=*/{},
                                /*src_shape_index=*/{1, 1}));
EXPECT_FALSE(c6.IsKnown());
EXPECT_FALSE(c1_copy.IsKnown());
EXPECT_TRUE(c2_copy.IsKnown());
}
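// The Populate* tests below exercise dynamic dimensions: SetDynamicSize(dim, n)
// records a runtime size n for a bounded dimension, and ToString prints the
// bound as "<=N" followed by the per-dimension runtime sizes in parentheses.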
TEST_F(LiteralUtilTest, PopulateR1Dynamic) {
auto literal = Literal(ShapeUtil::MakeShape(U32, {20}));
literal.SetDynamicSize(0, 10);
literal.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
std::string expected = "u32[<=20](10) {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2DynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(U32, {5, 2}));
literal.SetDynamicSize(0, 3);
literal.PopulateR2<uint32_t>({{1, 2}, {3, 4}, {5, 6}});
std::string expected = R"(u32[<=5,2](3,2) {
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2DynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(U32, {2, 5}));
literal.SetDynamicSize(1, 3);
literal.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}});
std::string expected = R"(u32[2,<=5](2,3) {
{ 1, 2, 3 },
{ 4, 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateFrom1DArray) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {20}));
literal.SetDynamicSize(0, 10);
xla::Array<float> array({10});
for (int i = 0; i < 10; i++) {
array(i) = static_cast<float>(i);
}
literal.PopulateFromArray(array);
std::string expected = "f32[<=20](10) {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateFromArrayDynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 3;
const uint32_t cols = 5;
literal.SetDynamicSize(0, rows);
xla::Array<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateFromArray(array);
std::string expected = R"(f32[<=5,5](3,5) {
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateFromArrayDynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 5;
const uint32_t cols = 3;
literal.SetDynamicSize(1, cols);
xla::Array<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateFromArray(array);
std::string expected = R"(f32[5,<=5](5,3) {
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2FromArray2DDynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 3;
const uint32_t cols = 5;
literal.SetDynamicSize(0, rows);
xla::Array2D<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateR2FromArray2D(array);
std::string expected = R"(f32[<=5,5](3,5) {
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 },
{ 0, 1, 2, 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2FromArray2DDynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 5;
const uint32_t cols = 3;
literal.SetDynamicSize(1, cols);
xla::Array2D<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateR2FromArray2D(array);
std::string expected = R"(f32[5,<=5](5,3) {
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR2FromArray2DDynamicDim0Dim1) {
auto literal = Literal(ShapeUtil::MakeShape(F32, {5, 5}));
const uint32_t rows = 3;
const uint32_t cols = 2;
literal.SetDynamicSize(0, rows);
literal.SetDynamicSize(1, cols);
xla::Array2D<float> array({rows, cols});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
array(i, j) = static_cast<float>(j);
}
}
literal.PopulateR2FromArray2D(array);
std::string expected = R"(f32[<=5,<=5](3,2) {
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR3FromArray3DDynamicDim0) {
auto literal = Literal(ShapeUtil::MakeShape(S32, {3, 3, 3}));
const uint32_t rows = 2;
const uint32_t cols = 3;
const uint32_t depth = 3;
literal.SetDynamicSize(0, rows);
xla::Array3D<int32_t> array({rows, cols, depth});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
for (int k = 0; k < depth; k++) {
array(i, j, k) = static_cast<int32_t>(k);
}
}
}
literal.PopulateR3FromArray3D(array);
std::string expected = R"(s32[<=3,3,3](2,3,3) {
{
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
},
{
{ 0, 1, 2 },
{ 0, 1, 2 },
{ 0, 1, 2 }
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR3FromArray3DDynamicDim1) {
auto literal = Literal(ShapeUtil::MakeShape(S32, {3, 3, 3}));
const uint32_t rows = 3;
const uint32_t cols = 2;
const uint32_t depth = 3;
literal.SetDynamicSize(1, cols);
xla::Array3D<int32_t> array({rows, cols, depth});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
for (int k = 0; k < depth; k++) {
array(i, j, k) = static_cast<int32_t>(k);
}
}
}
literal.PopulateR3FromArray3D(array);
std::string expected = R"(s32[3,<=3,3](3,2,3) {
{
{ 0, 1, 2 },
{ 0, 1, 2 }
},
{
{ 0, 1, 2 },
{ 0, 1, 2 }
},
{
{ 0, 1, 2 },
{ 0, 1, 2 }
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, PopulateR3FromArray3DDynamicDim2) {
auto literal = Literal(ShapeUtil::MakeShape(S32, {3, 3, 3}));
const uint32_t rows = 3;
const uint32_t cols = 3;
const uint32_t depth = 2;
literal.SetDynamicSize(2, depth);
xla::Array3D<int32_t> array({rows, cols, depth});
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
for (int k = 0; k < depth; k++) {
array(i, j, k) = static_cast<int32_t>(k);
}
}
}
literal.PopulateR3FromArray3D(array);
std::string expected = R"(s32[3,3,<=3](3,3,2) {
{
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
},
{
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
},
{
{ 0, 1 },
{ 0, 1 },
{ 0, 1 }
}
})";
EXPECT_EQ(expected, literal.ToString());
}
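// S4 elements are stored packed, two per byte. Only the nibble that actually
// holds the scalar (0x4 in both bytes below) is significant; the leftover
// high-nibble padding (0x4 vs 0xc) affects neither ToString nor equality.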
TEST_F(LiteralUtilTest, Compare4BitType) {
Literal literal1 = Literal(ShapeUtil::MakeShape(S4, {}));
Literal literal2 = Literal(ShapeUtil::MakeShape(S4, {}));
void* p = literal1.untyped_data();
void* q = literal2.untyped_data();
*((uint8_t*)p) = 0x44;
*((uint8_t*)q) = 0xc4;
std::string expected = R"(s4[] 4)";
EXPECT_EQ(expected, literal1.ToString());
EXPECT_EQ(literal1.ToString(), literal2.ToString());
EXPECT_EQ(literal1, literal2);
}
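// Parameterized serialization test: each literal is filled with pseudo-random
// values appropriate to its element type, serialized with SerializeAsString,
// and the deserialized copy must compare equal to the original.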
class LiteralSerializationTest : public ::testing::Test,
public ::testing::WithParamInterface<Shape> {
public:
static std::vector<Shape> GenerateSimpleParams() {
std::vector<Shape> params;
for (PrimitiveType element_type :
{PRED, S4, U4, S8, U8, S16,
U16, S32, U32, S64, U64, F16,
F32, F64, BF16, F8E5M2, F8E4M3, F8E4M3FN,
F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ, F8E3M4, C64, C128}) {
for (const DimensionVector& dimensions : {
DimensionVector{},
DimensionVector{0},
DimensionVector{1},
DimensionVector{7},
DimensionVector{8},
DimensionVector{9},
DimensionVector{0, 8},
DimensionVector{8, 9},
}) {
params.push_back(ShapeUtil::MakeShape(element_type, dimensions));
}
}
return params;
}
static std::vector<Shape> GenerateTupleParams() {
std::vector<Shape> params;
const Shape tuple_elements[] = {
ShapeUtil::MakeShape(PRED, {}),
ShapeUtil::MakeShape(U4, {3}),
ShapeUtil::MakeShape(U32, {0}),
ShapeUtil::MakeShape(F32, {7}),
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(BF16, {3}),
ShapeUtil::MakeShape(C64, {7}),
}),
};
for (const Shape& lhs : tuple_elements) {
for (const Shape& rhs : tuple_elements) {
params.push_back(ShapeUtil::MakeTupleShape({lhs, rhs}));
}
}
return params;
}
};
TEST_P(LiteralSerializationTest, Test) {
const Shape& shape = GetParam();
LOG(INFO) << "shape: " << shape.ToString();
absl::InsecureBitGen bitgen(std::seed_seq({42}));
Literal literal(shape);
ASSERT_NO_FATAL_FAILURE(ShapeUtil::ForEachSubshape(
shape, [&](const Shape& subshape, const ShapeIndex& shape_index) {
if (subshape.IsTuple()) {
return;
}
ASSERT_TRUE(subshape.IsArray());
primitive_util::ArrayTypeSwitch<void>(
[&](auto primitive_type) {
using NativeT = primitive_util::NativeTypeOf<primitive_type>;
for (auto& element : literal.data<NativeT>(shape_index)) {
if constexpr (std::is_same_v<NativeT, bool>) {
element = absl::Uniform<int>(bitgen, 0, 2);
} else if constexpr (primitive_util::IsComplexType(
primitive_type)) {
element = NativeT(absl::Uniform<double>(bitgen, -1.0, 1.0),
absl::Uniform<double>(bitgen, -1.0, 1.0));
} else if constexpr (primitive_util::IsFloatingPointType(
primitive_type)) {
element = static_cast<NativeT>(
absl::Uniform<double>(bitgen, -1.0, 1.0));
} else {
element =
static_cast<NativeT>(absl::Uniform<uint64_t>(bitgen));
}
}
},
subshape.element_type());
}));
TF_ASSERT_OK_AND_ASSIGN(std::string serialized, literal.SerializeAsString());
TF_ASSERT_OK_AND_ASSIGN(Literal deserialized,
Literal::DeserializeFromString(serialized));
EXPECT_EQ(literal, deserialized);
}
INSTANTIATE_TEST_SUITE_P(
Simple, LiteralSerializationTest,
::testing::ValuesIn(LiteralSerializationTest::GenerateSimpleParams()));
INSTANTIATE_TEST_SUITE_P(
Tuples, LiteralSerializationTest,
::testing::ValuesIn(LiteralSerializationTest::GenerateTupleParams()));
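// Benchmarks broadcasting a length-d0 vector along dimension 0 into a d0 x d1
// matrix.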
void BM_BroadcastVectorToMatrix(::testing::benchmark::State& state) {
const int d0 = state.range(0);
const int d1 = state.range(1);
std::vector<int64_t> v(d0);
for (int i = 0; i < d0; i++) {
v[i] = i;
}
Literal literal = LiteralUtil::CreateR1<int64_t>(v);
int count = 0;
for (auto s : state) {
TF_ASSERT_OK_AND_ASSIGN(
Literal broadcasted_literal,
literal.Broadcast(ShapeUtil::MakeShape(S64, {d0, d1}),
{0}));
if (count == 0) {
state.SetLabel(literal.shape().ToString() + " to " +
broadcasted_literal.shape().ToString());
}
count++;
}
}
BENCHMARK(BM_BroadcastVectorToMatrix)
->ArgPair(16, 16)
->ArgPair(16, 1024)
->ArgPair(1024, 1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/literal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/literal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40deacba-c32f-4c52-97bb-a950b5905922 | cpp | tensorflow/tensorflow | shape_util | tensorflow/compiler/tf2xla/kernels/shape_util.cc | third_party/xla/xla/shape_util_test.cc | #include "tensorflow/compiler/tf2xla/kernels/shape_util.h"
#include <limits>
#include "absl/status/status.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
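// Fills `shape_constant` (a rank-1 host tensor of int32 or int64) with the
// dimension sizes of `input_shape`. The int32 path rejects any dimension that
// does not fit in int32; the int64 path copies the sizes directly.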
Status TensorShapeToConstant(const TensorShape& input_shape,
Tensor* shape_constant) {
const int dims = input_shape.dims();
if (shape_constant->dtype() == DT_INT32) {
auto vec = shape_constant->vec<int32>();
for (int i = 0; i < dims; ++i) {
int64_t dim_size = input_shape.dim_size(i);
if (!FastBoundsCheck(dim_size, std::numeric_limits<int32>::max())) {
return errors::InvalidArgument(
"Shape with out_type=int32 does not support tensors > int32max",
" but dim ", i, " is ", dim_size);
}
vec(i) = static_cast<int32>(dim_size);
}
} else {
auto vec = shape_constant->vec<int64_t>();
for (int i = 0; i < dims; ++i) {
int64_t dim_size = input_shape.dim_size(i);
vec(i) = dim_size;
}
}
return absl::OkStatus();
}
} | #include "xla/shape_util.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
EXPECT_EQ(2, ShapeUtil::GetDimension(matrix, -2));
}
TEST(ShapeUtilTest, GetDimensionHelperExampleInDocumentationTest) {
auto shape = ShapeUtil::MakeShape(F32, {1, 2, 3, 4});
ASSERT_EQ(4, ShapeUtil::GetDimension(shape, -1));
}
TEST(ShapeUtilTest, NegativeIndexOobFails) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
ASSERT_DEATH(ShapeUtil::GetDimension(matrix, -3), "dimension_number >= 0");
}
TEST(ShapeUtilTest, CreateRank3DimensionVectorFromShape) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7});
DimensionVector dimensions = ShapeUtil::CreateDimensionVectorFromShape(shape);
EXPECT_THAT(dimensions, ElementsAre(3, 2, 7));
}
TEST(ShapeUtilTest, Rank1DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3});
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank2DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank3DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7});
ASSERT_EQ(7, shape.dimensions(2));
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank4DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7, 8});
ASSERT_EQ(8, shape.dimensions(3));
ASSERT_EQ(7, shape.dimensions(2));
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, CompatibleIdenticalShapes) {
Shape shape1 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_TRUE(ShapeUtil::Compatible(shape1, shape2));
}
TEST(ShapeUtilTest, TokenCompatibility) {
EXPECT_TRUE(ShapeUtil::Compatible(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTokenShape()));
EXPECT_FALSE(ShapeUtil::Compatible(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShape(F32, {})));
EXPECT_FALSE(ShapeUtil::Compatible(ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTokenShape()));
EXPECT_TRUE(ShapeUtil::Compatible(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape()}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape()})));
}
TEST(ShapeUtilTest, TokensEqualShapes) {
EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTokenShape()));
EXPECT_FALSE(ShapeUtil::Equal(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShape(F32, {})));
EXPECT_FALSE(ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTokenShape()));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})})));
EXPECT_FALSE(ShapeUtil::Equal(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {1, 0})})));
}
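// Compatible() ignores layout, so two shapes that differ only in their
// minor_to_major order are compatible but not Equal().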
TEST(ShapeUtilTest, CompatibleNotIdenticalShapes) {
Shape shape_1 = ShapeUtil::MakeShape(F32, {3, 2});
auto layout_1 = shape_1.mutable_layout();
layout_1->clear_minor_to_major();
layout_1->add_minor_to_major(0);
layout_1->add_minor_to_major(1);
Shape shape_2 = ShapeUtil::MakeShape(F32, {3, 2});
auto layout_2 = shape_2.mutable_layout();
layout_2->clear_minor_to_major();
layout_2->add_minor_to_major(1);
layout_2->add_minor_to_major(0);
EXPECT_FALSE(ShapeUtil::Equal(shape_1, shape_2));
EXPECT_TRUE(ShapeUtil::Compatible(shape_1, shape_2));
}
TEST(ShapeUtilTest, CompatibleIgnoringFpPrecision) {
Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
}
TEST(ShapeUtilTest, IncompatibleIgnoringFpPrecision) {
Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {2, 2});
ASSERT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
}
TEST(ShapeUtilTest, IncompatibleDifferentElementShapes) {
Shape shape_1 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape_2 = ShapeUtil::MakeShape(PRED, {3, 2});
EXPECT_FALSE(ShapeUtil::Compatible(shape_1, shape_2));
}
TEST(ShapeUtilTest, EqualIgnoringFpPrecision) {
EXPECT_TRUE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, UnequalIgnoringFpPrecision) {
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {0, 1})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 4}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {1, 0})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(PRED, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, EqualIgnoringElementType) {
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(S32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(PRED, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, UnequalIgnoringElementType) {
EXPECT_FALSE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {0, 1})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 4}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {1, 0})));
}
TEST(ShapeUtilTest, EqualDynamicShapes) {
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 3}, {true, false}),
ShapeUtil::MakeShape(F32, {4, 3}, {true, false})));
EXPECT_FALSE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 3}, {true, false}),
ShapeUtil::MakeShape(F32, {4, 3}, {false, false})));
EXPECT_FALSE(ShapeUtil::Equal(
ShapeUtil::MakeShape(F32, {Shape::kUnboundedSize}, {true}),
ShapeUtil::MakeShape(F32, {2}, {true})));
}
TEST(ShapeUtilTest, CompatibleDynamicShapes) {
Shape shape_a = ShapeUtil::MakeShape(F32, {4, 3}, {true, false});
*shape_a.mutable_layout() = Layout({1, 0});
Shape shape_b = ShapeUtil::MakeShape(F32, {4, 3}, {true, false});
*shape_b.mutable_layout() = Layout({0, 1});
Shape shape_c = ShapeUtil::MakeShape(F32, {4, 3}, {false, true});
*shape_c.mutable_layout() = Layout({0, 1});
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_a));
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_b));
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_c));
}
TEST(ShapeUtilTest, CompatibleTuples) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
EXPECT_TRUE(ShapeUtil::Compatible(tuple1, tuple2));
}
TEST(ShapeUtilTest, MakeMaybeTupleShape) {
Shape s1 =
ShapeUtil::MakeMaybeTupleShape({ShapeUtil::MakeShape(F32, {3, 2})});
EXPECT_TRUE(ShapeUtil::Compatible(s1, ShapeUtil::MakeShape(F32, {3, 2})));
}
TEST(ShapeUtilTest, CompatibleTuplesIgnoringFpPrecision) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(BF16, {3, 2}), ShapeUtil::MakeShape(F32, {4, 5})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F64, {3, 2}), ShapeUtil::MakeShape(BF16, {4, 5})});
EXPECT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithSwappedElements) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesIgnoringFpPrecision) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(BF16, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(BF16, {4, 5})});
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithDifferentPrimitiveType) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(S32, {3, 2})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
EXPECT_TRUE(ShapeUtil::CompatibleIgnoringElementType(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithDifferentDimensions) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {4, 2})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleScalarVsTuple) {
Shape shape1 = ShapeUtil::MakeShape(F32, {});
Shape shape2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(U32, {})});
EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
}
TEST(ShapeUtilTest, OpaqueVsArray) {
Shape shape1 = ShapeUtil::MakeShape(F32, {5, 7});
Shape shape2 = ShapeUtil::MakeOpaqueShape();
EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
}
TEST(ShapeUtilTest, ScalarDefaultLayoutEqualsScalarEmptyMin2Maj) {
Shape scalar_default_layout = ShapeUtil::MakeShape(F32, {});
ASSERT_TRUE(scalar_default_layout.has_layout())
<< ShapeUtil::HumanStringWithLayout(scalar_default_layout);
const Shape scalar_empty_min2maj =
ShapeUtil::MakeShapeWithDenseLayout(F32, {}, {});
ASSERT_TRUE(scalar_empty_min2maj.has_layout())
<< ShapeUtil::HumanStringWithLayout(scalar_empty_min2maj);
EXPECT_TRUE(ShapeUtil::Equal(scalar_default_layout, scalar_empty_min2maj));
}
TEST(ShapeUtilTest, ByteSizeOfWithoutPadding) {
EXPECT_EQ(4, ShapeUtil::ByteSizeOfPrimitiveType(F32));
EXPECT_EQ(4, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {})));
EXPECT_EQ(800, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {10, 20})));
EXPECT_EQ(8, ShapeUtil::ByteSizeOfPrimitiveType(F64));
EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {})));
EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {10, 20})));
EXPECT_EQ(8, ShapeUtil::ByteSizeOfPrimitiveType(C64));
EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {})));
EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {10, 20})));
}
TEST(ShapeUtilTest, ByteStrides) {
Shape shape1 = ShapeUtil::MakeShape(F32, {3, 5, 7});
Shape shape2 = ShapeUtil::MakeShape(F16, {5, 7, 9});
EXPECT_THAT(*ShapeUtil::ByteStrides(shape1), ElementsAre(140, 28, 4));
EXPECT_THAT(*ShapeUtil::ByteStrides(shape2), ElementsAre(126, 18, 2));
}
TEST(ShapeUtilTest, NilShape) {
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeNil()));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeShape(F32, {1, 2, 3})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeShape(F32, {0, 1})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {})})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {0})})));
}
TEST(ShapeUtilTest, NestedTuple) {
EXPECT_FALSE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape({})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({})})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeTupleShape({})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({}), ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({}), ShapeUtil::MakeTupleShape({})})));
}
TEST(ShapeUtilTest, NestedTupleWithPtrs) {
const Shape nil = ShapeUtil::MakeNil();
const Shape s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_FALSE(ShapeUtil::IsNestedTuple(nil));
EXPECT_FALSE(
ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShapeWithPtrs({&s32})));
EXPECT_TRUE(
ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShapeWithPtrs({&nil})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&s32, &s32})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&s32, &nil})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&nil, &s32})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&nil, &nil})));
}
TEST(ShapeUtilTest, ElementsIn) {
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {0})));
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1})));
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1, 1})));
EXPECT_EQ(2, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {2})));
EXPECT_EQ(2, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {2, 1})));
EXPECT_EQ(15, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {3, 5})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {3, 0, 5})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {0, 3, 0})));
EXPECT_EQ(15, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1, 3, 5})));
EXPECT_EQ(221, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {13, 17})));
}
TEST(ShapeUtilTest, HasPrimitiveType) {
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {}), S32));
EXPECT_FALSE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {}), S16));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {0}), S32));
EXPECT_FALSE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeTupleShape({}), S32));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(S32, {})}),
S32));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S16, {})})}),
S16));
}
TEST(ShapeUtilTest, IsZeroElementArray) {
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {})));
EXPECT_TRUE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {0})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1, 1})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {2})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {2, 1})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {3, 5})));
EXPECT_TRUE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {3, 0, 5})));
EXPECT_TRUE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {0, 3, 0})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1, 3, 5})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {13, 17})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeNil()));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeTupleShape({})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {0, 3, 0})})));
}
TEST(ShapeUtilTest, SameDimensions) {
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(S32, {})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F32, {})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(S32, {1})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {0}),
ShapeUtil::MakeShape(S32, {0})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::MakeShape(S32, {2})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {2})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {0, 0}),
ShapeUtil::MakeShape(F32, {0})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F32, {1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 0})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1, 1}),
ShapeUtil::MakeShape(F32, {1, 2})));
}
TEST(ShapeUtilTest, GetSubshape) {
Shape array_shape = ShapeUtil::MakeShape(F32, {42, 42, 123});
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(array_shape, {})));
EXPECT_TRUE(ShapeUtil::Equal(
array_shape, *ShapeUtil::GetMutableSubshape(&array_shape, {})));
Shape tuple_shape =
ShapeUtil::MakeTupleShape({array_shape, array_shape, array_shape});
EXPECT_TRUE(
ShapeUtil::Equal(tuple_shape, ShapeUtil::GetSubshape(tuple_shape, {})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {0})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {1})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {2})));
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape(
{array_shape, ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape, array_shape}),
array_shape})});
EXPECT_TRUE(ShapeUtil::Equal(nested_tuple_shape,
ShapeUtil::GetSubshape(nested_tuple_shape, {})));
EXPECT_TRUE(ShapeUtil::Equal(
array_shape, ShapeUtil::GetSubshape(nested_tuple_shape, {0})));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::GetSubshape(nested_tuple_shape, {1})));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::GetSubshape(nested_tuple_shape, {2, 0})));
}
TEST(ShapeUtilTest, IsLeafIndex) {
Shape array_shape = ShapeUtil::MakeShape(F32, {42, 42, 123});
EXPECT_TRUE(ShapeUtil::IsLeafIndex(array_shape, {}));
Shape tuple_shape = ShapeUtil::MakeTupleShape({array_shape, array_shape});
EXPECT_FALSE(ShapeUtil::IsLeafIndex(tuple_shape, {}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(tuple_shape, {0}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(tuple_shape, {1}));
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape(
{array_shape, ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape, array_shape}),
array_shape})});
EXPECT_FALSE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {0}));
EXPECT_FALSE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1, 0}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1, 1}));
}
TEST(ShapeUtilTest, ForEachSubshapeArray) {
const Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
int calls = 0;
ShapeUtil::ForEachSubshape(
shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {
EXPECT_EQ(&shape, &subshape);
EXPECT_TRUE(index.empty());
++calls;
});
EXPECT_EQ(1, calls);
}
TEST(ShapeUtilTest, ForEachSubshapeNestedTuple) {
const Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachSubshape(
shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {
EXPECT_TRUE(
ShapeUtil::Equal(subshape, ShapeUtil::GetSubshape(shape, index)));
if (calls == 0) {
EXPECT_TRUE(index.empty());
} else if (calls == 4) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(subshape));
}
++calls;
});
EXPECT_EQ(5, calls);
}
TEST(ShapeUtilTest, ForEachMutableSubshapeNestedTuple) {
Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachMutableSubshape(
&shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {
EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));
if (calls == 0) {
EXPECT_TRUE(index.empty());
} else if (calls == 4) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));
}
++calls;
});
EXPECT_EQ(5, calls);
}
TEST(ShapeUtilTest, ForEachMutableLeafShapeTest) {
Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachMutableLeafShape(
&shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {
EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));
if (calls == 0) {
EXPECT_EQ(42, ShapeUtil::ElementsIn(*subshape));
} else if (calls == 1) {
EXPECT_EQ(101, ShapeUtil::ElementsIn(*subshape));
} else if (calls == 2) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));
}
++calls;
});
EXPECT_EQ(3, calls);
}
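// {9,1,4} -> {1,9,4,1} only inserts/deletes size-1 dimensions, whereas
// {9,1,4} -> {3,1,12} changes the non-degenerate dimensions, so only the first
// pair is reported as an inserted-or-deleted-1-sized-dimensions reshape.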
TEST(ShapeUtilTest, InsertedOrDeleted1SizedDimensions) {
Shape shape0 = ShapeUtil::MakeShape(S32, {9, 1, 4});
Shape shape1 = ShapeUtil::MakeShape(S32, {1, 9, 4, 1});
Shape shape2 = ShapeUtil::MakeShape(S32, {3, 1, 12});
EXPECT_TRUE(
ShapeUtil::InsertedOrDeleted1SizedDimensions(shape0, shape1).has_value());
EXPECT_FALSE(
ShapeUtil::InsertedOrDeleted1SizedDimensions(shape0, shape2).has_value());
}
TEST(ShapeUtilTest, ForEachIndex) {
struct ShapeDimensionAndNumberInvocations {
std::vector<int64_t> dimensions;
int invocations;
} test_data[] = {
{{}, 1}, {{0}, 0}, {{16}, 16}, {{3, 0}, 0},
{{0, 2}, 0}, {{4, 16}, 64}, {{6, 11, 17}, 1122}, {{6, 11, 5, 17}, 5610},
};
for (const auto& data : test_data) {
Shape shape = ShapeUtil::MakeShape(F32, data.dimensions);
int invocations = 0;
auto increment_func = [&invocations](absl::Span<const int64_t> indexes) {
invocations++;
return true;
};
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
ShapeUtil::ForEachIndex(shape, zero_base, data.dimensions, step,
increment_func);
EXPECT_EQ(invocations, data.invocations);
}
}
TEST(ShapeUtilTest, ForEachIndexWithStatus) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int invocations = 0;
auto increment_func =
[&invocations](
absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
if (++invocations == 5) {
return Unimplemented("Cannot increment beyond 5.");
}
return true;
};
absl::Status error_status = ShapeUtil::ForEachIndexWithStatus(
      shape, /*base=*/{0, 0}, /*count=*/{10, 10}, /*incr=*/{0, 1},
      increment_func);
EXPECT_FALSE(error_status.ok());
EXPECT_THAT(error_status.message(),
::testing::HasSubstr("Cannot increment beyond 5."));
EXPECT_EQ(invocations, 5);
}
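// The ForEachIndexParallel tests below use the same (base, count, stride)
// argument order as ForEachIndex above; the visitor additionally receives a
// thread id, which the next test checks stays within [-1, thread-pool size).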
TEST(ShapeUtilTest, GetForEachIndexParallelThreadCount) {
const int kThreadCount = ShapeUtil::GetForEachIndexParallelThreadCount();
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
  auto check_func = [kThreadCount](absl::Span<const int64_t> /*indexes*/,
int thread_id) -> absl::StatusOr<bool> {
EXPECT_GE(thread_id, -1);
EXPECT_LT(thread_id, kThreadCount);
return true;
};
for (int i = 0; i < 10; ++i) {
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 100},
{1, 1}, check_func);
}
}
TEST(ShapeUtilTest, ForEachIndexParallel) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10];
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
EXPECT_EQ(output[i][j], init + i + j);
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_Rank0) {
Shape shape = ShapeUtil::MakeShape(F32, {});
int64_t output = -1;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output = indexes.size();
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {}, {},
{}, set_func);
EXPECT_EQ(output, 0);
}
TEST(ShapeUtilTest, ForEachIndexParallel_Empty) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 0});
bool called = false;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
called = true;
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {2, 0},
{1, 1}, set_func);
EXPECT_FALSE(called);
}
TEST(ShapeUtilTest, ForEachIndexParallel_DimensionPinnedWithZeros) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
int64_t output[2][2] = {};
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
  ShapeUtil::ForEachIndexParallel(shape, /*base=*/{1, 0}, /*count=*/{0, 2},
                                  /*incr=*/{0, 1}, set_func);
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
if (i == 1) {
EXPECT_EQ(output[i][j], init + i + j);
} else {
EXPECT_EQ(output[i][j], 0);
}
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_WithSkips) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10] = {};
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
  ShapeUtil::ForEachIndexParallel(shape, /*base=*/{2, 3}, /*count=*/{3, 1},
                                  /*incr=*/{2, 1}, set_func);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
if ((i == 2 || i == 4) && j == 3) {
EXPECT_EQ(output[i][j], init + i + j);
} else {
EXPECT_EQ(output[i][j], 0);
}
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_CalledTwice) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10];
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
                      int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
int init2 = 15;
auto set_func2 = [&](absl::Span<const int64_t> indexes,
                       int /*thread_id*/) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init2 + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func);
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func2);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
EXPECT_EQ(output[i][j], init2 + i + j);
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_CalledFromMultipleThreads) {
constexpr int kCallingThreads = 10;
constexpr int kDim0 = 10;
constexpr int kDim1 = 10;
constexpr int kInit = 5;
const Shape kShape = ShapeUtil::MakeShape(F32, {kDim0, kDim1});
int64_t output[kCallingThreads][kDim0][kDim1];
{
tsl::thread::ThreadPool pool(tsl::Env::Default(), "foreach",
kCallingThreads);
for (int t = 0; t < kCallingThreads; ++t) {
pool.Schedule([&output, &kShape, t] {
auto set_func = [&output, t](
absl::Span<const int64_t> indexes,
                          int /*thread_id*/) -> absl::StatusOr<bool> {
output[t][indexes[0]][indexes[1]] = kInit + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(kShape, {0, 0},
{kDim0, kDim1},
{1, 1}, set_func);
});
}
}
for (int t = 0; t < kCallingThreads; ++t) {
for (int i = 0; i < kDim0; ++i) {
for (int j = 0; j < kDim1; ++j) {
EXPECT_EQ(output[t][i][j], kInit + i + j);
}
}
}
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_1x1x1x1_to_1x1x1) {
EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
ShapeUtil::MakeShape(S32, {1, 1, 1, 1}),
ShapeUtil::MakeShape(S32, {1, 1, 1})),
ElementsAre(std::make_pair(0, 0), std::make_pair(1, 1),
std::make_pair(2, 2)));
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_1x1x1_to_1x1x1x1) {
EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
ShapeUtil::MakeShape(S32, {1, 1, 1}),
ShapeUtil::MakeShape(S32, {1, 1, 1, 1})),
ElementsAre(std::make_pair(0, 0), std::make_pair(1, 1),
std::make_pair(2, 2)));
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_4x1x3x5x6x7_to_2x6x1x5x1x42) {
EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
ShapeUtil::MakeShape(S32, {4, 1, 3, 5, 6, 7}),
ShapeUtil::MakeShape(S32, {2, 6, 1, 5, 1, 42})),
ElementsAre(std::make_pair(3, 3)));
}
TEST(ShapeUtilTest, ReshapeIsBitcast_3x4_6x2) {
for (bool input_is_row_major : {true, false}) {
for (bool output_is_row_major : {true, false}) {
Layout input_layout = input_is_row_major ? LayoutUtil::MakeLayout({1, 0})
: LayoutUtil::MakeLayout({0, 1});
Layout output_layout = output_is_row_major
? LayoutUtil::MakeLayout({1, 0})
: LayoutUtil::MakeLayout({0, 1});
EXPECT_EQ(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(
F32, {3, 4}, input_layout.minor_to_major()),
ShapeUtil::MakeShapeWithDenseLayout(
F32, {6, 2}, output_layout.minor_to_major())),
input_is_row_major && output_is_row_major);
}
}
}
TEST(ShapeUtilTest, ReshapeIsBitcast_3x2x2_6x2_Dim1IsMostMinor) {
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {6, 2}, {0, 1})));
}
TEST(ShapeUtilTest, ReshapeIsBitcastIgnoreElementType) {
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {6, 2}, {0, 1}),
true));
EXPECT_FALSE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {6, 2}, {0, 1}),
false));
}
TEST(ShapeUtilTest, TransposeIsBitcastIgnoreElementType) {
EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 5}, {1, 0}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {5, 10}, {0, 1}), {1, 0},
true));
EXPECT_FALSE(ShapeUtil::TransposeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 5}, {1, 0}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {5, 10}, {0, 1}), {1, 0},
false));
}
TEST(ShapeUtilTest, IsReshapeOrTransposeBitcast) {
EXPECT_TRUE(ShapeUtil::IsReshapeOrTransposeBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 5}, {1, 0}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 10}, {0, 1})));
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {6, 2}, {0, 1}),
true));
}
TEST(ShapeUtilTest, HasDegenerateDimensions) {
EXPECT_TRUE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 1, 2})));
EXPECT_TRUE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 1, 1})));
EXPECT_FALSE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 3, 5})));
EXPECT_FALSE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 0, 5})));
}
TEST(ShapeUtilTest, PermuteDimensionsLayout) {
std::vector<int64_t> layout(3);
std::iota(layout.begin(), layout.end(), 0);
do {
Shape s = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 100, 1000}, layout);
SCOPED_TRACE(absl::StrCat("s=", ShapeUtil::HumanString(s)));
std::vector<int64_t> permutation(3);
std::iota(permutation.begin(), permutation.end(), 0);
do {
SCOPED_TRACE(
absl::StrCat("permutation=", absl::StrJoin(permutation, ",")));
EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(
s, ShapeUtil::PermuteDimensions(permutation, s), permutation));
} while (std::next_permutation(permutation.begin(), permutation.end()));
} while (std::next_permutation(layout.begin(), layout.end()));
}
TEST(ShapeUtilTest, UpdateDynamicDimensions) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 100, 1000});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape});
ShapeUtil::UpdateDynamicDimension(&tuple_shape, {0}, 1, true);
EXPECT_TRUE(ShapeUtil::GetSubshape(tuple_shape, {0}).is_dynamic_dimension(1));
}
TEST(ShapeUtilTest, InvalidDynamicDimension) {
absl::StatusOr<Shape> error_status = ShapeUtil::MakeValidatedShape(
F32, {Shape::kUnboundedSize, Shape::kUnboundedSize}, {true, false});
EXPECT_FALSE(error_status.ok());
EXPECT_THAT(error_status.status().message(),
::testing::HasSubstr(
"Cannot mark a dynamic dimension at dim=1 as static"));
}
TEST(ShapeUtilTest, PermuteDynamicDimensions) {
Shape shape =
ShapeUtil::MakeShape(F32, {10, 100, 1000},
{false, true, true});
SCOPED_TRACE(absl::StrCat("shape=", shape.ToString()));
std::vector<int64_t> permutation(3);
std::iota(permutation.begin(), permutation.end(), 0);
do {
SCOPED_TRACE(absl::StrCat("permutation=", absl::StrJoin(permutation, ",")));
auto permuted = ShapeUtil::PermuteDimensions(permutation, shape);
for (int i = 0; i < shape.rank(); i++) {
EXPECT_EQ(permuted.dimensions(i), shape.dimensions(permutation[i]));
EXPECT_EQ(permuted.is_dynamic_dimension(i),
shape.is_dynamic_dimension(permutation[i]));
}
} while (std::next_permutation(permutation.begin(), permutation.end()));
}
TEST(ShapeUtilTest, PrependMajorDimension) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30});
EXPECT_EQ(ShapeUtil::PrependMajorDimension(40, shape),
ShapeUtil::MakeShape(F32, {40, 10, 20, 30}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {0, 2, 1});
EXPECT_EQ(
ShapeUtil::PrependMajorDimension(40, shape),
ShapeUtil::MakeShapeWithDenseLayout(F32, {40, 10, 20, 30}, {1, 3, 2, 0}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {2, 1, 0});
EXPECT_EQ(
ShapeUtil::PrependMajorDimension(40, shape),
ShapeUtil::MakeShapeWithDenseLayout(F32, {40, 10, 20, 30}, {3, 2, 1, 0}));
}
TEST(ShapeUtilTest, AppendMinorDimension) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30});
ShapeUtil::AppendMinorDimension(40, &shape);
EXPECT_EQ(shape, ShapeUtil::MakeShape(F32, {10, 20, 30, 40}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {2, 1, 0});
ShapeUtil::AppendMinorDimension(40, &shape);
EXPECT_EQ(shape, ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30, 40},
{3, 2, 1, 0}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {0, 2, 1});
ShapeUtil::AppendMinorDimension(40, &shape);
EXPECT_EQ(shape, ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30, 40},
{3, 0, 2, 1}));
}
TEST(ShapeUtilTest, MoveDimToMajor) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10, 10});
Shape new_shape = ShapeUtil::MoveDimToMajor(shape, 0);
EXPECT_EQ(shape, new_shape);
new_shape = ShapeUtil::MoveDimToMajor(shape, 1);
EXPECT_EQ(new_shape,
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {2, 0, 1}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {0, 2, 1});
new_shape = ShapeUtil::MoveDimToMajor(shape, 0);
EXPECT_EQ(new_shape,
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {2, 1, 0}));
shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {10, 10, 10}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {0, 2, 1})});
new_shape = ShapeUtil::MoveDimToMajor(shape, 0);
EXPECT_EQ(new_shape,
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {10, 10, 10}),
ShapeUtil::MakeShapeWithDenseLayout(
F32, {10, 10, 10}, {2, 1, 0})}));
}
TEST(ShapeUtilTest, DeleteDimensions) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 0, 1});
Shape new_shape = ShapeUtil::DeleteDimensions({1}, shape);
EXPECT_EQ(new_shape,
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 2}, {1, 0}));
}
TEST(ShapeUtilTest, MakeShapeWithDescendingLayoutAndSamePhysicalLayout) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {128, 24, 4, 48, 48},
{2, 4, 3, 1, 0});
Shape new_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);
EXPECT_EQ(new_shape, ShapeUtil::MakeShapeWithDenseLayout(
F32, {128, 24, 48, 48, 4}, {4, 3, 2, 1, 0}));
}
TEST(ShapeUtilTest, DeduceTransposeDimensionsForBitcast) {
Shape input_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
Shape output_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 5}, {0, 1});
std::vector<int64_t> expected_permutation = {1, 0};
EXPECT_EQ(std::make_optional(expected_permutation),
ShapeUtil::DeduceTransposeDimensionsForBitcast(input_shape,
output_shape));
}
TEST(ShapeUtilTest, DeduceTransposeDimensionsForBitcastNegative) {
Shape input_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
Shape output_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 5}, {1, 0});
EXPECT_EQ(std::nullopt, ShapeUtil::DeduceTransposeDimensionsForBitcast(
input_shape, output_shape));
}
TEST(ShapeUtilTest, DeleteDimensionsUnsorted) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2, 7, 9},
{2, 0, 1, 4, 3});
Shape a = ShapeUtil::DeleteDimensions({1, 2, 3}, shape);
Shape b = ShapeUtil::DeleteDimensions({3, 2, 1}, shape);
EXPECT_EQ(a, b);
EXPECT_EQ(a, ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 9}, {0, 1}));
}
TEST(ShapeUtilTest, IsEffectivelyMostMajorDimension) {
Shape shape0 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 279},
{4, 0, 1, 2, 3});
EXPECT_TRUE(ShapeUtil::IsEffectivelyMostMajorDimension(shape0, 2));
Shape shape1 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 279},
{4, 1, 2, 3, 0});
EXPECT_TRUE(ShapeUtil::IsEffectivelyMostMajorDimension(shape1, 2));
Shape shape2 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 279},
{0, 1, 2, 3, 4});
EXPECT_FALSE(ShapeUtil::IsEffectivelyMostMajorDimension(shape2, 2));
Shape shape3 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 1},
{0, 1, 2, 3, 4});
  EXPECT_TRUE(ShapeUtil::IsEffectivelyMostMajorDimension(shape3, 4));
}
TEST(ShapeUtilTest, B_250640044) {
ShapeProto proto;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(element_type: TUPLE
tuple_shapes {
element_type: S8
dimensions: 137438953472
layout {
minor_to_major: 0
dim_level_types: DIM_COMPRESSED
physical_shape {
element_type: TUPLE
tuple_shapes {}
}
}
is_dynamic_dimension: false
})pb",
&proto));
Shape shape(proto);
EXPECT_FALSE(ShapeUtil::ValidateShape(shape).ok());
}
TEST(ShapeUtilTest, B_251055887) {
ShapeProto proto;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
element_type: S8
dimensions: 0
dimensions: 8
dimensions: 0
dimensions: 0
dimensions: 4
dimensions: 1
dimensions: 1
dimensions: 6
dimensions: 281474976710657
dimensions: 1
layout {
minor_to_major: 1
minor_to_major: 3
minor_to_major: 0
minor_to_major: 5
minor_to_major: 4
minor_to_major: 6
minor_to_major: 8
minor_to_major: 7
minor_to_major: 6
minor_to_major: 9
physical_shape { element_type: -562 }
})pb",
&proto));
Shape shape(proto);
EXPECT_FALSE(ShapeUtil::ValidateShape(shape).ok());
}
TEST(ShapeUtilTest, Int4ShapeSize) {
Shape int4_shape = ShapeUtil::MakeShape(S4, {64, 128});
int4_shape.mutable_layout()->set_element_size_in_bits(4);
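  // With 4-bit elements, two values are packed per byte, so the buffer
  // occupies half as many bytes as there are elements.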
EXPECT_EQ(ShapeUtil::ArrayDataSize(int4_shape), 64 * 128 / 2);
EXPECT_EQ(ShapeUtil::ArraySize(int4_shape), 64 * 128 / 2);
Shape int4_shape2 = ShapeUtil::MakeShape(S4, {9216, 6144});
auto* layout = int4_shape2.mutable_layout();
layout->clear_tiles();
layout->add_tiles();
layout->add_tiles();
*layout->mutable_tiles(0) = Tile({8 * (32 / 4), 128});
*layout->mutable_tiles(1) = Tile({32 / 4, 1});
layout->set_element_size_in_bits(4);
EXPECT_EQ(ShapeUtil::ArrayDataSize(int4_shape2), 9216 * 6144 / 2);
EXPECT_EQ(ShapeUtil::ArraySize(int4_shape2), 9216 * 6144 / 2);
Shape pred_shape = ShapeUtil::ChangeElementType(int4_shape, PRED);
EXPECT_EQ(pred_shape.layout().element_size_in_bits(), 0);
Shape u4_shape = ShapeUtil::ChangeElementType(int4_shape, U4);
EXPECT_EQ(u4_shape.layout().element_size_in_bits(), 4);
}
TEST(XlaShapeUtilTest, ZeroSize) {
std::vector<std::vector<int64_t>> test_cases = {
{0, 64, 128}, {128, 0, 64}, {64, 128, 0},
{0, 63, 127}, {127, 0, 63}, {63, 127, 0},
};
for (const auto& dimensions : test_cases) {
xla::Shape int4_shape = xla::ShapeUtil::MakeShape(xla::S4, dimensions);
int4_shape.mutable_layout()->set_element_size_in_bits(4);
EXPECT_EQ(xla::ShapeUtil::ArrayDataSize(int4_shape), 0);
EXPECT_EQ(xla::ShapeUtil::ArraySize(int4_shape), 0);
}
}
TEST(ShapeUtilTest, DecomposeBitcastToReshape) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 16, 17, 3}, {3, 2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51}, {1, 0});
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
EXPECT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposition));
}
TEST(ShapeUtilTest, DecomposeBitcastToReshape2) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {17, 3, 1, 16}, {1, 0, 3, 2});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {0, 1});
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
EXPECT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposition));
}
TEST(ShapeUtilTest, DecomposeBitcastToTranspose) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 7, 6, 4}, {3, 2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 6, 4, 7}, {2, 1, 3, 0});
const std::vector<int64_t> kExpectedTransposeDims = {0, 2, 3, 1};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
decomposition));
ShapeUtil::BitcastDecompositionTranspose decomposition_transpose =
std::get<ShapeUtil::BitcastDecompositionTranspose>(decomposition);
EXPECT_EQ(decomposition_transpose.transpose_dims, kExpectedTransposeDims);
}
TEST(ShapeUtilTest, DecomposeBitcastToReshapeAndTranspose) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3}, {2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {0, 1});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 1, 2};
const Shape kExpectedTranspose1Shape = kInputShape;
const Shape kExpectedReshapeShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51}, {1, 0});
const std::vector<int64_t> kExpectedTranspose2Dims = {1, 0};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_TRUE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose2Identity());
}
TEST(ShapeUtilTest, DecomposeBitcastToReshapeAndTranspose2) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3, 7}, {3, 2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {7, 16, 51}, {0, 2, 1});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 1, 2, 3};
const Shape kExpectedTranspose1Shape = kInputShape;
const Shape kExpectedReshapeShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51, 7}, {2, 1, 0});
const std::vector<int64_t> kExpectedTranspose2Dims = {2, 0, 1};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_TRUE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose2Identity());
}
TEST(ShapeUtilTest, DecomposeBitcastToTransposeAndReshape) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 3, 17}, {1, 2, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {1, 0});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 2, 1};
const Shape kExpectedTranspose1Shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3}, {2, 1, 0});
const Shape kExpectedReshapeShape = kOutputShape;
const std::vector<int64_t> kExpectedTranspose2Dims = {0, 1};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_TRUE(decomposition_trt.IsTranspose2Identity());
}
TEST(ShapeUtilTest, DecomposeBitcastToTrt) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 3, 17}, {1, 2, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51}, {0, 1});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 2, 1};
const Shape kExpectedTranspose1Shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3}, {2, 1, 0});
const Shape kExpectedReshapeShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {1, 0});
const std::vector<int64_t> kExpectedTranspose2Dims = {1, 0};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose2Identity());
}
TEST(AlgebraicSimplifierTest, ReshapeIsBitcast_3x2x2_6x2_Dim0IsMostMinor) {
EXPECT_FALSE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {0, 1, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {6, 2}, {0, 1})));
}
TEST(AlignmentTest, AlignLayoutsWithoutTrivialDimensions) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {3, 8, 5, 7, 11},
{3, 2, 1, 0, 4});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {4, 3, 2, 7, 5, 11}));
EXPECT_TRUE(aligned_shape);
EXPECT_THAT(aligned_shape.value().layout().minor_to_major(),
ElementsAre(4, 3, 2, 1, 0, 5));
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {3, 2, 4, 35, 11}));
EXPECT_TRUE(aligned_shape);
EXPECT_THAT(aligned_shape.value().layout().minor_to_major(),
ElementsAre(3, 2, 1, 0, 4));
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
}
TEST(AlignmentTest, AlignLayoutsWithTrivialDimensions) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(
xla::F32, {1, 3, 8, 1, 5, 7, 1, 11, 1, 1},
{5, 0, 4, 2, 1, 3, 6, 7, 9, 8});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {1, 4, 1, 3, 2, 7, 5, 11, 1}));
EXPECT_TRUE(aligned_shape);
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
}
TEST(AlignmentTest, AlignLayoutsWithAllTrivialDimensions) {
Shape input =
ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {1, 1, 1, 1}, {0, 1, 3, 2});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {1, 1, 1, 1, 1}));
EXPECT_TRUE(aligned_shape);
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
}
TEST(AlignmentTest, AlignLayoutsWithoutTrivialDimensionsWrongInputLayout) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {3, 8, 5, 7, 11},
{2, 3, 1, 0, 4});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {4, 3, 2, 7, 5, 11}));
EXPECT_FALSE(aligned_shape);
}
TEST(AlignmentTest,
AlignLayoutsWithoutTrivialDimensionsNonConsecutiveAlignmentPart) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {3, 8, 5, 7, 11},
{3, 2, 1, 0, 4});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {4, 3, 2, 5, 77}));
EXPECT_FALSE(aligned_shape);
}
void BM_MakeShape(::testing::benchmark::State& state) {
for (auto s : state) {
ShapeUtil::MakeShape(F32, {2});
}
}
BENCHMARK(BM_MakeShape);
void BM_MakeValidatedShape(::testing::benchmark::State& state) {
for (auto s : state) {
ShapeUtil::MakeValidatedShape(F32, {2}).value();
}
}
BENCHMARK(BM_MakeValidatedShape);
Shape ShapeForBenchmark(::testing::benchmark::State& state) {
Shape shape;
switch (state.range(0)) {
case 0: {
shape = ShapeUtil::MakeShape(xla::F32, {1});
break;
}
case 1: {
shape = ShapeUtil::MakeShape(xla::F32, {4, 1});
break;
}
case 2: {
shape = ShapeUtil::MakeShape(xla::F32, {256, 1, 1024});
break;
}
}
state.SetLabel(shape.ToString());
return shape;
}
void BM_ForEachIndex(::testing::benchmark::State& state) {
Shape shape = ShapeForBenchmark(state);
for (auto s : state) {
int count = 0;
auto increment_func =
[&count](absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
count++;
return true;
};
ShapeUtil::ForEachIndex(shape, increment_func);
}
}
BENCHMARK(BM_ForEachIndex)->Arg(0)->Arg(1)->Arg(2);
void BM_ForEachIndexNoStatus(::testing::benchmark::State& state) {
Shape shape = ShapeForBenchmark(state);
for (auto s : state) {
int count = 0;
auto increment_func = [&count](absl::Span<const int64_t> indexes) -> bool {
count++;
return true;
};
ShapeUtil::ForEachIndexNoStatus(shape, increment_func);
}
}
BENCHMARK(BM_ForEachIndexNoStatus)->Arg(0)->Arg(1)->Arg(2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/shape_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/shape_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
321442fe-b4d4-4301-bf31-494056f960f8 | cpp | tensorflow/tensorflow | host_callback | third_party/xla/xla/python/ifrt/host_callback.cc | third_party/xla/xla/pjrt/host_callback_test.cc | #include "xla/python/ifrt/host_callback.h"
namespace xla {
namespace ifrt {
char HostCallback::ID = 0;
char LoadedHostCallback::ID = 0;
}
} | #include "xla/pjrt/host_callback.h"
#include <cstring>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class TestPjRtHostMemoryForDeviceManager
: public PjRtHostMemoryForDeviceManager {
public:
~TestPjRtHostMemoryForDeviceManager() override = default;
absl::StatusOr<PjRtChunk> ToDeviceLayout(const void* src_data,
size_t src_size,
const Shape& host_shape,
const Shape& device_shape) override {
auto chunk = PjRtChunk::AllocateDefault(src_size);
std::memcpy(chunk.data(), src_data, src_size);
return chunk;
}
absl::Status ToHostLayout(const void* src_data, size_t src_size,
const Shape& src_shape, void* dst_data,
size_t dst_size, const Shape& dst_shape) override {
CHECK_EQ(src_size, dst_size);
std::memcpy(dst_data, src_data, src_size);
return absl::OkStatus();
}
};
class TestStream : public CopyToDeviceStream {
public:
TestStream(int64_t total_bytes, int64_t granule_bytes, PjRtChunk& chunk,
absl::Notification& done)
: CopyToDeviceStream(total_bytes, granule_bytes),
chunk_(chunk),
done_(done) {}
PjRtFuture<> AddChunk(PjRtChunk chunk) override {
CHECK(!done_.HasBeenNotified());
chunk_ = std::move(chunk);
done_.Notify();
return PjRtFuture<>(absl::OkStatus());
}
private:
PjRtChunk& chunk_;
absl::Notification& done_;
};
TEST(HostCallbackTest, Basic) {
HostCallback host_callback;
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
size_t byte_size = ShapeUtil::ByteSizeOf(shape);
host_callback.operands = {HostCallbackArgInfo{1, shape}};
host_callback.results = {HostCallbackArgInfo{2, shape}};
host_callback.callback = [byte_size](void** outputs, void** inputs) {
std::memcpy(outputs[0], inputs[0], byte_size);
return absl::OkStatus();
};
HostCallbackStates states;
auto& send_callbacks = states.send_callbacks.emplace_back();
auto& recv_callbacks = states.recv_callbacks.emplace_back();
TestPjRtHostMemoryForDeviceManager test_host_memory_for_device_manager;
auto context = CreateHostCallbackStateAndAppendSendRecvCallbacks(
std::move(host_callback), &test_host_memory_for_device_manager,
send_callbacks, recv_callbacks,
false);
PjRtTransferMetadata metadata;
metadata.device_shape = shape;
auto literal = LiteralUtil::CreateR2({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto chunk = PjRtChunk::AllocateDefault(byte_size);
ASSERT_EQ(chunk.size(), literal.size_bytes());
std::memcpy(chunk.data(), literal.untyped_data(), literal.size_bytes());
TF_ASSERT_OK(context->OnSend(0, metadata, std::move(chunk)));
PjRtChunk received_chunk;
absl::Notification done;
auto stream = std::make_unique<TestStream>(byte_size, 8,
received_chunk, done);
context->Receive(0, metadata, std::move(stream));
done.WaitForNotification();
BorrowingLiteral borrowing_literal(
reinterpret_cast<const char*>(received_chunk.data()), shape);
EXPECT_TRUE(LiteralTestUtil::Equal(literal, borrowing_literal));
}
TEST(HostCallbackTest, NonBlockingRecv) {
HostCallback host_callback;
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
size_t byte_size = ShapeUtil::ByteSizeOf(shape);
host_callback.operands = {HostCallbackArgInfo{1, shape}};
host_callback.results = {HostCallbackArgInfo{2, shape}};
host_callback.callback = [byte_size](void** outputs, void** inputs) {
std::memcpy(outputs[0], inputs[0], byte_size);
return absl::OkStatus();
};
HostCallbackStates states;
auto& send_callbacks = states.send_callbacks.emplace_back();
auto& recv_callbacks = states.recv_callbacks.emplace_back();
TestPjRtHostMemoryForDeviceManager test_host_memory_for_device_manager;
auto context = CreateHostCallbackStateAndAppendSendRecvCallbacks(
std::move(host_callback), &test_host_memory_for_device_manager,
send_callbacks, recv_callbacks,
false);
PjRtTransferMetadata metadata;
metadata.device_shape = shape;
auto literal = LiteralUtil::CreateR2({{1.0f, 2.0f}, {3.0f, 4.0f}});
auto chunk = PjRtChunk::AllocateDefault(byte_size);
ASSERT_EQ(chunk.size(), literal.size_bytes());
std::memcpy(chunk.data(), literal.untyped_data(), literal.size_bytes());
absl::Notification done;
PjRtChunk received_chunk;
auto stream = std::make_unique<TestStream>(byte_size, 8,
received_chunk, done);
context->Receive(0, metadata, std::move(stream));
TF_ASSERT_OK(context->OnSend(0, metadata, std::move(chunk)));
done.WaitForNotification();
BorrowingLiteral borrowing_literal(
reinterpret_cast<const char*>(received_chunk.data()), shape);
EXPECT_TRUE(LiteralTestUtil::Equal(literal, borrowing_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/host_callback.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/host_callback_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
629082a0-250e-48af-a434-7fe64da908c3 | cpp | tensorflow/tensorflow | tracked_device_buffer | third_party/xla/xla/pjrt/tracked_device_buffer.cc | third_party/xla/xla/pjrt/tracked_device_buffer_test.cc | #include "xla/pjrt/tracked_device_buffer.h"
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/pjrt/event_pool.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/executable.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/event.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
namespace xla {
void BufferSequencingEvent::SetSequencingEvent(EventPool::Handle event,
se::Stream* stream) {
{
absl::MutexLock lock(&mu_);
defined_status_.emplace(absl::OkStatus());
CHECK(!event_.event());
event_ = std::move(event);
CHECK(streams_defined_on_.empty());
streams_defined_on_.push_back(stream);
sequence_number_.store(event_.sequence_number(), std::memory_order_seq_cst);
}
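  // The event is now recorded; run any tasks that were queued while it was
  // still undefined.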
this->ExecuteFutureTasks();
}
bool BufferSequencingEvent::EventHasBeenRecorded() const {
return event_.event() != nullptr;
}
bool BufferSequencingEvent::IsDefinedNoLock() const {
return defined_status_.IsConcrete();
}
uint64_t BufferSequencingEvent::sequence_number() const {
uint64_t seq = sequence_number_.load(std::memory_order_seq_cst);
return seq;
}
void BufferSequencingEvent::WaitForEventOnStream(se::Stream* stream) {
absl::MutexLock lock(&mu_);
mu_.Await(
absl::Condition(this, &BufferSequencingEvent::EventHasBeenRecorded));
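  // Nothing to do if this stream already waited on (or defined) the event.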
if (std::find(streams_defined_on_.begin(), streams_defined_on_.end(),
stream) != streams_defined_on_.end()) {
return;
}
stream->WaitFor(event_.event()).IgnoreError();
streams_defined_on_.push_back(stream);
}
absl::Status BufferSequencingEvent::WaitForEventOnExternalStream(
std::intptr_t stream) {
absl::MutexLock lock(&mu_);
mu_.Await(
absl::Condition(this, &BufferSequencingEvent::EventHasBeenRecorded));
return event_.event()->WaitForEventOnExternalStream(stream);
}
bool BufferSequencingEvent::IsPredeterminedErrorOrDefinedOn(
se::Stream* stream) {
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &BufferSequencingEvent::IsDefinedNoLock));
if (defined_status_.IsConcrete() && !defined_status_.get().ok()) {
return true;
}
return std::find(streams_defined_on_.begin(), streams_defined_on_.end(),
stream) != streams_defined_on_.end();
}
bool BufferSequencingEvent::IsComplete() {
absl::MutexLock lock(&mu_);
mu_.Await(
absl::Condition(this, &BufferSequencingEvent::EventHasBeenRecorded));
return event_.event()->PollForStatus() == se::Event::Status::kComplete;
}
void BufferSequencingEvent::ExecuteOrAddToFutureTasks(
const std::string& task_name, std::function<void()> task) {
tsl::profiler::TraceMeProducer producer(
"BufferSequencingEvent::ExecuteOrAddToFutureTasks",
tsl::profiler::ContextType::kPjRt);
uint64_t context_id = producer.GetContextId();
auto wrapped_task = [task = std::move(task), context_id]() {
tsl::profiler::TraceMeConsumer consumer("BufferSequencingEvent::Execute",
tsl::profiler::ContextType::kPjRt,
context_id);
task();
};
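  // If the event is not yet defined, queue the task to run once it is;
  // otherwise schedule it immediately.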
{
absl::MutexLock lock(&mu_);
if (!defined_status_.IsConcrete()) {
on_ready_tasks_callback_[task_name] = std::move(wrapped_task);
return;
}
}
thread_pool_->Schedule(std::move(wrapped_task));
}
void BufferSequencingEvent::ExecuteFutureTasks() {
absl::flat_hash_map<std::string, std::function<void()>>
on_ready_tasks_callback;
{
absl::MutexLock lock(&mu_);
on_ready_tasks_callback = std::move(on_ready_tasks_callback_);
}
auto call_all_task_callbacks = [on_ready_tasks_callback =
std::move(on_ready_tasks_callback)]() {
for (auto& [task_name, task_callback] : on_ready_tasks_callback) {
task_callback();
}
};
thread_pool_->Schedule(std::move(call_all_task_callbacks));
}
std::shared_ptr<TrackedDeviceBuffer>
TrackedDeviceBuffer::FromScopedShapedBuffer(
ScopedShapedBuffer* shaped_buffer,
absl::Span<const std::shared_ptr<BufferSequencingEvent>> definition_events,
PjRtDevice* device) {
ShapeTree<se::DeviceMemoryBase>::iterator iterator =
shaped_buffer->buffers().begin();
std::vector<se::DeviceMemoryBase> buffers;
buffers.reserve(1);
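  // Move each leaf buffer out of the ScopedShapedBuffer and clear the
  // original entry so it is not also freed when the ScopedShapedBuffer dies.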
ShapeUtil::ForEachSubshape(
shaped_buffer->on_device_shape(), [&](const Shape&, const ShapeIndex&) {
CHECK(iterator != shaped_buffer->buffers().end());
buffers.push_back(iterator->second);
iterator->second = se::DeviceMemoryBase();
++iterator;
});
CHECK(iterator == shaped_buffer->buffers().end());
return std::make_shared<TrackedDeviceBuffer>(
shaped_buffer->memory_allocator(), device,
absl::Span<se::DeviceMemoryBase>(buffers), definition_events,
nullptr);
}
ShapedBuffer TrackedDeviceBuffer::AsShapedBuffer(
const Shape& on_device_shape) const {
ShapedBuffer shaped_buffer(on_device_shape,
device_->local_device_id().value(),
device_->local_hardware_id().value());
ShapeTree<se::DeviceMemoryBase>::iterator iterator =
shaped_buffer.buffers().begin();
for (const se::DeviceMemoryBase& buf : device_memory_) {
CHECK(iterator != shaped_buffer.buffers().end());
iterator->second = buf;
++iterator;
}
CHECK(iterator == shaped_buffer.buffers().end());
return shaped_buffer;
}
void TrackedDeviceBuffer::AddToInputAsImmutable(
ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
const ShapeTree<MaybeOwningDeviceMemory>::iterator& end) const {
for (const se::DeviceMemoryBase& buf : device_memory_) {
CHECK(*iterator != end);
(*iterator)->second = MaybeOwningDeviceMemory(buf);
++(*iterator);
}
}
void TrackedDeviceBuffer::AddToInputAsDonated(
ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
const ShapeTree<MaybeOwningDeviceMemory>::iterator& end,
ExecutionInput* execution_input,
se::DeviceMemoryAllocator* allocator) const {
for (const se::DeviceMemoryBase& buf : device_memory_) {
CHECK(*iterator != end);
(*iterator)->second = MaybeOwningDeviceMemory(se::OwningDeviceMemory(
buf, device_->local_device_id().value(), allocator));
execution_input->SetUnownedIndex((*iterator)->first);
++(*iterator);
}
}
TrackedDeviceBuffer::TrackedDeviceBuffer(
se::DeviceMemoryAllocator* allocator, PjRtDevice* device,
absl::Span<se::DeviceMemoryBase const> device_memory,
absl::Span<const std::shared_ptr<BufferSequencingEvent>> definition_events,
absl::AnyInvocable<void() &&> on_delete_callback)
: allocator_(allocator),
device_(device),
device_memory_(device_memory.begin(), device_memory.end()),
definition_events_(std::make_move_iterator(definition_events.begin()),
std::make_move_iterator(definition_events.end())),
in_use_(true),
on_delete_callback_(std::move(on_delete_callback)) {}
TrackedDeviceBuffer::~TrackedDeviceBuffer() {
if (allocator_) {
for (const se::DeviceMemoryBase& buffer : device_memory_) {
absl::Status status =
allocator_->Deallocate(device_->local_device_id().value(), buffer);
if (!status.ok()) {
LOG(ERROR) << "Buffer deallocation failed: " << status;
}
}
}
if (on_delete_callback_) {
std::move(on_delete_callback_)();
}
}
void TrackedDeviceBuffer::AddUsageEvent(
se::Stream* usage_stream, std::shared_ptr<BufferSequencingEvent> event,
bool reference_held) {
CHECK(in_use_);
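  // A sequence number of zero means the event has not been recorded yet, so
  // it cannot be ordered against existing usage events; record it separately.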
if (*event == 0) {
usage_events_.push_back({usage_stream, event, reference_held});
return;
}
for (auto& existing : usage_events_) {
if (*existing.event == 0) continue;
if (existing.stream == usage_stream) {
if (*existing.event < *event) {
existing.event = event;
existing.reference_held = reference_held;
}
return;
}
}
usage_events_.push_back({usage_stream, event, reference_held});
}
TrackedDeviceBuffer::StreamAndEventContainer
TrackedDeviceBuffer::LockUseAndTransferUsageEvents() {
CHECK(in_use_);
in_use_ = false;
return std::move(usage_events_);
}
void GetDeviceBufferEvents(
const TrackedDeviceBuffer& buffer, bool get_usage_events,
absl::flat_hash_set<BufferSequencingEvent*>* events) {
if (get_usage_events) {
for (const auto& e : buffer.usage_events()) {
events->insert(e.event.get());
}
} else {
for (const auto& e : buffer.definition_events()) {
events->insert(e.get());
}
}
}
void WaitForBufferDefinitionEventsOnStream(const TrackedDeviceBuffer& buffer,
se::Stream* stream) {
absl::flat_hash_set<BufferSequencingEvent*> events;
GetDeviceBufferEvents(buffer, false, &events);
for (BufferSequencingEvent* event : events) {
event->WaitForEventOnStream(stream);
}
}
} | #include "xla/pjrt/tracked_device_buffer.h"
#include <memory>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/test.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TestDevice : public PjRtDevice {
public:
TestDevice() = default;
PjRtLocalHardwareId local_hardware_id() const override {
return PjRtLocalHardwareId(0);
}
PjRtClient* client() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
bool IsAddressable() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
std::unique_ptr<ScopedAsyncTrackingEvent> CreateAsyncTrackingEvent(
absl::string_view description) const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
absl::Status TransferToInfeed(const LiteralSlice& literal) override {
return Unimplemented("Unimplemented for TestDeivce.");
}
absl::Status TransferFromOutfeed(MutableBorrowingLiteral literal) override {
return Unimplemented("Unimplemented for TestDeivce.");
}
absl::Span<PjRtMemorySpace* const> memory_spaces() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
absl::StatusOr<PjRtMemorySpace*> default_memory_space() const override {
LOG(FATAL) << "Unimplemented for TestDevice.";
}
};
absl::StatusOr<std::shared_ptr<TrackedDeviceBuffer>> MakeArray(
const Shape& shape, LocalClient* client, PjRtDevice* device) {
std::vector<stream_executor::DeviceMemoryBase> device_buffers;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
client->backend().transfer_manager()->HostShapeToDeviceShape(shape),
[&](const Shape& subshape, const ShapeIndex&) -> absl::Status {
TF_ASSIGN_OR_RETURN(
se::OwningDeviceMemory device_memory,
client->backend().memory_allocator()->Allocate(
0,
client->backend().transfer_manager()->GetByteSizeRequirement(
subshape)));
device_buffers.push_back(device_memory.Release());
return absl::OkStatus();
}));
return std::make_shared<TrackedDeviceBuffer>(
client->backend().memory_allocator(), device, device_buffers,
absl::Span<const std::shared_ptr<BufferSequencingEvent>>(), nullptr);
}
TEST(TrackedDeviceBufferTest, AsShapedBuffer) {
LocalClient* client = ClientLibrary::LocalClientOrDie();
TestDevice device;
Shape a_shape = ShapeUtil::MakeShape(F32, {3, 101, 4});
Shape b_shape = ShapeUtil::MakeShape(S8, {77});
Shape c_shape = ShapeUtil::MakeShape(S64, {});
TF_ASSERT_OK_AND_ASSIGN(auto a_buffer, MakeArray(a_shape, client, &device));
TF_ASSERT_OK_AND_ASSIGN(auto b_buffer, MakeArray(b_shape, client, &device));
TF_ASSERT_OK_AND_ASSIGN(auto c_buffer, MakeArray(c_shape, client, &device));
ASSERT_EQ(a_buffer->device_memory().size(), 1);
ASSERT_EQ(b_buffer->device_memory().size(), 1);
ASSERT_EQ(c_buffer->device_memory().size(), 1);
std::vector<se::DeviceMemoryBase> expected_buffer_sequence = {
a_buffer->device_memory()[0], b_buffer->device_memory()[0],
c_buffer->device_memory()[0]};
ShapedBuffer shaped_a = a_buffer->AsShapedBuffer(
client->backend().transfer_manager()->HostShapeToDeviceShape(a_shape));
ShapedBuffer shaped_b = b_buffer->AsShapedBuffer(
client->backend().transfer_manager()->HostShapeToDeviceShape(b_shape));
ShapedBuffer shaped_c = c_buffer->AsShapedBuffer(
client->backend().transfer_manager()->HostShapeToDeviceShape(c_shape));
auto expected_it = expected_buffer_sequence.begin();
for (auto it = shaped_a.buffers().begin(); it != shaped_a.buffers().end();
++it) {
ASSERT_TRUE(expected_it != expected_buffer_sequence.end());
EXPECT_TRUE(expected_it->IsSameAs(it->second));
++expected_it;
}
for (auto it = shaped_b.buffers().begin(); it != shaped_b.buffers().end();
++it) {
ASSERT_TRUE(expected_it != expected_buffer_sequence.end());
EXPECT_TRUE(expected_it->IsSameAs(it->second));
++expected_it;
}
for (auto it = shaped_c.buffers().begin(); it != shaped_c.buffers().end();
++it) {
ASSERT_TRUE(expected_it != expected_buffer_sequence.end());
EXPECT_TRUE(expected_it->IsSameAs(it->second));
++expected_it;
}
EXPECT_TRUE(expected_it == expected_buffer_sequence.end());
}
TEST(TrackedDeviceBufferTest, FromScopedShapedBuffer) {
TestDevice device;
LocalClient* client = ClientLibrary::LocalClientOrDie();
Literal literal = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout<float>({10, 3, 7}, 33.4f),
LiteralUtil::One(S64));
TF_ASSERT_OK_AND_ASSIGN(
ScopedShapedBuffer shaped_buffer,
client->LiteralToShapedBuffer(literal, 0));
std::shared_ptr<TrackedDeviceBuffer> device_buffer =
TrackedDeviceBuffer::FromScopedShapedBuffer(&shaped_buffer, {}, &device);
EXPECT_EQ(device_buffer->device_memory().size(),
ShapeUtil::SubshapeCount(
client->backend().transfer_manager()->HostShapeToDeviceShape(
literal.shape())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tracked_device_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tracked_device_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
910cd118-d757-4424-beac-fac04be8dd3a | cpp | tensorflow/tensorflow | pjrt_client | third_party/xla/xla/python/pjrt_ifrt/pjrt_client.cc | third_party/xla/xla/pjrt/pjrt_client_test.cc | #include "xla/python/pjrt_ifrt/pjrt_client.h"
#include <atomic>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/pjrt/distributed/topology_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/remap_plan.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/topology.h"
#include "xla/python/ifrt/tuple.h"
#include "xla/python/ifrt/value.h"
#include "xla/python/pjrt_ifrt/basic_string_array.h"
#include "xla/python/pjrt_ifrt/pjrt_array.h"
#include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/python/pjrt_ifrt/pjrt_dtype.h"
#include "xla/python/pjrt_ifrt/pjrt_memory.h"
#include "xla/python/pjrt_ifrt/pjrt_remap.h"
#include "xla/python/pjrt_ifrt/pjrt_topology.h"
#include "xla/python/pjrt_ifrt/pjrt_tuple.h"
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
absl::AnyInvocable<void() &&> FromStdFunction(std::function<void()>&& f) {
return f ? std::move(f) : absl::AnyInvocable<void() &&>();
}
AttributeMap MakeAttributeMap(xla::PjRtClient* pjrt_client) {
absl::flat_hash_map<std::string, PjRtValueType> attributes;
attributes.insert({"supports_executable_serialization", true});
if (std::optional<PjRtPluginAttributes> plugin_attributes =
pjrt_client->plugin_attributes();
plugin_attributes.has_value()) {
attributes.insert(
{"pjrt_c_api_major_version",
PjRtValueType(plugin_attributes->pjrt_c_api_major_version)});
attributes.insert(
{"pjrt_c_api_minor_version",
PjRtValueType(plugin_attributes->pjrt_c_api_minor_version)});
for (const auto& [key, value] : plugin_attributes->attributes) {
attributes.insert({key, value});
}
}
return FromPjRtAttributeMap(std::move(attributes));
}
void SerializePjRtDeviceAttributes(
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& attributes,
DeviceProto& device_proto) {
for (const auto& [key, value] : attributes) {
DeviceAttributeProto& attribute = (*device_proto.mutable_attributes())[key];
if (std::holds_alternative<std::string>(value)) {
attribute.set_string_value(std::get<std::string>(value));
} else if (std::holds_alternative<int64_t>(value)) {
attribute.set_int_value(std::get<int64_t>(value));
} else if (std::holds_alternative<std::vector<int64_t>>(value)) {
auto values = std::get<std::vector<int64_t>>(value);
attribute.mutable_int_values()->mutable_values()->Assign(values.begin(),
values.end());
} else if (std::holds_alternative<bool>(value)) {
attribute.set_bool_value(std::get<bool>(value));
} else if (std::holds_alternative<float>(value)) {
attribute.set_float_value(std::get<float>(value));
}
}
}
absl::Status DeserializePjRtDeviceAttributes(
const DeviceProto& device_proto,
absl::flat_hash_map<std::string, PjRtDeviceAttribute>& attributes) {
for (const auto& [key, value] : device_proto.attributes()) {
if (value.has_string_value()) {
attributes[key] = value.string_value();
} else if (value.has_int_value()) {
attributes[key] = value.int_value();
} else if (value.has_int_values()) {
attributes[key] =
std::vector<int64_t>(value.int_values().values().begin(),
value.int_values().values().end());
} else if (value.has_bool_value()) {
attributes[key] = value.bool_value();
} else if (value.has_float_value()) {
attributes[key] = value.float_value();
}
}
return absl::OkStatus();
}
absl::StatusOr<tsl::RCReference<Array>> MakeStringArrayFromHostBuffer(
Client* client, const void* data, DType dtype, Shape shape,
std::optional<absl::Span<const int64_t>> byte_strides,
std::shared_ptr<const Sharding> sharding,
Client::HostBufferSemantics semantics,
std::function<void()> on_done_with_host_buffer) {
auto param_validation = [&]() -> absl::Status {
if (byte_strides.has_value()) {
return absl::InvalidArgumentError(
"byte_strides is not currently supported for making "
"BasicStringArrays.");
}
if (semantics != Client::HostBufferSemantics::kImmutableOnlyDuringCall) {
return absl::InvalidArgumentError(
"HostBufferSemantics other than kImmutableOnlyDuringCall are not "
"currently supported for making BasicStringArrays.");
}
if (!llvm::isa<const SingleDeviceSharding>(sharding.get())) {
return absl::InvalidArgumentError(
absl::StrCat("Only SingleDeviceSharding is supported for making "
"BasicStringArrays: got: ",
sharding->DebugString()));
}
return absl::OkStatus();
}();
TF_RETURN_IF_ERROR(param_validation);
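  // Copy the data into owned std::string storage: with
  // kImmutableOnlyDuringCall semantics the caller may reuse the host buffer
  // as soon as this function returns.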
auto num_elements = shape.num_elements();
auto strings = std::make_shared<std::vector<std::string>>();
strings->reserve(num_elements);
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->reserve(num_elements);
auto element = static_cast<const absl::string_view*>(data);
for (int i = 0; i < num_elements; ++i, ++element) {
strings->push_back(std::string(*element));
string_views->push_back(absl::string_view(strings->back()));
}
std::move(on_done_with_host_buffer)();
BasicStringArray::Buffers buffers;
buffers.push_back(*string_views);
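  // The releaser's captures keep the owned strings (and the views into them)
  // alive until the array no longer needs its buffers.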
auto buffer_releaser = [strings = std::move(strings),
string_views = std::move(string_views)]() {};
return BasicStringArray::Create(
client, std::move(shape), std::move(sharding),
Future<BasicStringArray::Buffers>(std::move(buffers)),
std::move(buffer_releaser));
}
absl::StatusOr<tsl::RCReference<Array>>
AssembleStringArrayFromSingleDeviceStringArrays(
Shape shape, std::shared_ptr<const Sharding> sharding,
absl::Span<tsl::RCReference<Array>> arrays, ArrayCopySemantics semantics) {
struct BufferBackingStore {
explicit BufferBackingStore(int num_shards)
: per_shard_strings(num_shards), per_shard_string_views(num_shards) {}
void clear() {
per_shard_strings.clear();
per_shard_string_views.clear();
}
void CopyBuffer(absl::Span<const absl::string_view> strbuf, int shard_index,
BasicStringArray::Buffers* buffers) {
auto& strings = per_shard_strings[shard_index];
strings.reserve(strbuf.size());
auto& views = per_shard_string_views[shard_index];
views.reserve(strbuf.size());
for (int i = 0; i < strbuf.size(); ++i) {
strings.push_back(std::string(strbuf[i].data(), strbuf[i].size()));
}
for (const auto& str : strings) {
views.push_back(str);
}
(*buffers)[shard_index] = absl::MakeConstSpan(views);
}
std::vector<std::vector<std::string>> per_shard_strings;
std::vector<std::vector<absl::string_view>> per_shard_string_views;
};
auto buffer_backing_store =
std::make_shared<BufferBackingStore>(sharding->devices()->size());
auto on_done_with_buffer = [buffer_holder = buffer_backing_store]() {};
struct BufferCopyingState {
BufferCopyingState(int num_buffers_to_copy,
std::shared_ptr<BufferBackingStore> buffer_backing_store)
: num_buffers_to_copy(num_buffers_to_copy),
buffer_backing_store(std::move(buffer_backing_store)),
buffers(num_buffers_to_copy) {}
absl::Mutex mu;
int num_buffers_to_copy ABSL_GUARDED_BY(mu);
std::shared_ptr<BufferBackingStore> buffer_backing_store
ABSL_GUARDED_BY(mu);
BasicStringArray::Buffers buffers ABSL_GUARDED_BY(mu);
};
auto buffer_copying_state = std::make_shared<BufferCopyingState>(
arrays.size(), std::move(buffer_backing_store));
auto buffers_promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(buffers_promise);
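  // Runs once per shard as its buffers become ready; fulfills the promise
  // after the last shard is copied, or immediately on the first error.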
auto buffer_copier = [state = buffer_copying_state,
promise = buffers_promise](
absl::StatusOr<BasicStringArray::Buffers> strbuf,
int shard_index) mutable {
absl::MutexLock lock(&state->mu);
if (state->num_buffers_to_copy == 0) {
return;
}
if (!strbuf.ok()) {
promise.Set(strbuf.status());
state->num_buffers_to_copy = 0;
state->buffer_backing_store->clear();
state->buffer_backing_store = nullptr;
return;
}
state->buffer_backing_store->CopyBuffer(strbuf->front(), shard_index,
&state->buffers);
if (--state->num_buffers_to_copy > 0) {
return;
}
promise.Set(std::move(state->buffers));
};
for (int i = 0; i < arrays.size(); ++i) {
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(arrays[i].get());
if (!basic_string_array) {
return absl::InvalidArgumentError(
"All single device arrays must be BasicStringArrays");
}
if (!llvm::isa<SingleDeviceSharding>(basic_string_array->sharding())) {
return absl::InvalidArgumentError(absl::StrFormat(
"All single device arrays must have single device sharding. got: %s "
"for shard index: %d",
basic_string_array->sharding().DebugString(), i));
}
basic_string_array->buffers().OnReady(
[shard_index = i, buffer_copier = buffer_copier](
absl::StatusOr<BasicStringArray::Buffers> strbuf) mutable {
buffer_copier(std::move(strbuf), shard_index);
});
}
return BasicStringArray::Create(arrays[0]->client(), std::move(shape),
std::move(sharding), buffers_future,
std::move(on_done_with_buffer));
}
}
char PjRtCompatibleClient::ID = 0;
char PjRtClient::ID = 0;
absl::StatusOr<std::unique_ptr<PjRtClient>> PjRtClient::Create(
PjRtClient::CreateOptions options) {
auto client =
absl::WrapUnique(new PjRtClient(std::move(options.pjrt_client)));
xla::PjRtClient* pjrt_client = client->pjrt_client();
std::vector<std::unique_ptr<PjRtDevice>> devices;
if (!options.kv_store) {
devices.reserve(pjrt_client->devices().size());
for (xla::PjRtDevice* device : pjrt_client->devices()) {
auto ifrt_device = std::make_unique<PjRtDevice>(
client.get(), DeviceId(device->global_device_id().value()),
std::string(device->device_kind()), std::string(device->ToString()),
std::string(device->DebugString()), device->process_index(),
device->Attributes(), device->IsAddressable() ? device : nullptr);
devices.push_back(std::move(ifrt_device));
}
} else {
LocalTopologyProto local_topology;
local_topology.set_node_id(options.process_id);
std::string boot_id_str;
auto boot_id_str_or_status = GetBootIdString();
if (!boot_id_str_or_status.ok()) {
LOG(INFO) << boot_id_str_or_status.status();
} else {
boot_id_str = boot_id_str_or_status.value();
}
local_topology.set_boot_id(boot_id_str);
absl::flat_hash_map<PjRtLocalDeviceId, xla::PjRtDevice*> pjrt_devices;
for (xla::PjRtDevice* device : pjrt_client->addressable_devices()) {
pjrt_devices[device->local_device_id()] = device;
DeviceProto& device_proto = *local_topology.add_devices();
device_proto.set_global_device_id(device->global_device_id().value());
device_proto.set_local_device_ordinal(device->local_device_id().value());
device_proto.set_device_kind(
std::string(device->description().device_kind()));
device_proto.set_to_string(std::string(device->ToString()));
device_proto.set_debug_string(std::string(device->DebugString()));
SerializePjRtDeviceAttributes(device->Attributes(), device_proto);
}
GlobalTopologyProto global_topology;
TF_RETURN_IF_ERROR(ExchangeTopologies(
pjrt_client->platform_name(), options.process_id, options.num_processes,
options.get_local_topology_timeout, options.get_global_topology_timeout,
options.kv_store.get(), local_topology, &global_topology,
false));
int next_slice_index = 0;
absl::flat_hash_map<std::string, int> boot_id_to_slice_index;
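    // Nodes that report the same boot_id are grouped into the same slice:
    // each distinct boot_id is assigned the next slice index.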
for (const LocalTopologyProto& node : global_topology.nodes()) {
int64_t slice_index = -1;
if (!node.boot_id().empty()) {
std::string_view boot_id = node.boot_id();
auto [it, inserted] =
boot_id_to_slice_index.try_emplace(boot_id, next_slice_index);
slice_index = it->second;
if (inserted) {
++next_slice_index;
}
}
bool node_is_me = (node.node_id() == options.process_id);
for (const DeviceProto& device_proto : node.devices()) {
absl::flat_hash_map<std::string, PjRtDeviceAttribute> attributes;
TF_RETURN_IF_ERROR(
DeserializePjRtDeviceAttributes(device_proto, attributes));
if (!attributes.contains("slice_index")) {
attributes["slice_index"] = slice_index;
}
xla::PjRtDevice* pjrt_device = nullptr;
if (node_is_me) {
auto it = pjrt_devices.find(
PjRtLocalDeviceId(device_proto.local_device_ordinal()));
TF_RET_CHECK(it != pjrt_devices.end());
pjrt_device = it->second;
}
auto ifrt_device = std::make_unique<PjRtDevice>(
client.get(), DeviceId(device_proto.global_device_id()),
device_proto.device_kind(), device_proto.to_string(),
device_proto.debug_string(), node.node_id(), std::move(attributes),
pjrt_device);
devices.push_back(std::move(ifrt_device));
}
}
}
client->devices_.reserve(devices.size());
client->device_map_.reserve(pjrt_client->addressable_device_count());
for (auto& ifrt_device : devices) {
client->devices_.push_back(ifrt_device.get());
TF_RET_CHECK(
client->device_id_map_.emplace(ifrt_device->Id(), ifrt_device.get())
.second);
xla::PjRtDevice* pjrt_device = ifrt_device->pjrt_device();
if (pjrt_device) {
TF_RET_CHECK(
client->device_map_.emplace(pjrt_device, ifrt_device.get()).second);
}
client->owned_devices_.push_back(std::move(ifrt_device));
}
client->addressable_devices_.reserve(
pjrt_client->addressable_devices().size());
for (xla::PjRtDevice* device : pjrt_client->addressable_devices()) {
auto it = client->device_map_.find(device);
CHECK(it != client->device_map_.end());
client->addressable_devices_.push_back(it->second);
}
client->memory_map_.reserve(pjrt_client->memory_spaces().size());
for (xla::PjRtMemorySpace* memory_space : pjrt_client->memory_spaces()) {
auto ifrt_memory = std::make_unique<PjRtMemory>(client.get(), memory_space);
client->memory_map_[memory_space] = ifrt_memory.get();
client->owned_memories_.push_back(std::move(ifrt_memory));
}
for (Device* ifrt_device : client->addressable_devices_) {
auto* device = tensorflow::down_cast<PjRtDevice*>(ifrt_device);
auto* pjrt_device = device->pjrt_device();
device->memories_.reserve(pjrt_device->memory_spaces().size());
for (xla::PjRtMemorySpace* pjrt_memory_space :
pjrt_device->memory_spaces()) {
device->memories_.push_back(*client->LookupPjRtMemory(pjrt_memory_space));
}
absl::StatusOr<PjRtMemorySpace*> memory =
pjrt_device->default_memory_space();
if (memory.ok()) {
device->default_memory_ = *client->LookupPjRtMemory(*memory);
} else {
device->default_memory_ = memory.status();
}
}
return client;
}
std::unique_ptr<PjRtClient> PjRtClient::Create(
std::shared_ptr<xla::PjRtClient> pjrt_client) {
PjRtClient::CreateOptions options;
options.pjrt_client = std::move(pjrt_client);
return *Create(std::move(options));
}
PjRtClient::PjRtClient(std::shared_ptr<xla::PjRtClient> pjrt_client)
: pjrt_client_(std::move(pjrt_client)),
default_compiler_(this),
attributes_(MakeAttributeMap(pjrt_client_.get())) {}
PjRtClient::~PjRtClient() = default;
absl::StatusOr<PjRtCompatibleDevice*> PjRtClient::LookupPjRtDevice(
xla::PjRtDevice* pjrt_device) const {
auto it = device_map_.find(pjrt_device);
if (it == device_map_.end()) {
return InvalidArgument("PjRtDevice not found: %s",
pjrt_device->DebugString());
}
return it->second;
}
absl::StatusOr<PjRtCompatibleMemory*> PjRtClient::LookupPjRtMemory(
xla::PjRtMemorySpace* pjrt_memory) const {
auto it = memory_map_.find(pjrt_memory);
if (it == memory_map_.end()) {
return InvalidArgument("PjRtMemorySpace not found: %s",
pjrt_memory->DebugString());
}
return it->second;
}
absl::StatusOr<Device*> PjRtClient::LookupDevice(DeviceId device_id) const {
DCHECK(this);
auto it = device_id_map_.find(device_id);
if (it != device_id_map_.end()) {
return it->second;
}
return InvalidArgument("No matching device found for device_id %d",
device_id.value());
}
absl::StatusOr<Device*> PjRtClient::LookupAddressableDevice(
int local_hardware_id) const {
DCHECK(this);
TF_ASSIGN_OR_RETURN(xla::PjRtDevice * pjrt_device,
pjrt_client_->LookupAddressableDevice(
xla::PjRtLocalDeviceId(local_hardware_id)));
return LookupPjRtDevice(pjrt_device);
}
const AttributeMap& PjRtClient::Attributes() const { return attributes_; }
absl::StatusOr<tsl::RCReference<PjRtCompatibleArray>>
PjRtClient::CreatePjRtArray(std::shared_ptr<PjRtBuffer> pjrt_buffer) {
TF_ASSIGN_OR_RETURN(auto array,
PjRtArray::Create(this, std::move(pjrt_buffer)));
return tsl::RCReference<PjRtCompatibleArray>(std::move(array));
}
absl::StatusOr<tsl::RCReference<PjRtCompatibleArray>>
PjRtClient::CreatePjRtArray(Shape shape, PjRtBuffers pjrt_buffers) {
TF_ASSIGN_OR_RETURN(auto array, PjRtArray::Create(this, std::move(shape),
std::move(pjrt_buffers)));
return tsl::RCReference<PjRtCompatibleArray>(std::move(array));
}
absl::StatusOr<tsl::RCReference<Array>> PjRtClient::MakeArrayFromHostBuffer(
const void* data, DType dtype, Shape shape,
std::optional<absl::Span<const int64_t>> byte_strides,
std::shared_ptr<const Sharding> sharding,
Client::HostBufferSemantics semantics,
std::function<void()> on_done_with_host_buffer) {
DCHECK(this);
if (dtype.kind() == DType::kString) {
return MakeStringArrayFromHostBuffer(this, data, dtype, shape, byte_strides,
sharding, semantics,
on_done_with_host_buffer);
}
if (!llvm::isa<const SingleDeviceSharding>(sharding.get()) &&
!sharding->IsFullyReplicated()) {
return InvalidArgument(
"Only SingleDeviceSharding or fully-replicated sharding is supported: "
"sharding=%s",
sharding->DebugString());
}
TF_ASSIGN_OR_RETURN(auto primitive_type, ToPrimitiveType(dtype));
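  // The host buffer is shared by all shards; fire the caller's
  // on_done_with_host_buffer only after the last per-device transfer is done.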
auto count = std::make_shared<std::atomic<int>>(sharding->devices()->size());
std::function<void()> on_done_with_host_buffer_per_device;
if (on_done_with_host_buffer) {
on_done_with_host_buffer_per_device =
[on_done_with_host_buffer = std::move(on_done_with_host_buffer),
count]() {
if (count->fetch_sub(1, std::memory_order_relaxed) == 1) {
on_done_with_host_buffer();
}
};
} else {
on_done_with_host_buffer_per_device = []() {};
}
PjRtArray::PjRtBuffers buffers;
buffers.reserve(sharding->devices()->size());
for (xla::ifrt::Device* const device : sharding->devices()->devices()) {
std::unique_ptr<PjRtBuffer> buffer;
if (sharding->memory_kind().memory_kind().has_value()) {
Memory* memory = nullptr;
for (Memory* ms : device->Memories()) {
if (ms->Kind() == sharding->memory_kind()) {
memory = ms;
break;
}
}
if (memory == nullptr) {
return InvalidArgument(
"Invalid memory kind: %s; available memory kinds: %s",
*sharding->memory_kind().memory_kind(),
absl::StrJoin(sharding->devices()->devices().front()->Memories(),
", ", [](std::string* out, Memory* ms) {
absl::StrAppend(out, *ms->Kind().memory_kind());
}));
}
TF_ASSIGN_OR_RETURN(
buffer, pjrt_client_->BufferFromHostBuffer(
data, primitive_type, shape.dims(), byte_strides,
semantics, on_done_with_host_buffer_per_device,
tensorflow::down_cast<PjRtMemory*>(memory)->pjrt_memory(),
nullptr));
} else {
if (!device->IsAddressable()) {
return InvalidArgument("Cannot copy array to non-addressable device %s",
device->DebugString());
}
TF_ASSIGN_OR_RETURN(
buffer,
pjrt_client_->BufferFromHostBuffer(
data, primitive_type, shape.dims(), byte_strides, semantics,
on_done_with_host_buffer_per_device,
tensorflow::down_cast<PjRtDevice*>(device)->pjrt_device()));
}
buffers.push_back(std::move(buffer));
}
return PjRtArray::Create(this, dtype, std::move(shape), std::move(sharding),
std::move(buffers));
}
absl::StatusOr<tsl::RCReference<Array>>
PjRtClient::AssembleArrayFromSingleDeviceArrays(
Shape shape, std::shared_ptr<const Sharding> sharding,
absl::Span<tsl::RCReference<Array>> arrays, ArrayCopySemantics semantics) {
DCHECK(this);
if (llvm::isa<const SingleDeviceSharding>(sharding.get())) {
if (arrays.size() != 1) {
return InvalidArgument(
"When the sharding is SingleDeviceSharding, the input arrays size "
"must be one, but the actual size is %d",
arrays.size());
}
return arrays[0];
} else if (!llvm::isa<const OpaqueSharding, const ConcreteSharding,
const ConcreteEvenSharding, const ShardingParamSharding,
const HloSharding>(sharding.get())) {
return InvalidArgument(
"Only SingleDeviceSharding, OpaqueSharding, ConcreteSharding, "
"ConcreteEvenSharding, ShardingParamSharding, HloSharding are "
"supported: sharding=%s",
sharding->DebugString());
}
if (sharding->devices()->size() != arrays.size()) {
return InvalidArgument(
"Number of output shards must match the number of single-shard "
"arrays: %d vs. %d",
sharding->devices()->size(), arrays.size());
}
if (arrays[0]->dtype().kind() == DType::kString) {
return AssembleStringArrayFromSingleDeviceStringArrays(shape, sharding,
arrays, semantics);
}
PjRtArray::PjRtBuffers buffers;
buffers.reserve(arrays.size());
DType dtype = arrays[0]->dtype();
for (int i = 0; i < arrays.size(); ++i) {
if (!llvm::isa<PjRtCompatibleArray>(arrays[i].get())) {
return InvalidArgument(
"Only PjRtCompatibleArray is supported: arrays[%d]=%s", i,
arrays[i]->DebugString());
}
auto* array = static_cast<PjRtCompatibleArray*>(arrays[i].get());
if (array->dtype() != dtype) {
return InvalidArgument(
"Every input must have the same dtype: %s (shard 0) vs. %s (shard "
"%d)",
dtype.DebugString(), array->dtype().DebugString(), i);
}
if (array->sharding().devices()->size() != 1) {
return InvalidArgument(
"Every input must use a single device sharding, but input %d has "
"sharding=%s",
i, array->sharding().DebugString());
}
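    // kAlwaysCopy and kReuseInput currently both share the underlying
    // PjRtBuffer; kDonateInput moves the buffer out of the input array.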
switch (semantics) {
case ArrayCopySemantics::kAlwaysCopy:
buffers.push_back(array->pjrt_buffers().front());
break;
case ArrayCopySemantics::kReuseInput:
buffers.push_back(array->pjrt_buffers().front());
break;
case ArrayCopySemantics::kDonateInput:
buffers.push_back(std::move(array->pjrt_buffers().front()));
break;
}
}
return PjRtArray::Create(this, dtype, std::move(shape), std::move(sharding),
std::move(buffers));
}
absl::StatusOr<std::vector<tsl::RCReference<Array>>> PjRtClient::CopyArrays(
absl::Span<tsl::RCReference<Array>> arrays,
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind, ArrayCopySemantics semantics) {
if (arrays.empty()) {
return std::vector<tsl::RCReference<Array>>();
}
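  // Every input must share the device list and memory kind of the first array.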
for (int i = 1; i < arrays.size(); ++i) {
const auto& sharding = arrays[i]->sharding();
if (*sharding.devices() != *arrays[0]->sharding().devices() ||
sharding.memory_kind() != arrays[0]->sharding().memory_kind()) {
return absl::InvalidArgumentError(
"CopyArrays only supports arrays with the same device list and "
"memory kind");
}
}
std::vector<tsl::RCReference<Array>> new_arrays;
new_arrays.reserve(arrays.size());
for (const auto& array : arrays) {
if (auto* const pjrt_array = llvm::dyn_cast<PjRtArray>(array.get())) {
TF_ASSIGN_OR_RETURN(new_arrays.emplace_back(),
pjrt_array->Copy(devices, memory_kind, semantics));
} else if (auto* const string_array =
llvm::dyn_cast<BasicStringArray>(array.get())) {
TF_ASSIGN_OR_RETURN(new_arrays.emplace_back(),
string_array->Copy(devices, memory_kind, semantics));
} else {
return absl::InvalidArgumentError(
"Unsupported array type for PjRtClient::CopyArrays");
}
}
return new_arrays;
}
absl::StatusOr<std::vector<tsl::RCReference<xla::ifrt::Array>>>
PjRtClient::RemapArrays(const RemapPlan& plan,
absl::Span<tsl::RCReference<xla::ifrt::Array>> arrays,
ArrayCopySemantics semantics) {
return PjRtCompatibleClientRemapArrays(this, plan, arrays, semantics);
}
Future<> PjRtClient::GetReadyFuture(
absl::Span<const tsl::RCReference<Value>> values) {
absl::InlinedVector<Future<>, 1> futures;
futures.reserve(values.size());
for (const auto& value : values) {
futures.push_back(value->GetReadyFuture());
}
return JoinFutures(futures);
}
absl::StatusOr<tsl::RCReference<Tuple>> PjRtClient::MakeTuple(
absl::Span<tsl::RCReference<Value>> values) {
return PjRtTuple::Create(this, values);
}
absl::StatusOr<std::shared_ptr<Topology>> PjRtClient::GetTopologyForDevices(
const tsl::RCReference<xla::ifrt::DeviceList>& devices) const {
TF_ASSIGN_OR_RETURN(auto topology, pjrt_client_->GetTopologyDescription());
return std::make_shared<PjRtTopology>(
std::shared_ptr<const xla::PjRtTopologyDescription>(pjrt_client_,
topology));
}
absl::StatusOr<std::unique_ptr<PjRtLayout>>
PjRtClient::GetDefaultLayoutForDevice(DType dtype,
absl::Span<const int64_t> dims,
Device* device) const {
TF_ASSIGN_OR_RETURN(PrimitiveType element_type, ToPrimitiveType(dtype));
TF_ASSIGN_OR_RETURN(xla::Layout layout,
pjrt_client_->GetDefaultLayout(element_type, dims));
return std::make_unique<PjRtXlaLayout>(std::move(layout));
}
absl::Status PjRtClient::TransferToInfeed(PjRtDevice* device,
const LiteralSlice& literal) {
if (!device->IsAddressable()) {
return InvalidArgument(
"Infeed is only supported on addressable devices "
"but device %s is not addressable",
device->DebugString());
}
return device->pjrt_device()->TransferToInfeed(literal);
}
absl::Status PjRtClient::TransferFromOutfeed(PjRtDevice* device,
MutableBorrowingLiteral literal) {
if (!device->IsAddressable()) {
return InvalidArgument(
"Outfeed is only supported on addressable devices "
"but device %s is not addressable",
device->DebugString());
}
return device->pjrt_device()->TransferFromOutfeed(literal);
}
}
} | #include "xla/pjrt/pjrt_client_test.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/span.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TestClientFactory {
public:
void Register(
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory) {
absl::MutexLock lock(&mu_);
CHECK(!factory_);
factory_ = std::move(factory);
}
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> Get() const {
absl::MutexLock lock(&mu_);
return factory_;
}
private:
mutable absl::Mutex mu_;
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory_
ABSL_GUARDED_BY(mu_);
};
TestClientFactory& GetGlobalTestClientFactory() {
static auto* const factory = new TestClientFactory;
return *factory;
}
absl::StatusOr<std::unique_ptr<PjRtClient>> GetClient() {
return GetGlobalTestClientFactory().Get()();
}
}
void RegisterTestClientFactory(
std::function<absl::StatusOr<std::unique_ptr<PjRtClient>>()> factory) {
GetGlobalTestClientFactory().Register(std::move(factory));
}
namespace {
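// Builds a single-device executable that computes x + 1 on an s32[4] input.
// `alias` aliases the output with parameter 0; `tuplize_arg` wraps the
// parameter in a tuple.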
std::unique_ptr<PjRtLoadedExecutable> MakeIncrementProgram(
PjRtClient* client, bool alias, int device, bool tuplize_arg = false) {
Shape shape = ShapeUtil::MakeShape(S32, {4});
XlaBuilder builder("inc");
if (tuplize_arg) {
shape = ShapeUtil::MakeTupleShape({shape});
}
auto inp = Parameter(&builder, 0, shape, "inp");
if (tuplize_arg) {
inp = GetTupleElement(inp, 0);
}
auto one = ConstantR0<int32_t>(&builder, 1);
auto inc = Add(inp, one);
if (alias) {
    builder.SetUpAlias(/*output_index=*/{}, /*param_number=*/0,
                       /*param_index=*/{});
}
XlaComputation computation = builder.Build(inc).value();
DeviceAssignment assignment(1, 1);
assignment(0, 0) = device;
CompileOptions options;
options.parameter_is_tupled_arguments = tuplize_arg;
options.executable_build_options.set_device_assignment(assignment);
return client->Compile(computation, options).value();
}
class PjRtClientTest
: public ::testing::TestWithParam<ExecuteOptions::ExecutionMode> {};
TEST_P(PjRtClientTest, Execute) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithImmutableUntilTransferCompletes) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithTupleZeroCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
  auto executable = MakeIncrementProgram(client.get(), /*alias=*/false,
                                         /*device=*/0, /*tuplize_arg=*/true);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
[&data]() {
std::fill(data.begin(), data.end(), 1);
},
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
buffer.reset();
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithDonation) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/true, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
TF_ASSERT_OK_AND_ASSIGN(auto results,
executable->Execute({{buffer.get()}}, options));
ASSERT_EQ(results.size(), 1);
ASSERT_EQ(results[0].size(), 1);
TF_ASSERT_OK_AND_ASSIGN(auto literal, results[0][0]->ToLiteralSync());
std::vector<int32_t> expected(4, 1);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST_P(PjRtClientTest, ExecuteWithDonationAbort) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
if (client->platform_id() == CpuId()) {
return;
}
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/true, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
auto external_reference = buffer->AcquireExternalReference();
ExecuteOptions options;
options.execution_mode = GetParam();
auto resultsor = executable->Execute({{buffer.get()}}, options);
ASSERT_FALSE(resultsor.ok());
EXPECT_THAT(resultsor.status().message(),
::testing::HasSubstr(
"Donation requested for buffer with external reference"));
}
TEST_P(PjRtClientTest, ExecuteWithConcurrentUsage) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(
tsl::Env::Default(), "ExecuteWithConcurrentUsage", kNumThreads);
constexpr int kConcurrency = 16;
absl::BlockingCounter blocking_counter(kConcurrency);
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrency);
for (int i = 0; i < kConcurrency; ++i) {
thread_pool.Schedule([&, &result = results[i]]() {
auto results = executable->Execute({{buffer.get()}}, options).value();
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
result = std::move(results[0][0]);
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
std::vector<int32_t> expected(4, 1);
for (const auto& result : results) {
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
TEST_P(PjRtClientTest, ExecuteWithConcurrentUsageAndDonation) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto executable =
      MakeIncrementProgram(client.get(), /*alias=*/false, /*device=*/0);
auto executable_with_donation =
      MakeIncrementProgram(client.get(), /*alias=*/true, /*device=*/0);
std::vector<int32_t> data(4, 0);
std::vector<int32_t> expected(4, 1);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
nullptr, client->addressable_devices()[0]));
ExecuteOptions options;
options.execution_mode = GetParam();
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(),
"ExecuteWithConcurrentUsageAndDonation",
kNumThreads);
constexpr int kConcurrentUsage = 16;
absl::BlockingCounter blocking_counter(kConcurrentUsage + 1);
for (int i = 0; i < kConcurrentUsage; ++i) {
thread_pool.Schedule([&]() {
auto results_or = executable->Execute({{buffer.get()}}, options);
if (results_or.ok()) {
auto& results = *results_or;
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
auto literal = results[0][0]->ToLiteralSync().value();
CHECK(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
blocking_counter.DecrementCount();
});
}
std::unique_ptr<PjRtBuffer> result;
thread_pool.Schedule([&]() {
auto results =
executable_with_donation->Execute({{buffer.get()}}, options).value();
CHECK_EQ(results.size(), 1);
CHECK_EQ(results[0].size(), 1);
result = std::move(results[0][0]);
blocking_counter.DecrementCount();
});
blocking_counter.Wait();
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
INSTANTIATE_TEST_SUITE_P(
PjRtClientTestSuite, PjRtClientTest,
::testing::Values(ExecuteOptions::ExecutionMode::kSynchronous,
ExecuteOptions::ExecutionMode::kAsynchronous));
TEST(PjRtClientTest, CopyToDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto* device_1 = client->addressable_devices()[1];
TF_ASSERT_OK_AND_ASSIGN(auto result, buffer->CopyToDevice(device_1));
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST(PjRtClientTest, CopyToDeviceAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto* device_1 = client->addressable_devices()[1];
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "CopyToDeviceAsync",
kNumThreads);
constexpr int kConcurrentCopy = 16;
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrentCopy);
for (int i = 0; i < kConcurrentCopy; ++i) {
TF_ASSERT_OK_AND_ASSIGN(results[i], buffer->CopyToDevice(device_1));
}
buffer.reset();
for (const auto& result : results) {
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
TEST(PjRtClientTest, CopyToDeviceAsyncExternalCpuOnly) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
ASSERT_GT(client->addressable_devices().size(), 1);
if (client->platform_id() != CpuId()) return;
std::vector<int32_t> data(4, 0);
auto* data_ptr = data.data();
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateViewOfDeviceBuffer(
data_ptr, shape, client->addressable_devices()[0],
[data = std::move(data)]() mutable {
data.clear();
data.shrink_to_fit();
}));
auto* device_1 = client->addressable_devices()[1];
constexpr int kNumThreads = 4;
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(),
"CopyToDeviceAsyncExternal", kNumThreads);
constexpr int kConcurrentCopy = 16;
std::vector<std::unique_ptr<PjRtBuffer>> results(kConcurrentCopy);
for (int i = 0; i < kConcurrentCopy; ++i) {
TF_ASSERT_OK_AND_ASSIGN(results[i], buffer->CopyToDevice(device_1));
}
buffer.reset();
for (const auto& result : results) {
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> MakeFloatBuffer(
PjRtClient* client, const std::vector<float>& data,
absl::Span<const int64_t> dimensions) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
return client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]);
}
TEST(PjRtClientTest, DuplicateDonationError) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
constexpr char kProgram[] =
R"(HloModule DuplicateDonationError, input_output_alias={ {0}: (1, {}, must-alias), {1}: (2, {}, must-alias) }
ENTRY DuplicateDonationError() -> (f32[2, 2], f32[2, 2]) {
%input0 = f32[2, 2] parameter(0)
%input1 = f32[2, 2] parameter(1)
%input2 = f32[2, 2] parameter(2)
%input3 = f32[2, 2] parameter(3)
%tmp1 = f32[2, 2] add(%input0, %input1)
%tmp2 = f32[2, 2] add(%input2, %input3)
ROOT %result = (f32[2, 2], f32[2, 2]) tuple(%tmp1, %tmp2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, {}));
std::vector<float> data(4, 0);
TF_ASSERT_OK_AND_ASSIGN(auto buffer0,
MakeFloatBuffer(client.get(), data, {2, 2}));
TF_ASSERT_OK_AND_ASSIGN(auto buffer1,
MakeFloatBuffer(client.get(), data, {2, 2}));
TF_ASSERT_OK_AND_ASSIGN(auto buffer2,
MakeFloatBuffer(client.get(), data, {2, 2}));
{
auto result = pjrt_executable->Execute({{
buffer0.get(),
buffer1.get(),
buffer1.get(),
buffer0.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(donate(a), donate(a))"));
}
{
auto result = pjrt_executable->Execute({{
buffer1.get(),
buffer1.get(),
buffer2.get(),
buffer0.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(a, donate(a))"));
}
{
auto result = pjrt_executable->Execute({{
buffer0.get(),
buffer1.get(),
buffer2.get(),
buffer2.get(),
}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("f(donate(a), a)"));
}
}
TEST(PjRtClientTest, GetDefaultLayout) {}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
79f4067c-cdba-4150-810e-3c3b509d548f | cpp | tensorflow/tensorflow | mlir_to_hlo | third_party/xla/xla/pjrt/mlir_to_hlo.cc | third_party/xla/xla/pjrt/mlir_to_hlo_test.cc | #include "xla/pjrt/mlir_to_hlo.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Bytecode/BytecodeWriter.h"
#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MLProgram/IR/MLProgram.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "shardy/dialect/sdy/ir/register.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/Register.h"
#include "stablehlo/dialect/Serialization.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "stablehlo/dialect/Version.h"
#include "stablehlo/transforms/Passes.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/service/spmd/shardy/utils.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::Status MlirToXlaComputation(mlir::ModuleOp module,
XlaComputation& xla_computation,
bool use_tuple_args, bool return_tuple,
bool use_shardy) {
mlir::MLIRContext* context = module->getContext();
mlir::BaseScopedDiagnosticHandler diagnostic_handler(context);
{
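    // Legalize StableHLO and CHLO down to MHLO, then canonicalize and sink
    // constants into control flow before the HLO import.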
mlir::PassManager pm(context);
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createChloLegalizeToHloPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
if (failed(pm.run(module))) {
VLOG(1) << "MHLO->HLO lowering passes failed.";
module->dump();
return diagnostic_handler.ConsumeStatus();
}
VLOG(5) << "MHLO module after lowering, before HLO import ";
if (VLOG_IS_ON(5)) {
module->dump();
}
}
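  // With Shardy, argument tupling is recorded as a frontend attribute
  // (sdy::kUseTupleArgs) instead of being applied during this conversion.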
if (use_tuple_args && use_shardy) {
sdy::addFrontendAttribute(module, sdy::kUseTupleArgs,
mlir::StringAttr::get(context, "t"));
use_tuple_args = false;
}
mlir::MlirToHloConversionOptions options;
options.use_tuple_args = use_tuple_args;
options.return_tuple = return_tuple;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> hlo_module,
mlir::ConvertMlirHloToHloModule(module, options));
xla_computation = XlaComputation(hlo_module->ToProto());
return absl::OkStatus();
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ParseMlirModuleString(
absl::string_view mlir_module_str, mlir::MLIRContext& context) {
mlir::DialectRegistry registry;
registry.insert<mlir::arith::ArithDialect>();
registry.insert<mlir::func::FuncDialect>();
registry.insert<mlir::ml_program::MLProgramDialect>();
registry.insert<mlir::shape::ShapeDialect>();
mlir::func::registerAllExtensions(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::sdy::registerAllDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
context.appendDialectRegistry(registry);
mlir::BaseScopedDiagnosticHandler diagnostic_handler(&context);
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(
llvm::StringRef(mlir_module_str.data(), mlir_module_str.size()),
mlir::ParserConfig{&context});
if (!module) {
mlir::emitError(mlir::UnknownLoc::get(&context))
<< "Failed to parse using StableHLO v"
<< mlir::vhlo::Version::getCurrentVersion() << ", "
<< "this could indicate forward incompatibility, >12w old "
"unsupported plugin, or a portable artifact that needs to be "
"further downgraded.";
return diagnostic_handler.ConsumeStatus();
}
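  // Upgrade any versioned StableHLO (VHLO) in the parsed module to the
  // current StableHLO version.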
TF_RETURN_IF_ERROR(UpgradeVersionedStablehlo(*module));
return std::move(module);
}
absl::Status ParseMlirModuleStringAndConvertToXlaComputation(
absl::string_view mlir_module_str, XlaComputation& xla_computation,
bool use_tuple_args, bool return_tuple) {
mlir::MLIRContext context;
TF_ASSIGN_OR_RETURN(mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(mlir_module_str, context));
return xla::MlirToXlaComputation(*module, xla_computation, use_tuple_args,
return_tuple, false);
}
absl::StatusOr<std::string> SerializeUsingNativeBytecode(
mlir::ModuleOp module) {
std::string bytecode;
llvm::raw_string_ostream os(bytecode);
mlir::BytecodeWriterConfig config;
config.setDesiredBytecodeVersion(1);
mlir::OwningOpRef<mlir::ModuleOp> cloned = module.clone();
if (mlir::failed(mlir::writeBytecodeToFile(*cloned, os, config))) {
return absl::InvalidArgumentError("mlir::writeBytecodeToFile failed");
}
return bytecode;
}
absl::StatusOr<std::string> SerializeUsingVersionedStablehlo(
mlir::ModuleOp mlir_module, absl::string_view target, bool inplace) {
mlir::MLIRContext* context = mlir_module->getContext();
mlir::BaseScopedDiagnosticHandler diagnostic_handler(context);
mlir::PassManager pm(context);
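  // Expand CHLO and shape ops into StableHLO that is compatible with the
  // requested target version before emitting the portable artifact.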
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createChloLegalizeToHighLevelMhloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createChloLegalizeToStablehloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createStablehloCompatibilityExpanderPass(
{std::string(target)}));
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createChloLegalizeToStablehloPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::stablehlo::createShapeLegalizeToStablehloPass());
pm.addPass(mlir::createReconcileUnrealizedCastsPass());
pm.addPass(mlir::mhlo::createHloLegalizeToStablehloPass());
if (!mlir::succeeded(pm.run(mlir_module))) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(
absl::StrCat("CHLO => [MHLO+Shape] => StableHLO failed;\n\nDetailed "
"error from MLIR: ",
status.message()));
}
mlir::OwningOpRef<mlir::ModuleOp> cloned;
if (!inplace) {
cloned = mlir_module.clone();
mlir_module = *cloned;
}
std::string buffer;
llvm::raw_string_ostream os(buffer);
if (failed(mlir::stablehlo::serializePortableArtifact(mlir_module, target,
os))) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(absl::StrCat(
"Failed to serialize StableHLO;\n\nDetailed error from MLIR: ",
status.message()));
}
return buffer;
}
absl::Status UpgradeVersionedStablehlo(mlir::ModuleOp mlir_module) {
mlir::PassManager pm(mlir_module->getContext());
mlir::stablehlo::createStablehloDeserializePipeline(pm);
if (!mlir::succeeded(pm.run(mlir_module)))
return xla::InvalidArgument("Failed to upgrade versioned StableHLO.");
return absl::OkStatus();
}
std::string GetDefaultStablehloVersion(std::optional<int64_t> plugin_version) {
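  // Plugins with a PJRT plugin version below 54 are pinned to StableHLO
  // 0.19.0; newer plugins get the 12-week compatibility-window version.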
if (plugin_version.has_value() && plugin_version.value() < 54) {
return "0.19.0";
}
return mlir::vhlo::Version::fromCompatibilityRequirement(
mlir::vhlo::Version::CompatibilityRequirement::WEEK_12)
.toString();
}
absl::StatusOr<std::string> Serialize(mlir::ModuleOp module,
absl::string_view target, bool inplace) {
bool all_stablehlo = true;
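  // Only pure StableHLO/CHLO/func modules can be emitted as a versioned
  // portable artifact; anything else falls back to native MLIR bytecode.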
module->walk([&](mlir::Operation* op) {
if (!llvm::isa<mlir::ModuleOp>(op) &&
!llvm::isa<mlir::stablehlo::StablehloDialect, mlir::func::FuncDialect,
mlir::chlo::ChloDialect>(op->getDialect())) {
all_stablehlo = false;
return mlir::WalkResult::interrupt();
}
return mlir::WalkResult::advance();
});
if (!all_stablehlo) {
return SerializeUsingNativeBytecode(module);
}
return SerializeUsingVersionedStablehlo(module, target, inplace);
}
} | #include "xla/pjrt/mlir_to_hlo.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "stablehlo/api/PortableApi.h"
#include "xla/test.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
MATCHER_P(IsVhloArtifact, version, "") {
return ExplainMatchResult(HasSubstr(absl::StrCat("StableHLO_v", version)),
arg, result_listener);
}
TEST(MlirToHloTest, StablehloTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%cst = stablehlo.constant dense<1.0> : tensor<1x2xf32>
%0 = stablehlo.add %arg0, %cst : tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, IsVhloArtifact("1.0.0"));
}
TEST(MlirToHloTest, ChloTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%cst = stablehlo.constant dense<1.0> : tensor<1x2xf32>
%0 = chlo.broadcast_add %arg0, %cst : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, IsVhloArtifact("1.0.0"));
}
TEST(MlirToHloTest, ChloTanOpTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%0 = chlo.tan %arg0 : tensor<1x2xf32> -> tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, IsVhloArtifact("1.0.0"));
}
TEST(MlirToHloTest, MhloTest) {
constexpr char kProgram[] =
R"(
func.func @add(%arg0: tensor<1x2xf32>) -> tensor<1x2xf32> {
%cst = mhlo.constant dense<1.0> : tensor<1x2xf32>
%0 = mhlo.add %arg0, %cst : tensor<1x2xf32>
return %0 : tensor<1x2xf32>
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(kProgram, context));
TF_ASSERT_OK_AND_ASSIGN(std::string blob, Serialize(*module, "1.0.0"));
EXPECT_THAT(blob, Not(IsVhloArtifact("1.0.0")));
}
TEST(MlirToHloTest, InvalidBytecodeTest) {
unsigned char invalid_future_vhlo_mlirbc[] = {
0x4d, 0x4c, 0xef, 0x52, 0x0d, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x48,
0x4c, 0x4f, 0x5f, 0x76, 0x32, 0x2e, 0x30, 0x2e, 0x30, 0x00, 0x01, 0x19,
0x05, 0x01, 0x05, 0x09, 0x01, 0x03, 0x0b, 0x03, 0x07, 0x0f, 0x13, 0x17,
0x03, 0x2b, 0x15, 0x07, 0x01, 0x0b, 0x0b, 0x13, 0x13, 0x13, 0x13, 0x03,
0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x1f, 0x03, 0x07, 0x0f, 0x13, 0x07, 0x02,
0x53, 0x05, 0x0d, 0x17, 0x01, 0x03, 0x03, 0x17, 0x01, 0x05, 0x07, 0x17,
0x01, 0x07, 0x15, 0x17, 0x01, 0x09, 0x0b, 0x03, 0x01, 0x23, 0x03, 0x1d,
0x0f, 0x1d, 0x11, 0x1f, 0x01, 0x09, 0x00, 0x00, 0x80, 0x3f, 0x29, 0x01,
0x05, 0x11, 0x01, 0x03, 0x01, 0x09, 0x04, 0x41, 0x05, 0x01, 0x50, 0x03,
0x01, 0x07, 0x04, 0x31, 0x03, 0x01, 0x05, 0x03, 0x50, 0x05, 0x03, 0x07,
0x04, 0x1d, 0x03, 0x03, 0x09, 0x05, 0x42, 0x07, 0x05, 0x03, 0x01, 0x07,
0x04, 0x09, 0x03, 0x01, 0x06, 0x03, 0x01, 0x05, 0x01, 0x00, 0xad, 0x13,
0x0f, 0x0b, 0x1b, 0x15, 0x1b, 0x11, 0x0f, 0x0b, 0x11, 0x62, 0x75, 0x69,
0x6c, 0x74, 0x69, 0x6e, 0x00, 0x76, 0x68, 0x6c, 0x6f, 0x00, 0x6d, 0x6f,
0x64, 0x75, 0x6c, 0x65, 0x00, 0x66, 0x75, 0x6e, 0x63, 0x5f, 0x76, 0x31,
0x00, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x76, 0x39,
0x39, 0x00, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x76, 0x31, 0x00,
0x2f, 0x74, 0x6d, 0x70, 0x2f, 0x74, 0x32, 0x2e, 0x6d, 0x6c, 0x69, 0x72,
0x00, 0x6d, 0x61, 0x69, 0x6e, 0x00, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
0x00, 0x08, 0x19, 0x07, 0x05, 0x01, 0x01, 0x0b, 0x0b, 0x0d, 0x0b, 0x0f,
0x11, 0x03, 0x13};
unsigned int invalid_future_vhlo_mlirbc_len = 243;
std::string buffer(reinterpret_cast<char*>(invalid_future_vhlo_mlirbc),
invalid_future_vhlo_mlirbc_len);
mlir::MLIRContext context;
auto status = ParseMlirModuleString(buffer, context);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.status().message(), HasSubstr("vhlo.constant_v99"));
EXPECT_THAT(status.status().message(), HasSubstr("StableHLO_v2.0.0"));
EXPECT_THAT(status.status().message(),
HasSubstr(mlir::stablehlo::getCurrentVersion()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/mlir_to_hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/mlir_to_hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f9eba1a-e055-4de1-82c4-1871f84d29b9 | cpp | tensorflow/tensorflow | utils | tensorflow/compiler/mlir/tfrt/transforms/utils.cc | tensorflow/compiler/mlir/tf2xla/api/v2/testing/utils_test.cc | #include "tensorflow/compiler/mlir/tfrt/transforms/utils.h"
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/host_runtime/tfrt_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
namespace tensorflow {
bool IsResourceArgument(mlir::Value value) {
auto arg = mlir::dyn_cast<mlir::BlockArgument>(value);
if (!arg) return false;
auto func = llvm::cast<mlir::func::FuncOp>(arg.getOwner()->getParentOp());
return func.getArgAttr(arg.getArgNumber(), "tf.resource_name") != nullptr;
}
bool IsResultVariable(const mlir::Value &original_operand,
const mlir::Value &operand) {
if (mlir::isa<mlir::OpResult>(original_operand)) {
auto defining_op = original_operand.getDefiningOp();
if (llvm::isa<mlir::TF::ReadVariableOp>(defining_op) &&
defining_op->getNumOperands() == 1) {
return true;
} else if (llvm::isa<mlir::TF::_TfrtGetResourceOp>(defining_op)) {
return true;
}
return false;
}
return IsResourceArgument(operand);
}
std::optional<std::string> CanonicalizeTensorflowFunctionName(
const mlir::SymbolTable &symbol_table, absl::string_view mlir_func_name,
bool use_mlir_func_name) {
if (use_mlir_func_name) {
return std::string(mlir_func_name);
}
auto callee =
symbol_table.lookup<mlir::func::FuncOp>(std::string(mlir_func_name));
if (!callee) return std::nullopt;
mlir::StringAttr original_func_name =
callee->getAttrOfType<mlir::StringAttr>("tf._original_func_name");
if (!original_func_name) {
mlir_func_name.remove_suffix(1);
return std::string(mlir_func_name);
}
return original_func_name.str();
}
bool IsSessionInitializer(mlir::func::FuncOp op) {
auto session_initializer_op = mlir::tf_saved_model::GetSessionInitializerOp(
op->getParentOfType<mlir::ModuleOp>());
if (!session_initializer_op) return false;
for (auto sym_ref : session_initializer_op.getInitializers()) {
if (op.getSymName() ==
mlir::cast<mlir::FlatSymbolRefAttr>(sym_ref).getValue())
return true;
}
return false;
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/testing/utils.h"
#include <stdlib.h>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace testing {
namespace {
class UtilsTest : public ::testing::Test {};
TEST_F(UtilsTest, TestDataPathSucceeds) {
std::string expected_test_data_path_regex =
".*tensorflow/compiler/mlir/tf2xla/api/v2/testdata/";
std::string result_test_data_path = TestDataPath();
EXPECT_THAT(result_test_data_path,
::testing::ContainsRegex(expected_test_data_path_regex));
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/testing/utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5bad8880-efb8-4120-a68e-51035a8d7284 | cpp | tensorflow/tensorflow | pjrt_api | third_party/xla/xla/pjrt/pjrt_api.cc | third_party/xla/xla/pjrt/pjrt_api_test.cc | #include "xla/pjrt/pjrt_api.h"
#include <cstdlib>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#if !defined(PLATFORM_WINDOWS)
#include <dlfcn.h>
#endif
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace pjrt {
constexpr int kMinPjRtMinor = 29;
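// Maps the canonical (lower-cased) device type to its PJRT_Api and a flag
// recording whether PJRT_Plugin_Initialize has already run for that plugin.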
static auto* pjrt_apis =
new absl::flat_hash_map<std::string, std::pair<const PJRT_Api*, bool>>{};
static std::string CanonicalizeDeviceType(absl::string_view device_type) {
return absl::AsciiStrToLower(device_type);
}
absl::StatusOr<const PJRT_Api*> PjrtApi(absl::string_view device_type) {
std::string canonicalize_device_type = CanonicalizeDeviceType(device_type);
auto iter = pjrt_apis->find(canonicalize_device_type);
if (iter == pjrt_apis->end()) {
return tsl::errors::NotFound("PJRT_Api not found for device type ",
canonicalize_device_type);
}
return iter->second.first;
}
absl::Status SetPjrtApi(absl::string_view device_type, const PJRT_Api* api) {
std::string canonicalize_device_type = CanonicalizeDeviceType(device_type);
if (auto iter = pjrt_apis->find(canonicalize_device_type);
iter != pjrt_apis->end()) {
return tsl::errors::AlreadyExists(
"PJRT_Api already exists for device type ", canonicalize_device_type);
}
(*pjrt_apis)[canonicalize_device_type] =
std::make_pair(api, false);
LOG(INFO) << "PJRT_Api is set for device type " << canonicalize_device_type;
return absl::OkStatus();
}
typedef const PJRT_Api* (*PjrtApiInitFn)();
absl::StatusOr<const PJRT_Api*> LoadPjrtPlugin(absl::string_view device_type,
absl::string_view library_path) {
#ifdef PLATFORM_WINDOWS
return tsl::errors::Unimplemented(
"LoadPjrtPlugin is not implemented on windows yet.");
#else
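  // Load the plugin shared library and resolve its GetPjrtApi entry point.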
void* library = dlopen(library_path.data(), RTLD_LAZY);
if (library == nullptr) {
return tsl::errors::Internal("Failed to open ", library_path, ": ",
dlerror());
}
PjrtApiInitFn init_fn;
*reinterpret_cast<void**>(&init_fn) = dlsym(library, "GetPjrtApi");
if (init_fn == nullptr) {
return tsl::errors::NotFound("GetPjrtApi not found in ", library_path);
}
LOG(INFO) << "GetPjrtApi was found for " << device_type << " at "
<< library_path;
const PJRT_Api* api = init_fn();
TF_RETURN_IF_ERROR(SetPjrtApi(device_type, api));
return api;
#endif
}
absl::StatusOr<bool> IsPjrtPluginInitialized(absl::string_view device_type) {
std::string canonicalize_device_type = CanonicalizeDeviceType(device_type);
auto iter = pjrt_apis->find(canonicalize_device_type);
if (iter == pjrt_apis->end()) {
return absl::NotFoundError(absl::StrCat(
"PJRT_Api not found for device type ", canonicalize_device_type,
". Call SetPjrtApi before calling IsPjrtPluginInitialized."));
}
return iter->second.second;
}
static bool IsPjRtCompatibilityEnabled() {
const char* val = getenv("ENABLE_PJRT_COMPATIBILITY");
if (val == nullptr) {
return true;
}
bool enabled = false;
if (!absl::SimpleAtob(val, &enabled)) {
return false;
}
return enabled;
}
absl::Status InitializePjrtPlugin(absl::string_view device_type) {
std::string canonicalize_device_type = CanonicalizeDeviceType(device_type);
auto iter = pjrt_apis->find(canonicalize_device_type);
if (iter == pjrt_apis->end()) {
return absl::NotFoundError(absl::StrCat(
"PJRT_Api not found for device type ", canonicalize_device_type,
". Call SetPjrtApi before calling IsPjrtPluginInitialized."));
}
if (iter->second.second) {
return absl::InvalidArgumentError(
absl::StrCat("InitializePjrtPlugin requested to run on already "
"initialized plugin ",
canonicalize_device_type));
}
const PJRT_Api* pjrt_api = iter->second.first;
LOG(INFO) << "The PJRT plugin has PJRT API version "
<< pjrt_api->pjrt_api_version.major_version << "."
<< pjrt_api->pjrt_api_version.minor_version
<< ". The framework PJRT API version is " << PJRT_API_MAJOR << "."
<< PJRT_API_MINOR << ".";
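  // In compatibility mode (the default) only the major version must match and
  // the plugin's minor version must be at least kMinPjRtMinor; otherwise an
  // exact major.minor match is required.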
if (IsPjRtCompatibilityEnabled()) {
if (pjrt_api->pjrt_api_version.major_version != PJRT_API_MAJOR) {
return absl::InvalidArgumentError(absl::StrCat(
"Mismatched PJRT plugin PJRT API major version (",
pjrt_api->pjrt_api_version.major_version,
") and framework PJRT API major version ", PJRT_API_MAJOR, ")."));
}
if (pjrt_api->pjrt_api_version.minor_version < kMinPjRtMinor) {
return absl::InvalidArgumentError(absl::StrCat(
"Plugin PJRT API version ", pjrt_api->pjrt_api_version.major_version,
".", pjrt_api->pjrt_api_version.minor_version,
" is older than the minimum supported version ", PJRT_API_MAJOR, ".",
kMinPjRtMinor));
}
} else {
if (pjrt_api->pjrt_api_version.major_version != PJRT_API_MAJOR ||
pjrt_api->pjrt_api_version.minor_version != PJRT_API_MINOR) {
return absl::InvalidArgumentError(
absl::StrCat("Mismatched PJRT plugin PJRT API version (",
pjrt_api->pjrt_api_version.major_version, ".",
pjrt_api->pjrt_api_version.minor_version,
") and framework PJRT API version ", PJRT_API_MAJOR, ".",
PJRT_API_MINOR, ")."));
}
}
PJRT_Plugin_Initialize_Args args;
args.struct_size = PJRT_Plugin_Initialize_Args_STRUCT_SIZE;
args.extension_start = nullptr;
RETURN_STATUS_IF_PJRT_ERROR(pjrt_api->PJRT_Plugin_Initialize(&args),
pjrt_api);
iter->second.second = true;
return absl::OkStatus();
}
} | #include "xla/pjrt/pjrt_api.h"
#include <string>
#include <gtest/gtest.h>
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(PjRtApiTest, SetAndGetGlobalPjRtApi) {
PJRT_Api api;
api.struct_size = PJRT_Api_STRUCT_SIZE;
api.pjrt_api_version.major_version = PJRT_API_MAJOR;
api.pjrt_api_version.minor_version = PJRT_API_MINOR;
TF_ASSERT_OK(pjrt::SetPjrtApi("CPU", &api));
TF_ASSERT_OK_AND_ASSIGN(const PJRT_Api* output, pjrt::PjrtApi("CPU"));
TF_ASSERT_OK_AND_ASSIGN(const PJRT_Api* output_lowercase,
pjrt::PjrtApi("cpu"));
TF_ASSERT_OK_AND_ASSIGN(bool is_initialized,
pjrt::IsPjrtPluginInitialized("CPU"));
EXPECT_FALSE(is_initialized);
EXPECT_EQ(output, &api);
EXPECT_EQ(output_lowercase, &api);
EXPECT_THAT(pjrt::SetPjrtApi("CPU", &api),
StatusIs(tensorflow::error::ALREADY_EXISTS,
HasSubstr("PJRT_Api already exists for device type")));
EXPECT_THAT(pjrt::PjrtApi("TPU"),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PJRT_Api not found for device type tpu")));
}
TEST(PjRtApiTest, InitPjRtPlugin) {
PJRT_Api api;
api.struct_size = PJRT_Api_STRUCT_SIZE;
api.pjrt_api_version.major_version = PJRT_API_MAJOR;
api.pjrt_api_version.minor_version = PJRT_API_MINOR;
api.PJRT_Plugin_Initialize = pjrt::PJRT_Plugin_Initialize_NoOp;
std::string plugin_name = "plugin";
TF_ASSERT_OK(pjrt::SetPjrtApi(plugin_name, &api));
TF_ASSERT_OK_AND_ASSIGN(bool is_initialized,
pjrt::IsPjrtPluginInitialized(plugin_name));
EXPECT_FALSE(is_initialized);
TF_ASSERT_OK(pjrt::InitializePjrtPlugin(plugin_name));
TF_ASSERT_OK_AND_ASSIGN(is_initialized,
pjrt::IsPjrtPluginInitialized(plugin_name));
EXPECT_TRUE(is_initialized);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
634f061d-fae5-4c7b-a795-7b8955b04bf4 | cpp | tensorflow/tensorflow | transpose | tensorflow/lite/delegates/gpu/common/tasks/transpose.cc | tensorflow/lite/delegates/xnnpack/transpose_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/transpose.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
std::string GetTransposeCode(const OperationDef& op_def,
const TransposeAttributes& attr) {
const std::string batch_id =
op_def.dst_tensors[0].HasAxis(Axis::BATCH) ? "B" : "0";
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GLOBAL_ID_0;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GLOBAL_ID_0;\n";
}
c += " int Y = GLOBAL_ID_1;\n";
c += " int S = GLOBAL_ID_2;\n";
c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || "
"S >= args.dst_tensor.Slices()) { \n";
c += " return; \n";
c += " } \n";
c += " args.src_tensor::scalar_type temps[4];\n";
c += " temps[0] = args.src_tensor::scalar_zero_value;\n";
c += " temps[1] = args.src_tensor::scalar_zero_value;\n";
c += " temps[2] = args.src_tensor::scalar_zero_value;\n";
c += " temps[3] = args.src_tensor::scalar_zero_value;\n";
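  // remap[] sends each source axis to the destination axis it ends up on, so
  // a source coordinate is read from the matching destination coordinate.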
int remap[4];
remap[attr.perm.b] = 0;
remap[attr.perm.h] = 1;
remap[attr.perm.w] = 2;
remap[attr.perm.c] = 3;
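  // If channels stay on the channel axis, a full 4-channel vector can be read
  // at once; otherwise each destination channel is gathered individually.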
if (attr.perm.c == 3) {
const std::string bhw[] = {batch_id, "Y", "X"};
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " args.src_tensor.SetBatchRef(" + bhw[remap[0]] + ");\n";
}
c += " int s_y = " + bhw[remap[1]] + ";\n";
c += " int s_x = " + bhw[remap[2]] + ";\n";
c += " args.src_tensor::type t = args.src_tensor.Read(s_x, s_y, S);\n";
c += " temps[0] = t.x;\n";
c += " temps[1] = t.y;\n";
c += " temps[2] = t.z;\n";
c += " temps[3] = t.w;\n";
} else {
c += " for (int i = 0; i < 4; ++i) {\n";
c += " int dst_channel = S * 4 + i;\n";
c += " if (dst_channel < args.dst_tensor.Channels()) {\n";
const std::string bhwc[] = {batch_id, "Y", "X", "dst_channel"};
if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) {
c += " args.src_tensor.SetBatchRef(" + bhwc[remap[0]] + ");\n";
}
c += " int s_y = " + bhwc[remap[1]] + ";\n";
c += " int s_x = " + bhwc[remap[2]] + ";\n";
c += " int s_c = " + bhwc[remap[3]] + ";\n";
c += " args.src_tensor.ReadPerChannel(temps[i], s_x, s_y, s_c);\n";
c += " }\n";
c += " }\n";
}
c += " args.src_tensor::type result;\n";
c += " result.x = temps[0];\n";
c += " result.y = temps[1];\n";
c += " result.z = temps[2];\n";
c += " result.w = temps[3];\n";
c += " args.dst_tensor.Write(result, X, Y, S);\n";
c += "}\n";
return c;
}
}
GPUOperation CreateTranspose(const OperationDef& definition,
const TransposeAttributes& attr) {
GPUOperation op(definition);
op.AddSrcTensor("src_tensor", definition.src_tensors[0]);
op.AddDstTensor("dst_tensor", definition.dst_tensors[0]);
op.code_ = GetTransposeCode(definition, attr);
op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ;
return op;
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/transpose_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
TEST(Transpose, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::vector<int32_t> perm{0};
TransposeTester()
.num_dims(1)
.input_shape({37})
.perm(perm)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
}
TEST(Transpose, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::vector<int32_t> perm{0, 1};
do {
TransposeTester()
.num_dims(2)
.input_shape({37, 113})
.perm(perm)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
} while (std::next_permutation(perm.begin(), perm.end()));
}
TEST(Transpose, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::vector<int32_t> perm{0, 1, 2};
do {
TransposeTester()
.num_dims(3)
.input_shape({5, 7, 11})
.perm(perm)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
} while (std::next_permutation(perm.begin(), perm.end()));
}
TEST(Transpose, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::vector<int32_t> perm{0, 1, 2, 3};
do {
TransposeTester()
.num_dims(4)
.input_shape({5, 7, 11, 13})
.perm(perm)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
} while (std::next_permutation(perm.begin(), perm.end()));
}
TEST(Transpose, 5D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::vector<int32_t> perm{0, 1, 2, 3, 4};
do {
TransposeTester()
.num_dims(5)
.input_shape({3, 5, 7, 11, 13})
.perm(perm)
.Test(TensorType_FLOAT32, xnnpack_delegate.get());
} while (std::next_permutation(perm.begin(), perm.end()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/transpose.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/transpose_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8adb0ed0-9542-4a16-afc5-37b3e581de81 | cpp | tensorflow/tensorflow | pjrt_compiler | third_party/xla/xla/python/pjrt_ifrt/pjrt_compiler.cc | third_party/xla/xla/pjrt/pjrt_compiler_test.cc | #include "xla/python/pjrt_ifrt/pjrt_compiler.h"
#include <memory>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/topology.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_executable.h"
#include "xla/python/pjrt_ifrt/pjrt_topology.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
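// Storage for the LLVM-style RTTI id used by llvm::isa/llvm::dyn_cast on the
// ifrt::Compiler hierarchy.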
char PjRtCompiler::ID = 0;
absl::StatusOr<std::unique_ptr<LoadedExecutable>> PjRtCompiler::Compile(
std::unique_ptr<Program> program, std::unique_ptr<CompileOptions> options) {
DCHECK(this);
const auto* xla_program = llvm::dyn_cast<HloProgram>(program.get());
if (xla_program == nullptr) {
return absl::InvalidArgumentError("PjRtCompiler requires an HloProgram");
}
TF_ASSIGN_OR_RETURN(auto xla_compile_options,
GetXlaCompileOptions(std::move(options)));
return PjRtLoadedExecutable::Create(
client_, xla_program->mlir_module,
std::move(xla_compile_options->compile_options),
std::move(xla_compile_options->loaded_host_callbacks));
}
absl::StatusOr<std::unique_ptr<Executable>> PjRtCompiler::Compile(
std::unique_ptr<Program> program, const Topology& topology,
std::unique_ptr<CompileOptions> options) {
DCHECK(this);
const auto* xla_program = llvm::dyn_cast<HloProgram>(program.get());
if (xla_program == nullptr) {
return absl::InvalidArgumentError("PjRtCompiler requires an HloProgram");
}
TF_ASSIGN_OR_RETURN(auto xla_compile_options,
GetXlaCompileOptions(std::move(options)));
const auto* pjrt_topology = llvm::dyn_cast<PjRtTopology>(&topology);
if (pjrt_topology == nullptr) {
return absl::InvalidArgumentError("PjRtCompiler requires a PjRtTopology");
}
TF_ASSIGN_OR_RETURN(
auto executable,
PjRtCompile(xla_compile_options->compile_options,
xla_program->mlir_module, *pjrt_topology->description()));
return PjRtExecutable::Create(std::move(executable),
std::move(xla_compile_options));
}
absl::StatusOr<std::unique_ptr<LoadedExecutable>>
PjRtCompiler::DeserializeLoadedExecutable(
absl::string_view serialized,
std::unique_ptr<DeserializeExecutableOptions> options) {
DCHECK(this);
TF_ASSIGN_OR_RETURN(auto xla_deserialize_options,
GetXlaDeserializeExecutableOptions(std::move(options)));
TF_ASSIGN_OR_RETURN(
auto pjrt_loaded_executable,
client_->pjrt_client()->DeserializeExecutable(
serialized, std::move(xla_deserialize_options->compile_options)));
return PjRtLoadedExecutable::Create(
client_,
std::shared_ptr<xla::PjRtLoadedExecutable>(
std::move(pjrt_loaded_executable)),
std::move(xla_deserialize_options->loaded_host_callbacks));
}
}
} | #include "xla/pjrt/pjrt_compiler.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/metrics.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/tsl/lib/monitoring/cell_reader.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
using metrics::kPjrtCompilerCompileComputationMetricName;
using metrics::kPjrtCompilerCompileModuleMetricName;
using ::tsl::monitoring::testing::CellReader;
using ::tsl::testing::StatusIs;
namespace {
class PjRtTestTopology : public PjRtTopologyDescription {
public:
PjRtPlatformId platform_id() const override { return 0; }
absl::string_view platform_name() const override { return "not_registered"; }
absl::string_view platform_version() const override { return "test"; }
std::vector<std::unique_ptr<const PjRtDeviceDescription>> DeviceDescriptions()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<std::string> Serialize() const override { return "test_topo"; }
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& Attributes()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type,
absl::Span<const int64_t> dims) const override {
return Unimplemented("TestTopology does not support GetDefaultLayout");
}
};
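// No compiler is registered for the "not_registered" platform, so PjRtCompile
// is expected to fail with NOT_FOUND.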
TEST(PjRtCompilerTest, CompilerNotRegistered) {
PjRtTestTopology topology;
CompileOptions options;
XlaComputation computation;
auto res = PjRtCompile(options, computation, topology);
EXPECT_TRUE(tsl::errors::IsNotFound(res.status()));
}
TEST(PjRtCompilerTest, CompilerRegistered) {
class PjRtTestTopology : public PjRtTopologyDescription {
public:
PjRtPlatformId platform_id() const override { return 0; }
absl::string_view platform_name() const override { return "registered"; }
absl::string_view platform_version() const override { return "test"; }
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
DeviceDescriptions() const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<std::string> Serialize() const override {
return "test_topo";
}
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& Attributes()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type,
absl::Span<const int64_t> dims) const override {
return Unimplemented("TestTopology does not support GetDefaultLayout");
}
};
PjRtTestTopology topology;
class PjRtTestCompiler : public PjRtCompiler {
public:
absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) override {
return tsl::errors::Unimplemented("test compiler!");
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) override {
return tsl::errors::Unimplemented("test compiler!");
}
};
std::unique_ptr<PjRtCompiler> compiler = std::make_unique<PjRtTestCompiler>();
PjRtRegisterCompiler(topology.platform_name(), std::move(compiler));
CompileOptions options;
XlaComputation computation;
auto res = PjRtCompile(options, computation, topology);
EXPECT_TRUE(tsl::errors::IsUnimplemented(res.status()));
}
TEST(PjRtCompilerTest, PjrtCompileComputationMetric) {
PjRtTestTopology topology;
xla::CompileOptions compile_options;
XlaComputation xla_computation;
CellReader<bool> metric_reader(
std::string{kPjrtCompilerCompileComputationMetricName});
EXPECT_THAT(PjRtCompile(compile_options, xla_computation, topology,
                          /*client=*/nullptr),
StatusIs(tensorflow::error::NOT_FOUND));
EXPECT_FALSE(metric_reader.Read());
}
TEST(PjRtCompilerTest, PjrtCompileModuleMetric) {
PjRtTestTopology topology;
xla::CompileOptions compile_options;
mlir::ModuleOp module;
CellReader<bool> metric_reader(
std::string{kPjrtCompilerCompileModuleMetricName});
EXPECT_THAT(PjRtCompile(compile_options, module, topology,
                          /*client=*/nullptr),
StatusIs(tensorflow::error::NOT_FOUND));
EXPECT_FALSE(metric_reader.Read());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0fe8e421-3b6a-41a7-a4c4-85093705cb50 | cpp | tensorflow/tensorflow | tf_pjrt_client | third_party/xla/xla/pjrt/tf_pjrt_client.cc | third_party/xla/xla/pjrt/tf_pjrt_client_test.cc | #include "xla/pjrt/tf_pjrt_client.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
namespace xla {
TfPjRtBuffer::TfPjRtBuffer(TfPjRtClient* client,
std::unique_ptr<PjRtBuffer> wrapped)
: client_(client), wrapped_(std::move(wrapped)) {
client_->TrackBuffer(this);
}
TfPjRtBuffer::~TfPjRtBuffer() { client_->UntrackBuffer(this); }
PjRtClient* TfPjRtBuffer::client() const { return client_; }
PjRtClient* TfPjRtExecutable::client() const { return client_; }
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfPjRtBuffer::CopyToDevice(
PjRtDevice* dst_device) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> result,
wrapped_->CopyToDevice(dst_device));
return std::unique_ptr<PjRtBuffer>(
std::make_unique<TfPjRtBuffer>(client_, std::move(result)));
}
TfPjRtExecutable::TfPjRtExecutable(
TfPjRtClient* client, std::unique_ptr<PjRtLoadedExecutable> wrapped)
: client_(client), wrapped_(std::move(wrapped)) {}
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
TfPjRtExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
std::vector<std::vector<PjRtBuffer*>> unwrapped_argument_handles;
unwrapped_argument_handles.reserve(argument_handles.size());
for (auto& handles : argument_handles) {
unwrapped_argument_handles.emplace_back();
auto& unwrapped_handles = unwrapped_argument_handles.back();
unwrapped_handles.reserve(handles.size());
for (PjRtBuffer* buffer : handles) {
unwrapped_handles.push_back(
tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
}
}
TF_ASSIGN_OR_RETURN(auto out, wrapped_->Execute(unwrapped_argument_handles,
options, returned_futures));
for (auto& buffer_list : out) {
for (std::unique_ptr<PjRtBuffer>& buffer : buffer_list) {
buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
}
}
return out;
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfPjRtExecutable::ExecuteSharded(absl::Span<PjRtBuffer* const> argument_handles,
PjRtDevice* device,
const ExecuteOptions& options,
std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
std::vector<PjRtBuffer*> unwrapped_argument_handles;
unwrapped_argument_handles.reserve(argument_handles.size());
for (PjRtBuffer* buffer : argument_handles) {
unwrapped_argument_handles.push_back(
tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
}
TF_ASSIGN_OR_RETURN(auto out, wrapped_->ExecuteSharded(
unwrapped_argument_handles, device, options,
returned_future, fill_future));
for (std::unique_ptr<PjRtBuffer>& buffer : out) {
buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
}
return out;
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfPjRtExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
std::vector<PjRtBuffer*> unwrapped_argument_handles;
unwrapped_argument_handles.reserve(argument_handles.size());
for (PjRtBuffer* buffer : argument_handles) {
unwrapped_argument_handles.push_back(
tensorflow::down_cast<TfPjRtBuffer*>(buffer)->wrapped());
}
TF_ASSIGN_OR_RETURN(auto out, wrapped_->ExecutePortable(
unwrapped_argument_handles, device, options,
returned_future, fill_future));
for (std::unique_ptr<PjRtBuffer>& buffer : out) {
buffer = std::make_unique<TfPjRtBuffer>(client_, std::move(buffer));
}
return out;
}
TfPjRtClient::TfPjRtClient(std::unique_ptr<PjRtClient> wrapped)
: wrapped_(std::move(wrapped)) {
LOG(INFO) << "TfPjRtClient created.";
int num_mutexes = wrapped_->addressable_device_count();
alive_buffers_ = std::vector<DeviceBuffers>(num_mutexes);
for (int i = 0; i < num_mutexes; ++i) {
mutex_id_from_device_id_.insert(
{wrapped_->addressable_devices()[i]->id(), i});
}
}
TfPjRtClient::~TfPjRtClient() { LOG(INFO) << "TfPjRtClient destroyed."; }
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfPjRtClient::WrapBuffer(
absl::StatusOr<std::unique_ptr<PjRtBuffer>> to_wrap) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> buffer, std::move(to_wrap));
return std::unique_ptr<PjRtBuffer>(
std::make_unique<TfPjRtBuffer>(this, std::move(buffer)));
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
TfPjRtClient::WrapExecutable(
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> to_wrap) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
std::move(to_wrap));
return std::unique_ptr<PjRtLoadedExecutable>(
std::make_unique<TfPjRtExecutable>(this, std::move(executable)));
}
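// Maps a buffer to the per-device mutex that guards the client's set of live
// TfPjRtBuffers for that device.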
static int GetMutexId(
const TfPjRtBuffer* buffer,
const absl::flat_hash_map<int, int>& mutex_id_from_device_id) {
auto iters = mutex_id_from_device_id.find(buffer->wrapped()->device()->id());
CHECK(iters != mutex_id_from_device_id.end())
<< "Mutex id not found for device id: "
<< buffer->wrapped()->device()->id();
return iters->second;
}
void TfPjRtClient::TrackBuffer(TfPjRtBuffer* buffer) {
int mutex_id = GetMutexId(buffer, mutex_id_from_device_id_);
{
absl::MutexLock lock(&alive_buffers_[mutex_id].mu);
alive_buffers_[mutex_id].alive_buffers.insert(buffer);
}
}
void TfPjRtClient::UntrackBuffer(const TfPjRtBuffer* buffer) {
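  // A null wrapped buffer means the underlying PJRT buffer was already
  // released (see DestroyWrappedBuffersAndClient), so there is nothing left to
  // untrack.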
if (buffer->wrapped() == nullptr) {
return;
}
int mutex_id = GetMutexId(buffer, mutex_id_from_device_id_);
{
absl::MutexLock lock(&alive_buffers_[mutex_id].mu);
alive_buffers_[mutex_id].alive_buffers.erase(buffer);
}
}
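// Releases the PJRT buffer underneath every tracked TfPjRtBuffer and then
// drops the wrapped client so the underlying runtime can be torn down.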
void TfPjRtClient::DestroyWrappedBuffersAndClient() {
int num_mutexes = alive_buffers_.size();
for (int i = 0; i < num_mutexes; ++i) {
absl::MutexLock lock(&alive_buffers_[i].mu);
for (auto* buffer : alive_buffers_[i].alive_buffers) {
buffer->DestroyWrappedBuffer();
}
}
wrapped_.reset(nullptr);
LOG(INFO) << "TfPjRtClient::DestroyWrappedBuffersAndClient completed.";
}
std::unique_ptr<TfPjRtClient> TfPjRtClient::CreateTfPjRtClient(
std::unique_ptr<PjRtClient> wrapped) {
return std::make_unique<TfPjRtClient>(std::move(wrapped));
}
} | #include "xla/pjrt/tf_pjrt_client.h"
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/literal_util.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(TfClientTest, ExecuteAndHloSnapshot) {
constexpr char kProgram[] = R"(
HloModule add
ENTRY add {
x = f32[3,2] parameter(0)
y = f32[3,2] parameter(1)
ROOT add = f32[3,2] add(x, y)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(/*asynchronous=*/true));
client = TfPjRtClient::CreateTfPjRtClient(std::move(client));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
std::string dir = tsl::testing::TmpDir();
xla::CompileOptions options;
auto* debug_opts = options.executable_build_options.mutable_debug_options();
debug_opts->set_xla_dump_to(dir);
debug_opts->set_xla_dump_hlo_snapshots(true);
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, options));
std::vector<float> data1{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
std::vector<float> data2{10.0, 20.0, 30.0, 40.0, 50.0, 60.0};
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer1,
client->BufferFromHostBuffer(
data1.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer2,
client->BufferFromHostBuffer(
data2.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto result = pjrt_executable->Execute(
{{buffer1.get(), buffer2.get()}},
      /*options=*/{});
ASSERT_TRUE(result.ok());
tsl::FileSystem* fs;
ASSERT_TRUE(tsl::Env::Default()->GetFileSystemForFile(dir, &fs).ok());
std::vector<std::string> paths;
ASSERT_TRUE(fs->GetMatchingPaths(dir + "/*.snapshot.*.pb", &paths).ok());
ASSERT_EQ(paths.size(), 1);
HloSnapshot snapshot;
ASSERT_TRUE(
tsl::ReadBinaryProto(tsl::Env::Default(), paths[0], &snapshot).ok());
ASSERT_EQ(*Literal::CreateFromProto(snapshot.arguments(0)),
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}));
ASSERT_EQ(
*Literal::CreateFromProto(snapshot.arguments(1)),
LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}, {50.0, 60.0}}));
ASSERT_EQ(
*Literal::CreateFromProto(snapshot.result()),
LiteralUtil::CreateR2<float>({{11.0, 22.0}, {33.0, 44.0}, {55.0, 66.0}}));
auto* tf_pjrt_client =
tensorflow::down_cast<xla::TfPjRtClient*>(client.get());
tf_pjrt_client->DestroyWrappedBuffersAndClient();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tf_pjrt_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/tf_pjrt_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a36c167-4407-4e5f-84ef-a0ee90099328 | cpp | tensorflow/tensorflow | pjrt_c_api_client | third_party/xla/xla/pjrt/pjrt_c_api_client.cc | third_party/xla/xla/pjrt/pjrt_c_api_client_test.cc | #include "xla/pjrt/pjrt_c_api_client.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/ErrorHandling.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_layouts_extension.h"
#include "xla/pjrt/c/pjrt_c_api_profiler_extension.h"
#include "xla/pjrt/c/pjrt_c_api_stream_extension.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
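// Runs a PJRT C API call; if it reports an error, converts it to an
// absl::Status and returns that status wrapped in an already-failed
// PjRtFuture<> from the enclosing function.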
#define RETURN_FUTURE_IF_ERROR(expr, c_api) \
do { \
PJRT_Error* error = (expr); \
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> _error( \
error, pjrt::MakeErrorDeleter(c_api)); \
absl::Status _status = pjrt::PjrtErrorToStatus(_error.get(), c_api); \
if (!_status.ok()) { \
return PjRtFuture<>(_status); \
} \
} while (false)
static absl::StatusOr<const PjRtCApiTopologyDescription> InitClientTopoDesc(
const PJRT_Api* c_api, PJRT_Client* c_client) {
absl::StatusOr<PJRT_TopologyDescription*> c_topo =
pjrt::GetTopologyDescription(c_client, c_api);
TF_RETURN_IF_ERROR(c_topo.status());
  return PjRtCApiTopologyDescription(c_api, *c_topo, /*owned=*/false);
}
PjRtCApiClient::PjRtCApiClient(
const PJRT_Api* c_api, PJRT_Client* c_client,
std::unique_ptr<pjrt::PJRT_KeyValueCallbackData> kv_callback_data)
: c_api_(c_api),
c_client_(std::unique_ptr<PJRT_Client, ::pjrt::PJRT_ClientDeleter>(
c_client, ::pjrt::MakeClientDeleter(c_api))),
kv_callback_data_(std::move(kv_callback_data)),
topo_desc_(InitClientTopoDesc(c_api, c_client)),
platform_version_(absl::StrCat(
"PJRT C API\n", ::pjrt::GetPlatformVersion(c_client, c_api))),
platform_name_(::pjrt::GetPlatformName(c_client, c_api)),
platform_id_(tsl::Fingerprint64(platform_name_)) {
InitDevicesAndMemorySpaces();
InitAttributes();
LOG(INFO) << "PjRtCApiClient created.";
}
void PjRtCApiClient::InitDevicesAndMemorySpaces() {
PJRT_Client_Devices_Args devices_args;
devices_args.struct_size = PJRT_Client_Devices_Args_STRUCT_SIZE;
devices_args.extension_start = nullptr;
devices_args.client = c_client_.get();
pjrt::LogFatalIfPjrtError(c_api_->PJRT_Client_Devices(&devices_args), c_api_);
const size_t num_devices = devices_args.num_devices;
c_to_cpp_device_map_.reserve(num_devices);
owned_devices_.reserve(num_devices);
devices_.reserve(num_devices);
for (int i = 0; i < num_devices; ++i) {
PJRT_Device* device = devices_args.devices[i];
std::unique_ptr<PjRtCApiDevice>& cpp_device = owned_devices_.emplace_back(
std::make_unique<PjRtCApiDevice>(device, this));
devices_.push_back(cpp_device.get());
c_to_cpp_device_map_[device] = cpp_device.get();
}
PJRT_Client_AddressableDevices_Args address_args;
address_args.struct_size = PJRT_Client_AddressableDevices_Args_STRUCT_SIZE;
address_args.extension_start = nullptr;
address_args.client = c_client_.get();
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_Client_AddressableDevices(&address_args), c_api_);
const size_t num_addressable_devices = address_args.num_addressable_devices;
addressable_devices_.reserve(num_addressable_devices);
for (int i = 0; i < num_addressable_devices; ++i) {
PJRT_Device* c_device = address_args.addressable_devices[i];
addressable_devices_.push_back(GetCppDevice(c_device));
}
PJRT_Client_AddressableMemories_Args memory_args;
memory_args.struct_size = PJRT_Client_AddressableMemories_Args_STRUCT_SIZE;
memory_args.extension_start = nullptr;
memory_args.client = c_client_.get();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> client_error(
c_api_->PJRT_Client_AddressableMemories(&memory_args),
pjrt::MakeErrorDeleter(c_api_));
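  // Memory spaces are optional in the PJRT C API: an UNIMPLEMENTED error from
  // the plugin is tolerated, while any other error is fatal.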
if (client_error == nullptr) {
const size_t num_memories = memory_args.num_addressable_memories;
c_to_cpp_memory_map_.reserve(num_memories);
owned_memory_spaces_.reserve(num_memories);
addressable_memory_spaces_.reserve(num_memories);
for (int i = 0; i < num_memories; ++i) {
PJRT_Memory* memory = memory_args.addressable_memories[i];
std::unique_ptr<PjRtCApiMemorySpace>& cpp_memory =
owned_memory_spaces_.emplace_back(
std::make_unique<PjRtCApiMemorySpace>(memory, this));
addressable_memory_spaces_.push_back(cpp_memory.get());
c_to_cpp_memory_map_[memory] = cpp_memory.get();
}
} else if (pjrt::GetErrorCode(client_error.get(), c_api_) !=
PJRT_Error_Code_UNIMPLEMENTED) {
pjrt::LogFatalIfPjrtError(client_error.get(), c_api_);
}
for (const auto& device : addressable_devices_) {
PjRtCApiDevice* cpp_device = tensorflow::down_cast<PjRtCApiDevice*>(device);
PJRT_Device* c_device = cpp_device->c_device();
PJRT_Device_AddressableMemories_Args args;
args.struct_size = PJRT_Device_AddressableMemories_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = c_device;
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> device_error(
c_api_->PJRT_Device_AddressableMemories(&args),
pjrt::MakeErrorDeleter(c_api_));
if (device_error != nullptr) {
if (pjrt::GetErrorCode(device_error.get(), c_api_) !=
PJRT_Error_Code_UNIMPLEMENTED) {
pjrt::LogFatalIfPjrtError(device_error.get(), c_api_);
}
break;
}
const size_t num_memories = args.num_memories;
cpp_device->memory_spaces_.reserve(num_memories);
for (int i = 0; i < num_memories; ++i) {
cpp_device->memory_spaces_.push_back(GetCppMemory(args.memories[i]));
}
}
for (const auto& memory : addressable_memory_spaces_) {
PjRtCApiMemorySpace* cpp_memory =
tensorflow::down_cast<PjRtCApiMemorySpace*>(memory);
PJRT_Memory* c_memory = cpp_memory->c_memory();
PJRT_Memory_AddressableByDevices_Args args;
args.struct_size = PJRT_Memory_AddressableByDevices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_Memory_AddressableByDevices(&args),
c_api_);
const size_t num_attached_devices = args.num_devices;
cpp_memory->devices_.reserve(num_attached_devices);
for (int i = 0; i < num_attached_devices; ++i) {
cpp_memory->devices_.push_back(GetCppDevice(args.devices[i]));
}
}
}
void PjRtCApiClient::InitAttributes() {
PJRT_Plugin_Attributes_Args args;
args.struct_size = PJRT_Plugin_Attributes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_Plugin_Attributes(&args), c_api_);
attributes_ =
pjrt::ConvertFromPjRtNamedValueList(args.attributes, args.num_attributes);
}
int PjRtCApiClient::device_count() const { return devices_.size(); }
int PjRtCApiClient::addressable_device_count() const {
return addressable_devices_.size();
}
absl::Span<PjRtDevice* const> PjRtCApiClient::devices() const {
return devices_;
}
absl::Span<PjRtDevice* const> PjRtCApiClient::addressable_devices() const {
return addressable_devices_;
}
int PjRtCApiClient::process_index() const {
PJRT_Client_ProcessIndex_Args process_index_args;
process_index_args.struct_size = PJRT_Client_ProcessIndex_Args_STRUCT_SIZE;
process_index_args.extension_start = nullptr;
process_index_args.client = c_client_.get();
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_Client_ProcessIndex(&process_index_args), c_api_);
return process_index_args.process_index;
}
absl::string_view PjRtCApiClient::platform_version() const {
return platform_version_;
}
std::optional<PjRtPluginAttributes> PjRtCApiClient::plugin_attributes() const {
return PjRtPluginAttributes{c_api_->pjrt_api_version.major_version,
c_api_->pjrt_api_version.minor_version,
attributes_};
}
static DeviceAssignment CalculateDefaultAssignment(
int num_replicas, int num_partitions,
absl::Span<const int> device_assignment) {
DeviceAssignment cpp_device_assignment(num_replicas, num_partitions);
const int* iterator = device_assignment.begin();
for (int replica = 0; replica < num_replicas; ++replica) {
for (int partition = 0; partition < num_partitions; ++partition) {
cpp_device_assignment(replica, partition) = *(iterator++);
}
}
return cpp_device_assignment;
}
absl::StatusOr<DeviceAssignment> PjRtCApiClient::GetDefaultDeviceAssignment(
int num_replicas, int num_partitions) const {
PJRT_Client_DefaultDeviceAssignment_Args args;
args.struct_size = PJRT_Client_DefaultDeviceAssignment_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.num_replicas = num_replicas;
args.num_partitions = num_partitions;
std::vector<int> assignment_buffer(num_replicas * num_partitions);
args.default_assignment_size = assignment_buffer.size();
args.default_assignment = assignment_buffer.data();
RETURN_STATUS_IF_PJRT_ERROR(
c_api_->PJRT_Client_DefaultDeviceAssignment(&args), c_api_);
absl::Span<const int> param{args.default_assignment,
args.default_assignment_size};
return CalculateDefaultAssignment(args.num_replicas, args.num_partitions,
param);
}
absl::StatusOr<PjRtDevice*> PjRtCApiClient::LookupDevice(
PjRtGlobalDeviceId global_device_id) const {
PJRT_Client_LookupDevice_Args args;
args.struct_size = PJRT_Client_LookupDevice_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.id = global_device_id.value();
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Client_LookupDevice(&args), c_api_);
return GetCppDevice(args.device);
}
absl::StatusOr<PjRtDevice*> PjRtCApiClient::LookupAddressableDevice(
PjRtLocalDeviceId local_device_id) const {
PJRT_Client_LookupAddressableDevice_Args args;
args.struct_size = PJRT_Client_LookupAddressableDevice_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.local_hardware_id = local_device_id.value();
RETURN_STATUS_IF_PJRT_ERROR(
c_api_->PJRT_Client_LookupAddressableDevice(&args), c_api_);
return GetCppDevice(args.addressable_device);
}
absl::Span<PjRtMemorySpace* const> PjRtCApiClient::memory_spaces() const {
return addressable_memory_spaces_;
}
static absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
InitializeArgsAndCompile(PjRtCApiClient* api_client, const PJRT_Api* c_api,
PJRT_Client* client, const CompileOptions& options,
const std::string& code, const std::string& format) {
PJRT_Client_Compile_Args args;
args.struct_size = PJRT_Client_Compile_Args_STRUCT_SIZE;
PJRT_Profiler_Extension profiler_extension =
pjrt::CreatePjrtProfilerExtension("PJRT_Client_Compile linkage");
args.extension_start =
reinterpret_cast<PJRT_Extension_Base*>(&profiler_extension);
args.client = client;
TF_ASSIGN_OR_RETURN(const CompileOptionsProto options_proto,
options.ToProto());
std::string options_str = options_proto.SerializeAsString();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
PJRT_Program program;
program.struct_size = PJRT_Program_STRUCT_SIZE;
program.extension_start = nullptr;
program.code = const_cast<char*>(code.c_str());
program.code_size = code.size();
program.format = format.c_str();
program.format_size = format.size();
args.program = &program;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Client_Compile(&args), c_api);
std::unique_ptr<PjRtLoadedExecutable> ret =
std::make_unique<PjRtCApiLoadedExecutable>(api_client, args.executable);
return ret;
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> PjRtCApiClient::Compile(
const XlaComputation& computation, CompileOptions options) {
std::string module_str = computation.proto().SerializeAsString();
std::string format(pjrt::kHloFormat);
return InitializeArgsAndCompile(this, c_api_, c_client_.get(), options,
module_str, format);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> PjRtCApiClient::Compile(
mlir::ModuleOp module, CompileOptions options) {
if (!pjrt_c_api()) llvm::report_fatal_error("pjrt_c_api is null");
TF_ASSIGN_OR_RETURN(
std::string serialized,
xla::Serialize(module,
xla::GetDefaultStablehloVersion(
plugin_attributes()->pjrt_c_api_minor_version)));
std::string format(pjrt::kMlirFormat);
return InitializeArgsAndCompile(this, c_api_, c_client_.get(), options,
serialized, format);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtCApiClient::DeserializeExecutable(absl::string_view serialized,
std::optional<CompileOptions> options) {
PJRT_Executable_DeserializeAndLoad_Args des_args;
des_args.struct_size = PJRT_Executable_DeserializeAndLoad_Args_STRUCT_SIZE;
des_args.extension_start = nullptr;
des_args.client = c_client_.get();
des_args.serialized_executable = serialized.data();
des_args.serialized_executable_size = serialized.length();
const PJRT_Api* api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(
api->PJRT_Executable_DeserializeAndLoad(&des_args), api);
PJRT_LoadedExecutable* c_exec = des_args.loaded_executable;
CHECK(c_exec != nullptr);
return std::unique_ptr<PjRtLoadedExecutable>(
std::make_unique<PjRtCApiLoadedExecutable>(this, c_exec));
}
absl::StatusOr<const PjRtTopologyDescription*>
PjRtCApiClient::GetTopologyDescription() const {
if (!topo_desc_.ok()) {
return topo_desc_.status();
}
return &(*topo_desc_);
}
absl::StatusOr<std::uintptr_t> PjRtCApiClient::UnsafeBufferPointer(
PjRtBuffer* buffer) {
if (buffer->client() != this) {
return InvalidArgument(
"buffer passed to PjRtCApiClient::UnsafeBufferPointer() is from a "
"different client than that of the function call. Buffer's client "
"platform: '%s', function call's client platform: '%s'.",
buffer->client()->platform_name(), this->platform_name());
}
PJRT_Buffer_UnsafePointer_Args args;
args.struct_size = PJRT_Buffer_UnsafePointer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer =
tensorflow::down_cast<const PjRtCApiBuffer*>(buffer)->c_buffer();
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Buffer_UnsafePointer(&args), c_api_);
return args.buffer_pointer;
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBufferInternalImpl(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
std::variant<PjRtDevice*, PjRtMemorySpace*> device_or_memory,
const Layout* device_layout) {
if (host_buffer_semantics != HostBufferSemantics::kImmutableOnlyDuringCall &&
host_buffer_semantics != HostBufferSemantics::kImmutableZeroCopy &&
host_buffer_semantics !=
HostBufferSemantics::kImmutableUntilTransferCompletes) {
return Unimplemented(
"PJRT C API does not support HostBufferSemantics other than "
"HostBufferSemantics::kImmutableOnlyDuringCall, "
"HostBufferSemantics::kImmutableZeroCopy and "
"HostBufferSemantics::kImmutableUntilTransferCompletes.");
}
PJRT_Client_BufferFromHostBuffer_Args args;
args.struct_size = PJRT_Client_BufferFromHostBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.data = data;
args.type = ::pjrt::ConvertToPjRtBufferType(type);
args.dims = dims.data();
args.num_dims = dims.size();
if (byte_strides.has_value()) {
args.byte_strides = byte_strides.value().data();
args.num_byte_strides = byte_strides.value().size();
} else {
args.byte_strides = nullptr;
args.num_byte_strides = 0;
}
pjrt::BufferMemoryLayoutData c_layout_data;
if (device_layout != nullptr) {
TF_ASSIGN_OR_RETURN(c_layout_data,
pjrt::ConvertToBufferMemoryLayoutData(*device_layout));
args.device_layout = &c_layout_data.c_layout;
} else {
args.device_layout = nullptr;
}
args.host_buffer_semantics =
::pjrt::ConvertToPjRtHostBufferSemantics(host_buffer_semantics);
if (std::holds_alternative<PjRtDevice*>(device_or_memory)) {
args.device = tensorflow::down_cast<PjRtCApiDevice*>(
std::get<PjRtDevice*>(device_or_memory))
->c_device();
args.memory = nullptr;
} else {
CHECK(std::holds_alternative<PjRtMemorySpace*>(device_or_memory));
args.device = nullptr;
args.memory = tensorflow::down_cast<PjRtCApiMemorySpace*>(
std::get<PjRtMemorySpace*>(device_or_memory))
->c_memory();
}
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Client_BufferFromHostBuffer(&args),
c_api_);
auto buffer = std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(this, args.buffer));
std::unique_ptr<PJRT_Event, ::pjrt::PJRT_EventDeleter> event(
args.done_with_host_buffer, ::pjrt::MakeEventDeleter(c_api_));
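  // If the caller provided a completion callback, bridge it through the C
  // event API; the heap-allocated closure below deletes itself after running
  // exactly once.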
if (on_done_with_host_buffer) {
PJRT_Event_OnReady_Args event_args;
event_args.struct_size = PJRT_Event_OnReady_Args_STRUCT_SIZE;
event_args.extension_start = nullptr;
event_args.event = event.get();
event_args.user_arg = new absl::AnyInvocable<void(PJRT_Error*)>(
[on_done_with_host_buffer = std::move(on_done_with_host_buffer),
c_api = c_api_](PJRT_Error* error) mutable {
if (error) {
::pjrt::MakeErrorDeleter(c_api)(error);
}
std::move(on_done_with_host_buffer)();
});
event_args.callback = [](PJRT_Error* error, void* args) {
auto* on_done_with_host_buffer =
reinterpret_cast<absl::AnyInvocable<void(PJRT_Error*)>*>(args);
(*on_done_with_host_buffer)(error);
delete on_done_with_host_buffer;
};
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Event_OnReady(&event_args),
c_api_);
}
return buffer;
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtMemorySpace* memory_space, const Layout* device_layout) {
return BufferFromHostBufferInternalImpl(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), memory_space, device_layout);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer, PjRtDevice* device,
const Layout* device_layout) {
return BufferFromHostBufferInternalImpl(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), device, device_layout);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtDevice* device) {
return BufferFromHostBufferInternalImpl(
data, type, dims, byte_strides, host_buffer_semantics,
      std::move(on_done_with_host_buffer), device, /*device_layout=*/nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtCApiClient::CreateViewOfDeviceBuffer(
void* device_ptr, const Shape& shape, PjRtDevice* device,
std::function<void()> on_delete_callback,
std::optional<std::intptr_t> stream) {
PJRT_Client_CreateViewOfDeviceBuffer_Args args;
args.struct_size = PJRT_Client_CreateViewOfDeviceBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.device_buffer_ptr = device_ptr;
args.dims = shape.dimensions().data();
args.num_dims = shape.dimensions().size();
args.element_type = pjrt::ConvertToPjRtBufferType(shape.element_type());
pjrt::BufferMemoryLayoutData c_layout_data;
if (shape.has_layout()) {
TF_ASSIGN_OR_RETURN(c_layout_data,
pjrt::ConvertToBufferMemoryLayoutData(shape.layout()));
args.layout = &(c_layout_data.c_layout);
} else {
args.layout = nullptr;
}
if (on_delete_callback != nullptr) {
args.on_delete_callback_arg =
new std::function(std::move(on_delete_callback));
args.on_delete_callback = [](void* device_buffer_ptr, void* user_arg) {
auto* c_func = reinterpret_cast<std::function<void()>*>(user_arg);
(*c_func)();
delete c_func;
};
} else {
args.on_delete_callback = nullptr;
args.on_delete_callback_arg = nullptr;
}
args.device = tensorflow::down_cast<PjRtCApiDevice*>(device)->c_device();
if (stream.has_value()) {
args.stream = *stream;
} else {
args.stream = reinterpret_cast<intptr_t>(nullptr);
}
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(
c_api->PJRT_Client_CreateViewOfDeviceBuffer(&args), c_api);
return std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(this, args.buffer));
}
absl::StatusOr<Layout> PjRtCApiClient::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) {
const PJRT_Api* c_api = pjrt_c_api();
PJRT_Layouts_Extension* extension =
pjrt::FindExtension<PJRT_Layouts_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Layouts);
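  // Without the Layouts extension the plugin cannot report a preferred layout,
  // so fall back to a dense, descending (major-to-minor) layout.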
if (extension == nullptr) {
return LayoutUtil::MakeDescendingLayout(dims.size());
}
PJRT_Layouts_PJRT_Client_GetDefaultLayout_Args args;
args.struct_size = PJRT_Layouts_PJRT_Client_GetDefaultLayout_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = c_client_.get();
args.type = pjrt::ConvertToPjRtBufferType(element_type);
args.dims = dims.data();
args.num_dims = dims.size();
RETURN_STATUS_IF_PJRT_ERROR(
extension->PJRT_Layouts_PJRT_Client_GetDefaultLayout(&args), c_api);
std::unique_ptr<PJRT_Layouts_MemoryLayout,
pjrt::PJRT_Layouts_MemoryLayoutDeleter>
layout_destroyer(args.layout, pjrt::MakeMemoryLayoutDeleter(c_api));
PJRT_Layouts_MemoryLayout_Serialize_Args serialize_args;
serialize_args.struct_size =
PJRT_Layouts_MemoryLayout_Serialize_Args_STRUCT_SIZE;
serialize_args.extension_start = nullptr;
serialize_args.layout = args.layout;
RETURN_STATUS_IF_PJRT_ERROR(
extension->PJRT_Layouts_MemoryLayout_Serialize(&serialize_args), c_api);
absl::Cleanup cleanup = [&serialize_args] {
serialize_args.serialized_layout_deleter(serialize_args.serialized_layout);
};
std::string serialized_layout(serialize_args.serialized_bytes,
serialize_args.serialized_bytes_size);
TF_ASSIGN_OR_RETURN(PjRtXlaLayout pjrt_xla_layout,
PjRtXlaLayout::Deserialize(serialized_layout));
return pjrt_xla_layout.xla_layout();
}
const PJRT_Api* PjRtCApiClient::pjrt_c_api() const { return c_api_; }
PjRtCApiDeviceDescription::PjRtCApiDeviceDescription(
const PJRT_Api* c_api, PJRT_DeviceDescription* device_description)
: c_api_(c_api), device_description_(device_description) {
InitAttributes();
}
int PjRtCApiDeviceDescription::id() const {
PJRT_DeviceDescription_Id_Args args;
args.struct_size = PJRT_DeviceDescription_Id_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_Id(&args), c_api_);
return args.id;
}
int PjRtCApiDeviceDescription::process_index() const {
PJRT_DeviceDescription_ProcessIndex_Args args;
args.struct_size = PJRT_DeviceDescription_ProcessIndex_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_ProcessIndex(&args),
c_api_);
return args.process_index;
}
void PjRtCApiDeviceDescription::InitAttributes() {
attributes_ = {};
PJRT_DeviceDescription_Attributes_Args args;
args.struct_size = PJRT_DeviceDescription_Attributes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_Attributes(&args),
c_api_);
for (int i = 0; i < args.num_attributes; ++i) {
const auto& attribute = args.attributes[i];
std::string attribute_name(attribute.name, attribute.name_size);
switch (attribute.type) {
case PJRT_NamedValue_Type::PJRT_NamedValue_kString: {
std::string string_value(attribute.string_value, attribute.value_size);
attributes_[attribute_name] = PjRtDeviceAttribute(string_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64: {
attributes_[attribute_name] =
PjRtDeviceAttribute(attribute.int64_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List: {
const int64_t* array_ptr(attribute.int64_array_value);
std::vector<int64_t> int64_array(array_ptr,
array_ptr + attribute.value_size);
attributes_[attribute_name] = PjRtDeviceAttribute(int64_array);
break;
}
default: {
LOG(FATAL) << "PJRT_DeviceDescription_Attributes() returned attribute '"
<< attribute_name << "' with unsupported type "
<< attribute.type
<< " to PjRtCApiDeviceDescription::InitAttributes()";
break;
}
}
}
}
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>&
PjRtCApiDeviceDescription::Attributes() const {
return attributes_;
}
absl::string_view PjRtCApiDeviceDescription::device_kind() const {
PJRT_DeviceDescription_Kind_Args args;
args.struct_size = PJRT_DeviceDescription_Kind_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_Kind(&args), c_api_);
absl::string_view device_kind(args.device_kind, args.device_kind_size);
return device_kind;
}
absl::string_view PjRtCApiDeviceDescription::DebugString() const {
PJRT_DeviceDescription_DebugString_Args args;
args.struct_size = PJRT_DeviceDescription_DebugString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_DebugString(&args),
c_api_);
absl::string_view debug_string(args.debug_string, args.debug_string_size);
return debug_string;
}
absl::string_view PjRtCApiDeviceDescription::ToString() const {
PJRT_DeviceDescription_ToString_Args args;
args.struct_size = PJRT_DeviceDescription_ToString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device_description = device_description_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_DeviceDescription_ToString(&args),
c_api_);
absl::string_view to_string(args.to_string, args.to_string_size);
return to_string;
}
PjRtCApiDevice::PjRtCApiDevice(PJRT_Device* device, PjRtCApiClient* client)
: client_(client),
device_(device),
description_(client->pjrt_c_api(),
pjrt::GetDeviceDescription(client->pjrt_c_api(), device)) {}
PjRtClient* PjRtCApiDevice::client() const { return client_; }
bool PjRtCApiDevice::IsAddressable() const {
PJRT_Device_IsAddressable_Args args;
args.struct_size = PJRT_Device_IsAddressable_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Device_IsAddressable(&args), api);
return args.is_addressable;
}
PjRtLocalHardwareId PjRtCApiDevice::local_hardware_id() const {
PJRT_Device_LocalHardwareId_Args args;
args.struct_size = PJRT_Device_LocalHardwareId_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Device_LocalHardwareId(&args), api);
return PjRtLocalHardwareId(args.local_hardware_id);
}
absl::StatusOr<PjRtMemorySpace*> PjRtCApiDevice::default_memory_space() const {
PJRT_Device_DefaultMemory_Args args;
args.struct_size = PJRT_Device_DefaultMemory_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Device_DefaultMemory(&args), api);
return client_->GetCppMemory(args.memory);
}
absl::StatusOr<tsl::AllocatorStats> PjRtCApiDevice::GetAllocatorStats() const {
PJRT_Device_MemoryStats_Args args;
args.struct_size = PJRT_Device_MemoryStats_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device_;
const PJRT_Api* api = client_->pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Device_MemoryStats(&args), api);
tsl::AllocatorStats result;
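  // Statistics the plugin did not report are surfaced as -1; the genuinely
  // optional fields (bytes_limit, bytes_reservable_limit, pool_bytes,
  // peak_pool_bytes) are simply left unset.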
result.bytes_in_use = args.bytes_in_use;
if (args.peak_bytes_in_use_is_set) {
result.peak_bytes_in_use = args.peak_bytes_in_use;
} else {
result.peak_bytes_in_use = -1;
}
if (args.num_allocs_is_set) {
result.num_allocs = args.num_allocs;
} else {
result.num_allocs = -1;
}
if (args.largest_alloc_size_is_set) {
result.largest_alloc_size = args.largest_alloc_size;
} else {
result.largest_alloc_size = -1;
}
if (args.bytes_limit_is_set) {
result.bytes_limit = args.bytes_limit;
}
if (args.bytes_reserved_is_set) {
result.bytes_reserved = args.bytes_reserved;
} else {
result.bytes_reserved = -1;
}
if (args.peak_bytes_reserved_is_set) {
result.peak_bytes_reserved = args.peak_bytes_reserved;
} else {
result.peak_bytes_reserved = -1;
}
if (args.bytes_reservable_limit_is_set) {
result.bytes_reservable_limit = args.bytes_reservable_limit;
}
if (args.largest_free_block_bytes_is_set) {
result.largest_free_block_bytes = args.largest_free_block_bytes;
} else {
result.largest_free_block_bytes = -1;
}
if (args.pool_bytes_is_set) {
result.pool_bytes = args.pool_bytes;
}
if (args.peak_pool_bytes_is_set) {
result.peak_pool_bytes = args.peak_pool_bytes;
}
return result;
}
absl::StatusOr<std::intptr_t> PjRtCApiDevice::GetStreamForExternalReadyEvents()
const {
const PJRT_Api* c_api = client_->pjrt_c_api();
PJRT_Stream_Extension* extension = pjrt::FindExtension<PJRT_Stream_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Stream);
if (extension == nullptr) {
return absl::UnimplementedError(
"Stream extension not implemented in this PJRT plugin.");
}
PJRT_Get_Stream_For_External_Ready_Events_Args args;
args.struct_size = PJRT_Get_Stream_For_External_Ready_Events_Args_STRUCT_SIZE;
args.device = device_;
RETURN_STATUS_IF_PJRT_ERROR(extension->get_stream(&args), c_api);
return args.stream;
}
const PJRT_Api* PjRtCApiMemorySpace::pjrt_c_api() const {
return client_->pjrt_c_api();
}
PjRtClient* PjRtCApiMemorySpace::client() const { return client_; }
int PjRtCApiMemorySpace::id() const {
PJRT_Memory_Id_Args args;
args.struct_size = PJRT_Memory_Id_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_Id(&args), pjrt_c_api());
return args.id;
}
absl::string_view PjRtCApiMemorySpace::kind() const {
PJRT_Memory_Kind_Args args;
args.struct_size = PJRT_Memory_Kind_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_Kind(&args),
pjrt_c_api());
return absl::string_view(args.kind, args.kind_size);
}
int PjRtCApiMemorySpace::kind_id() const {
PJRT_Memory_Kind_Id_Args args;
args.struct_size = PJRT_Memory_Kind_Id_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
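  // PJRT_Memory_Kind_Id is only available from C API version 0.48 on; older
  // plugins fall back to a fingerprint of the memory kind string.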
if (pjrt_c_api()->pjrt_api_version.major_version > 0 ||
pjrt_c_api()->pjrt_api_version.minor_version >= 48) {
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_Kind_Id(&args),
pjrt_c_api());
return args.kind_id;
}
return tsl::Fingerprint32(kind());
}
absl::string_view PjRtCApiMemorySpace::DebugString() const {
PJRT_Memory_DebugString_Args args;
args.struct_size = PJRT_Memory_DebugString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_DebugString(&args),
pjrt_c_api());
return absl::string_view(args.debug_string, args.debug_string_size);
}
absl::string_view PjRtCApiMemorySpace::ToString() const {
PJRT_Memory_ToString_Args args;
args.struct_size = PJRT_Memory_ToString_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.memory = c_memory_;
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Memory_ToString(&args),
pjrt_c_api());
return absl::string_view(args.to_string, args.to_string_size);
}
PjRtCApiExecutable::PjRtCApiExecutable(const PJRT_Api* c_api,
PJRT_Executable* executable)
: c_api_(c_api),
executable_(executable, ::pjrt::MakeExecutableDeleter(c_api)) {}
absl::string_view PjRtCApiExecutable::name() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_Name_Args args;
args.executable = executable;
args.struct_size = PJRT_Executable_Name_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api->PJRT_Executable_Name(&args), c_api);
return absl::string_view(args.executable_name, args.executable_name_size);
}
int PjRtCApiExecutable::num_replicas() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_NumReplicas_Args args;
args.executable = executable;
args.struct_size = PJRT_Executable_NumReplicas_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api->PJRT_Executable_NumReplicas(&args), c_api);
return args.num_replicas;
}
int PjRtCApiExecutable::num_partitions() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_NumPartitions_Args args;
args.executable = executable;
args.struct_size = PJRT_Executable_NumPartitions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(c_api->PJRT_Executable_NumPartitions(&args), c_api);
return args.num_partitions;
}
int64_t PjRtCApiExecutable::SizeOfGeneratedCodeInBytes() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_SizeOfGeneratedCodeInBytes_Args args;
args.struct_size =
PJRT_Executable_SizeOfGeneratedCodeInBytes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
pjrt::LogFatalIfPjrtError(
c_api->PJRT_Executable_SizeOfGeneratedCodeInBytes(&args), c_api);
return args.size_in_bytes;
}
absl::StatusOr<absl::flat_hash_map<std::string, PjRtValueType>>
PjRtCApiExecutable::GetCostAnalysis() const {
PJRT_Executable_GetCostAnalysis_Args args;
args.struct_size = PJRT_Executable_GetCostAnalysis_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_GetCostAnalysis(&args),
c_api);
return pjrt::ConvertFromPjRtNamedValueList(args.properties,
args.num_properties);
}
absl::StatusOr<std::vector<std::vector<PrimitiveType>>>
PjRtCApiExecutable::GetOutputElementTypes() const {
PJRT_Executable_OutputElementTypes_Args args;
args.struct_size = PJRT_Executable_OutputElementTypes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OutputElementTypes(&args),
c_api);
std::vector<PrimitiveType> out;
out.reserve(args.num_output_types);
for (int i = 0; i < args.num_output_types; ++i) {
out.push_back(pjrt::ConvertFromPjRtBufferType(args.output_types[i]));
}
return std::vector<std::vector<PrimitiveType>>{std::move(out)};
}
absl::StatusOr<std::vector<std::vector<DimensionVector>>>
PjRtCApiExecutable::GetOutputDimensions() const {
PJRT_Executable_OutputDimensions_Args args;
args.struct_size = PJRT_Executable_OutputDimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OutputDimensions(&args),
c_api);
std::vector<DimensionVector> out;
out.reserve(args.num_outputs);
int index = 0;
for (int i = 0; i < args.num_outputs; ++i) {
DimensionVector dimensions;
dimensions.reserve(args.dim_sizes[i]);
for (int j = 0; j < args.dim_sizes[i]; ++j) {
dimensions.push_back(args.dims[index++]);
}
out.push_back(std::move(dimensions));
}
return std::vector<std::vector<DimensionVector>>{std::move(out)};
}
absl::StatusOr<std::vector<std::vector<absl::string_view>>>
PjRtCApiExecutable::GetOutputMemoryKinds() const {
PJRT_Executable_OutputMemoryKinds_Args args;
args.struct_size = PJRT_Executable_OutputMemoryKinds_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
const PJRT_Api* c_api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OutputMemoryKinds(&args),
c_api);
std::vector<absl::string_view> out;
out.reserve(args.num_outputs);
for (int i = 0; i < args.num_outputs; ++i) {
out.push_back(
absl::string_view(args.memory_kinds[i], args.memory_kind_sizes[i]));
}
return std::vector<std::vector<absl::string_view>>{std::move(out)};
}
absl::StatusOr<std::vector<std::shared_ptr<HloModule>>>
PjRtCApiExecutable::GetHloModules() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_OptimizedProgram_Args args;
args.struct_size = PJRT_Executable_OptimizedProgram_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
PJRT_Program program;
program.struct_size = PJRT_Program_STRUCT_SIZE;
program.extension_start = nullptr;
program.code = nullptr;
args.program = &program;
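  // The first call with a null code buffer only reports the size of the
  // optimized program; the buffer is then allocated and filled by the second
  // call below.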
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OptimizedProgram(&args),
c_api);
constexpr size_t TWO_GIBIBYTES = 2ull * 1024 * 1024 * 1024;
const size_t code_size = args.program->code_size;
CHECK(code_size < TWO_GIBIBYTES);
std::string code(code_size, ' ');
args.program->code = code.data();
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_OptimizedProgram(&args),
c_api);
absl::string_view program_format(program.format, program.format_size);
if (program_format != ::pjrt::kHloWithConfigFormat &&
program_format != ::pjrt::kMlirFormat) {
return xla::Internal(
"expected program format `hlo_with_config` or `mlir` but got %s",
program_format);
}
if (program_format == ::pjrt::kMlirFormat) {
mlir::MLIRContext ctx;
TF_ASSIGN_OR_RETURN(
mlir::OwningOpRef<mlir::ModuleOp> module,
ParseMlirModuleString(code, ctx));
mlir::PassManager pm(&ctx);
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
if (mlir::failed(pm.run(module.get())))
return xla::Internal("failed to convert to MHLO");
mlir::MlirToHloConversionOptions options;
options.return_tuple = false;
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::HloModule> hlo_module,
mlir::ConvertMlirHloToHloModule(module.get(), options));
std::vector<std::shared_ptr<HloModule>> out;
out.push_back(std::move(hlo_module));
return out;
}
HloModuleProtoWithConfig proto;
proto.ParseFromString(code);
std::vector<std::shared_ptr<HloModule>> out;
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
HloModule::CreateFromProtoWithConfig(proto));
out.push_back(std::move(module));
return out;
}
absl::StatusOr<std::string> PjRtCApiExecutable::SerializeExecutable() const {
auto* c_api = pjrt_c_api();
auto* executable = c_executable();
PJRT_Executable_Serialize_Args ser_args;
ser_args.struct_size = PJRT_Executable_Serialize_Args_STRUCT_SIZE;
ser_args.extension_start = nullptr;
ser_args.executable = executable;
ser_args.serialized_executable = nullptr;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Executable_Serialize(&ser_args),
c_api);
absl::Cleanup cleanup = [&ser_args] {
ser_args.serialized_executable_deleter(ser_args.serialized_executable);
};
return std::string(ser_args.serialized_bytes, ser_args.serialized_bytes_size);
}
absl::StatusOr<std::string> PjRtCApiExecutable::FingerprintExecutable() const {
const PJRT_Api* c_api_ = pjrt_c_api();
PJRT_Executable_Fingerprint_Args args;
args.struct_size = PJRT_Executable_Fingerprint_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_executable();
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_Executable_Fingerprint(&args),
c_api_);
return std::string(args.executable_fingerprint,
args.executable_fingerprint_size);
}
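// Wraps a PJRT_LoadedExecutable, extracts its underlying PJRT_Executable, and
// caches the addressable devices.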
PjRtCApiLoadedExecutable::PjRtCApiLoadedExecutable(
PjRtCApiClient* client, PJRT_LoadedExecutable* executable)
: client_(client),
loaded_executable_(executable, ::pjrt::MakeLoadedExecutableDeleter(
client->pjrt_c_api())) {
PJRT_LoadedExecutable_GetExecutable_Args args;
args.struct_size = PJRT_LoadedExecutable_GetExecutable_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.loaded_executable = c_loaded_executable();
args.executable = nullptr;
pjrt::LogFatalIfPjrtError(
pjrt_c_api()->PJRT_LoadedExecutable_GetExecutable(&args), pjrt_c_api());
executable_ =
std::make_unique<PjRtCApiExecutable>(pjrt_c_api(), args.executable);
InitDevices();
}
void PjRtCApiLoadedExecutable::InitDevices() {
PJRT_LoadedExecutable_AddressableDevices_Args args;
args.struct_size = PJRT_LoadedExecutable_AddressableDevices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
args.addressable_devices = nullptr;
args.num_addressable_devices = 0;
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(
api->PJRT_LoadedExecutable_AddressableDevices(&args), api);
const size_t num_addressable_devices = args.num_addressable_devices;
addressable_devices_.reserve(num_addressable_devices);
for (size_t i = 0; i < num_addressable_devices; ++i) {
PJRT_Device* device = args.addressable_devices[i];
PjRtCApiDevice* c_api_device = client_->GetCppDevice(device);
addressable_devices_.push_back(c_api_device);
}
}
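// Converts per-device lists of C++ PjRtBuffer* into the raw PJRT_Buffer*
// lists expected by the C API.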
static std::vector<std::vector<PJRT_Buffer*>> Convert2DCppBuffersToCBuffers(
absl::Span<const std::vector<PjRtBuffer*>> cpp_lists) {
std::vector<std::vector<PJRT_Buffer*>> c_lists;
c_lists.reserve(cpp_lists.size());
for (const auto& cpp_list : cpp_lists) {
auto& c_list = c_lists.emplace_back();
c_list.reserve(cpp_list.size());
for (PjRtBuffer* buffer : cpp_list) {
auto* c_api_argument = tensorflow::down_cast<PjRtCApiBuffer*>(buffer);
c_list.push_back(c_api_argument->c_buffer());
}
}
return c_lists;
}
static std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>
Convert2DCBuffersToCppBuffers(PJRT_Buffer** const* c_lists, size_t outer_size,
int inner_size, xla::PjRtCApiClient* client) {
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> ret;
for (size_t i = 0; i < outer_size; ++i) {
auto& output_list = ret.emplace_back();
output_list.reserve(inner_size);
for (size_t j = 0; j < inner_size; ++j) {
output_list.push_back(
std::make_unique<PjRtCApiBuffer>(client, c_lists[i][j]));
}
}
return ret;
}
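// Bridges a C++ SendCallback to the C API: the C++ closure is stored in
// *send_callback_function and invoked through a C-compatible trampoline that
// receives it via user_arg.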
PJRT_SendCallbackInfo CppSendCallbackToC(
const xla::SendCallback& cpp_send_callback,
PjRtCApiLoadedExecutable::SendCallbackFunction* send_callback_function) {
*send_callback_function =
[&send_callback = cpp_send_callback.callback](
PJRT_Chunk* chunk, PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done) -> PJRT_Error* {
xla::Shape dummy_shape;
absl::Status status = send_callback(xla::PjRtTransferMetadata{dummy_shape},
::pjrt::ConvertToCppChunk(*chunk),
total_size_in_bytes, done);
if (!status.ok()) {
absl::string_view message = status.message();
return (*callback_error)(pjrt::StatusCodeToPjrtErrorCode(status.code()),
message.data(), message.size());
}
return nullptr;
};
return PJRT_SendCallbackInfo{
cpp_send_callback.channel_id,
send_callback_function,
[](PJRT_Chunk* chunk, PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done, void* user_arg) -> PJRT_Error* {
PjRtCApiLoadedExecutable::SendCallbackFunction* send_callback =
reinterpret_cast<PjRtCApiLoadedExecutable::SendCallbackFunction*>(
user_arg);
return (*send_callback)(chunk, callback_error, total_size_in_bytes,
done);
}};
}
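// Reads the stream's total byte count and granule size from the C API at
// construction time.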
CApiCopyToDeviceStream::CApiCopyToDeviceStream(
PJRT_CopyToDeviceStream* c_stream, const PJRT_Api* c_api)
: CopyToDeviceStream(0, 0),
c_stream_(c_stream),
c_api_(c_api) {
PJRT_CopyToDeviceStream_TotalBytes_Args total_bytes_args;
total_bytes_args.struct_size =
PJRT_CopyToDeviceStream_TotalBytes_Args_STRUCT_SIZE;
total_bytes_args.extension_start = nullptr;
total_bytes_args.stream = c_stream_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_CopyToDeviceStream_TotalBytes(&total_bytes_args), c_api_);
total_bytes_ = total_bytes_args.total_bytes;
PJRT_CopyToDeviceStream_GranuleSize_Args granule_size_args;
granule_size_args.struct_size =
PJRT_CopyToDeviceStream_GranuleSize_Args_STRUCT_SIZE;
granule_size_args.extension_start = nullptr;
granule_size_args.stream = c_stream_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_CopyToDeviceStream_GranuleSize(&granule_size_args), c_api_);
granule_bytes_ = granule_size_args.granule_size_in_bytes;
}
CApiCopyToDeviceStream::~CApiCopyToDeviceStream() {
PJRT_CopyToDeviceStream_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_CopyToDeviceStream_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.stream = c_stream_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_CopyToDeviceStream_Destroy(&destroy_args), c_api_);
}
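// Enqueues a chunk on the C stream and refreshes current_bytes_ under the
// lock; completion is exposed as a future built from transfer_complete.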
PjRtFuture<> CApiCopyToDeviceStream::AddChunk(PjRtChunk chunk) {
PJRT_Chunk c_chunk = ::pjrt::ConvertFromCppChunk(std::move(chunk));
PJRT_CopyToDeviceStream_AddChunk_Args add_chunk_args;
add_chunk_args.struct_size =
PJRT_CopyToDeviceStream_AddChunk_Args_STRUCT_SIZE;
add_chunk_args.extension_start = nullptr;
add_chunk_args.stream = c_stream_;
add_chunk_args.chunk = &c_chunk;
PJRT_CopyToDeviceStream_CurrentBytes_Args current_bytes_args;
current_bytes_args.struct_size =
PJRT_CopyToDeviceStream_CurrentBytes_Args_STRUCT_SIZE;
current_bytes_args.extension_start = nullptr;
current_bytes_args.stream = c_stream_;
{
absl::MutexLock lock(&mu_);
RETURN_FUTURE_IF_ERROR(
c_api_->PJRT_CopyToDeviceStream_AddChunk(&add_chunk_args), c_api_);
RETURN_FUTURE_IF_ERROR(
        c_api_->PJRT_CopyToDeviceStream_CurrentBytes(&current_bytes_args),
c_api_);
current_bytes_ = current_bytes_args.current_bytes;
}
CHECK(add_chunk_args.transfer_complete != nullptr);
return ::pjrt::ConvertCEventToCppFuture(add_chunk_args.transfer_complete,
c_api_);
}
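// Bridges a C++ RecvCallback to the C API, wrapping the incoming C stream in
// a CApiCopyToDeviceStream.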
PJRT_RecvCallbackInfo CppRecvCallbackToC(
const xla::RecvCallback& cpp_recv_callback, const PJRT_Api* c_api,
PjRtCApiLoadedExecutable::RecvCallbackFunction* recv_callback_function) {
*recv_callback_function = [&recv_callback = cpp_recv_callback.callback,
c_api](PJRT_CopyToDeviceStream* stream) {
xla::Shape dummy_shape;
recv_callback(xla::PjRtTransferMetadata{dummy_shape},
std::make_unique<CApiCopyToDeviceStream>(stream, c_api));
};
return PJRT_RecvCallbackInfo{
cpp_recv_callback.channel_id,
recv_callback_function,
[](PJRT_CopyToDeviceStream* stream, void* user_arg) {
PjRtCApiLoadedExecutable::RecvCallbackFunction* recv_callback =
reinterpret_cast<PjRtCApiLoadedExecutable::RecvCallbackFunction*>(
user_arg);
(*recv_callback)(stream);
}};
}
static void CppSendCallbackListsToC(
absl::Span<const std::vector<xla::SendCallback>> cpp_lists,
std::vector<PjRtCApiLoadedExecutable::SendCallbackFunction>&
send_callback_functions,
std::vector<std::vector<PJRT_SendCallbackInfo>>& c_lists) {
if (cpp_lists.empty()) return;
send_callback_functions.resize(cpp_lists.size() * cpp_lists[0].size());
c_lists.reserve(cpp_lists.size());
int func_count = 0;
for (const std::vector<xla::SendCallback>& cpp_list : cpp_lists) {
std::vector<PJRT_SendCallbackInfo>& c_list = c_lists.emplace_back();
c_list.reserve(cpp_list.size());
for (const xla::SendCallback& cpp_callback : cpp_list) {
c_list.emplace_back(CppSendCallbackToC(
cpp_callback, &send_callback_functions[func_count++]));
}
}
}
static void CppRecvCallbackListsToC(
absl::Span<const std::vector<xla::RecvCallback>> cpp_lists,
const PJRT_Api* c_api,
std::vector<PjRtCApiLoadedExecutable::RecvCallbackFunction>&
recv_callback_functions,
std::vector<std::vector<PJRT_RecvCallbackInfo>>& c_lists) {
if (cpp_lists.empty()) return;
recv_callback_functions.resize(cpp_lists.size() * cpp_lists[0].size());
c_lists.reserve(cpp_lists.size());
int func_count = 0;
for (const auto& cpp_list : cpp_lists) {
std::vector<PJRT_RecvCallbackInfo>& c_list = c_lists.emplace_back();
c_list.reserve(cpp_list.size());
for (const auto& cpp_callback : cpp_list) {
c_list.emplace_back(CppRecvCallbackToC(
cpp_callback, c_api, &recv_callback_functions[func_count++]));
}
}
}
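// Assembles the PJRT_LoadedExecutable_Execute_Args shared by Execute and
// ExecuteWithSingleDevice: argument/output buffer lists, non-donatable input
// indices, optional device-completion events, and send/recv callback tables.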
absl::StatusOr<PJRT_LoadedExecutable_Execute_Args>
PjRtCApiLoadedExecutable::GetCommonExecuteArgs(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options, PJRT_ExecuteOptions& c_options,
std::vector<std::vector<PJRT_Buffer*>>& c_argument_lists_storage,
std::vector<PJRT_Buffer**>& c_arguments,
std::vector<std::vector<PJRT_Buffer*>>& c_output_lists_storage,
std::vector<PJRT_Buffer**>& c_output_lists,
std::optional<std::vector<PJRT_Event*>>& device_complete_events,
SendRecvCallbackData& callback_data,
std::vector<int64_t>& non_donatable_input_indices_storage) {
bool using_host_callbacks =
!options.send_callbacks.empty() || !options.recv_callbacks.empty();
if (using_host_callbacks &&
!options.use_major_to_minor_data_layout_for_callbacks) {
return Unimplemented(
"PJRT C API doesn't support "
"ExecuteOptions::use_major_to_minor_data_layout_for_callbacks = false");
}
PJRT_LoadedExecutable_Execute_Args args;
args.struct_size = PJRT_LoadedExecutable_Execute_Args_STRUCT_SIZE;
args.executable = c_loaded_executable();
args.options = &c_options;
args.options->struct_size = PJRT_ExecuteOptions_STRUCT_SIZE;
args.options->launch_id = options.launch_id;
for (auto i : options.non_donatable_input_indices) {
non_donatable_input_indices_storage.push_back(i);
}
args.options->num_non_donatable_input_indices =
options.non_donatable_input_indices.size();
args.options->non_donatable_input_indices =
non_donatable_input_indices_storage.data();
args.num_devices = argument_handles.size();
CHECK_GT(args.num_devices, 0);
args.num_args = argument_handles[0].size();
if (device_complete_events.has_value() || using_host_callbacks) {
device_complete_events->resize(args.num_devices);
args.device_complete_events = device_complete_events->data();
} else {
args.device_complete_events = nullptr;
}
c_argument_lists_storage = Convert2DCppBuffersToCBuffers(argument_handles);
c_arguments.reserve(c_argument_lists_storage.size());
for (auto& argument_list : c_argument_lists_storage) {
c_arguments.push_back(argument_list.data());
}
args.argument_lists = c_arguments.data();
PJRT_Executable_NumOutputs_Args numoutputs_args;
numoutputs_args.struct_size = PJRT_Executable_NumOutputs_Args_STRUCT_SIZE;
numoutputs_args.extension_start = nullptr;
numoutputs_args.executable = c_executable();
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Executable_NumOutputs(&numoutputs_args), pjrt_c_api());
size_t outer_size = args.num_devices;
size_t inner_size = numoutputs_args.num_outputs;
c_output_lists_storage.resize(outer_size);
c_output_lists.resize(outer_size);
for (int i = 0; i < outer_size; ++i) {
c_output_lists_storage[i].resize(inner_size);
c_output_lists[i] = c_output_lists_storage[i].data();
}
args.output_lists = c_output_lists.data();
if (!options.send_callbacks.empty()) {
CppSendCallbackListsToC(options.send_callbacks,
callback_data.send_callback_functions,
callback_data.c_send_callbacks);
for (auto& c_send_callback_list : callback_data.c_send_callbacks) {
callback_data.c_send_callback_lists.push_back(
c_send_callback_list.data());
}
args.options->send_callbacks = callback_data.c_send_callback_lists.data();
args.options->num_send_ops = options.send_callbacks[0].size();
}
if (!options.recv_callbacks.empty()) {
CppRecvCallbackListsToC(options.recv_callbacks, pjrt_c_api(),
callback_data.recv_callback_functions,
callback_data.c_recv_callbacks);
for (auto& c_recv_callback_list : callback_data.c_recv_callbacks) {
callback_data.c_recv_callback_lists.push_back(
c_recv_callback_list.data());
}
args.options->recv_callbacks = callback_data.c_recv_callback_lists.data();
args.options->num_recv_ops = options.recv_callbacks[0].size();
}
return args;
}
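// Multi-device execution. When returned_futures is requested, per-device
// completion events are converted to futures; those futures also keep the
// send/recv callback data alive until execution finishes.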
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
PjRtCApiLoadedExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
std::vector<std::vector<PJRT_Buffer*>> c_argument_lists_storage;
std::vector<std::vector<PJRT_Buffer*>> c_output_lists_storage;
std::vector<PJRT_Buffer**> c_output_lists;
std::vector<int64_t> non_donatable_input_indices_storage;
PJRT_ExecuteOptions c_options;
c_options.num_send_ops = 0;
c_options.num_recv_ops = 0;
std::vector<PJRT_Buffer**> c_arguments;
std::optional<std::vector<PJRT_Event*>> device_complete_events;
if (returned_futures.has_value()) {
device_complete_events.emplace();
}
auto callback_data = std::make_shared<SendRecvCallbackData>();
TF_ASSIGN_OR_RETURN(
PJRT_LoadedExecutable_Execute_Args args,
GetCommonExecuteArgs(argument_handles, options, c_options,
c_argument_lists_storage, c_arguments,
c_output_lists_storage, c_output_lists,
device_complete_events, *callback_data,
non_donatable_input_indices_storage));
args.execute_device = nullptr;
PJRT_Profiler_Extension profiler_extension =
pjrt::CreatePjrtProfilerExtension(
"PJRT_LoadedExecutable_Execute linkage");
args.extension_start =
reinterpret_cast<PJRT_Extension_Base*>(&profiler_extension);
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_LoadedExecutable_Execute(&args), pjrt_c_api());
if (device_complete_events.has_value()) {
std::vector<PjRtFuture<>> device_complete_futures;
device_complete_futures.reserve(args.num_devices);
for (int i = 0; i < args.num_devices; ++i) {
device_complete_futures.push_back(pjrt::ConvertCEventToCppFuture(
args.device_complete_events[i], pjrt_c_api()));
if (!callback_data->c_send_callbacks.empty() ||
!callback_data->c_recv_callbacks.empty()) {
device_complete_futures.back().OnReady(
[callback_data](absl::Status status) {
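            // Holds callback_data (the C send/recv callback tables and their
            // closures) alive until this device's execution completes.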
});
}
}
if (returned_futures.has_value()) {
*returned_futures = std::move(device_complete_futures);
}
}
return Convert2DCBuffersToCppBuffers(args.output_lists, args.num_devices,
c_output_lists_storage[0].size(),
client_);
}
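// Single-device path shared by ExecuteSharded and ExecutePortable; send/recv
// callbacks are not supported here.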
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtCApiLoadedExecutable::ExecuteWithSingleDevice(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
if (!options.send_callbacks.empty() || !options.recv_callbacks.empty()) {
return absl::Status(absl::StatusCode::kUnimplemented,
"Send/recv callbacks not implemented for "
"PjRtCApiLoadedExecutable::ExecuteWithSingleDevice.");
}
std::vector<std::vector<PjRtBuffer*>> argument_handles_vec = {
{argument_handles.begin(), argument_handles.end()}};
std::vector<std::vector<PJRT_Buffer*>> c_argument_lists_storage;
std::vector<std::vector<PJRT_Buffer*>> c_output_lists_storage;
std::vector<PJRT_Buffer**> c_output_lists;
std::vector<int64_t> non_donatable_input_indices_storage;
PJRT_ExecuteOptions c_options;
c_options.num_send_ops = 0;
c_options.num_recv_ops = 0;
std::vector<PJRT_Buffer**> c_arguments;
std::optional<std::vector<PJRT_Event*>> device_complete_events;
if (fill_future) {
device_complete_events.emplace();
}
auto callback_data = std::make_shared<SendRecvCallbackData>();
TF_ASSIGN_OR_RETURN(
PJRT_LoadedExecutable_Execute_Args args,
GetCommonExecuteArgs(argument_handles_vec, options, c_options,
c_argument_lists_storage, c_arguments,
c_output_lists_storage, c_output_lists,
device_complete_events, *callback_data,
non_donatable_input_indices_storage));
args.execute_device =
tensorflow::down_cast<PjRtCApiDevice*>(device)->c_device();
PJRT_Profiler_Extension profiler_extension =
pjrt::CreatePjrtProfilerExtension(
"PJRT_LoadedExecutable_Execute linkage");
args.extension_start =
reinterpret_cast<PJRT_Extension_Base*>(&profiler_extension);
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_LoadedExecutable_Execute(&args), pjrt_c_api());
if (fill_future) {
returned_future = pjrt::ConvertCEventToCppFuture(
args.device_complete_events[0], pjrt_c_api());
}
return std::move(Convert2DCBuffersToCppBuffers(
args.output_lists, args.num_devices, c_output_lists_storage[0].size(),
client_)[0]);
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtCApiLoadedExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
return ExecuteWithSingleDevice(argument_handles, device, options,
returned_future, fill_future);
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtCApiLoadedExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
return ExecuteWithSingleDevice(argument_handles, device, options,
returned_future, fill_future);
}
void PjRtCApiLoadedExecutable::Delete() {
PJRT_LoadedExecutable_Delete_Args args;
args.struct_size = PJRT_LoadedExecutable_Delete_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
const PJRT_Api* c_api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(c_api->PJRT_LoadedExecutable_Delete(&args), c_api);
}
bool PjRtCApiLoadedExecutable::IsDeleted() {
PJRT_LoadedExecutable_IsDeleted_Args args;
args.struct_size = PJRT_LoadedExecutable_IsDeleted_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
const PJRT_Api* c_api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(c_api->PJRT_LoadedExecutable_IsDeleted(&args),
c_api);
return args.is_deleted;
}
absl::StatusOr<std::string> PjRtCApiLoadedExecutable::FingerprintExecutable()
const {
absl::StatusOr<std::string> fingerprint =
executable_->FingerprintExecutable();
if (fingerprint.ok()) {
return *fingerprint;
}
if (fingerprint.status().code() != absl::StatusCode::kUnimplemented) {
return fingerprint.status();
}
PJRT_LoadedExecutable_Fingerprint_Args args;
args.struct_size = PJRT_LoadedExecutable_Fingerprint_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = c_loaded_executable();
const PJRT_Api* c_api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
c_api->PJRT_LoadedExecutable_Fingerprint(&args),
pjrt::MakeErrorDeleter(c_api));
if (error) {
return ::pjrt::PjrtErrorToStatus(error.get(), c_api);
}
return std::string(args.executable_fingerprint,
args.executable_fingerprint_size);
}
PjRtCApiBuffer::PjRtCApiBuffer(PjRtCApiClient* client, PJRT_Buffer* buffer)
: client_(client),
buffer_(buffer, ::pjrt::MakeBufferDeleter(client->pjrt_c_api())),
readiness_event_(nullptr,
::pjrt::MakeEventDeleter(client->pjrt_c_api())) {}
PrimitiveType PjRtCApiBuffer::element_type() const {
PJRT_Buffer_ElementType_Args args;
args.struct_size = PJRT_Buffer_ElementType_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Buffer_ElementType(&args),
pjrt_c_api());
return pjrt::ConvertFromPjRtBufferType(args.type);
}
absl::Span<const int64_t> PjRtCApiBuffer::dimensions() const {
PJRT_Buffer_Dimensions_Args args;
args.struct_size = PJRT_Buffer_Dimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
pjrt::LogFatalIfPjrtError(pjrt_c_api()->PJRT_Buffer_Dimensions(&args),
pjrt_c_api());
return absl::Span<const int64_t>(args.dims, args.num_dims);
}
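// Lazily computes and caches the layout. If the plugin implements the Layouts
// extension, the serialized layout is deserialized; otherwise a descending
// (major-to-minor) layout is assumed.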
std::unique_ptr<PjRtLayout> PjRtCApiBuffer::layout() const {
{
absl::MutexLock lock(&mu_);
if (!layout_.has_value()) {
const PJRT_Api* c_api = pjrt_c_api();
PJRT_Layouts_Extension* extension =
pjrt::FindExtension<PJRT_Layouts_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Layouts);
if (extension == nullptr) {
layout_.emplace(LayoutUtil::MakeDescendingLayout(dimensions().size()));
} else {
std::unique_ptr<PJRT_Layouts_MemoryLayout,
pjrt::PJRT_Layouts_MemoryLayoutDeleter>
layout = pjrt::GetMemoryLayout(c_api, buffer_.get());
PJRT_Layouts_MemoryLayout_Serialize_Args serialize_args;
serialize_args.struct_size =
PJRT_Layouts_MemoryLayout_Serialize_Args_STRUCT_SIZE;
serialize_args.extension_start = nullptr;
serialize_args.layout = layout.get();
pjrt::LogFatalIfPjrtError(
extension->PJRT_Layouts_MemoryLayout_Serialize(&serialize_args),
c_api);
absl::Cleanup cleanup = [&serialize_args] {
serialize_args.serialized_layout_deleter(
serialize_args.serialized_layout);
};
std::string serialized_layout(serialize_args.serialized_bytes,
serialize_args.serialized_bytes_size);
absl::StatusOr<PjRtXlaLayout> pjrt_xla_layout =
PjRtXlaLayout::Deserialize(serialized_layout);
TF_CHECK_OK(pjrt_xla_layout.status());
layout_.emplace(*pjrt_xla_layout);
}
}
}
return std::make_unique<PjRtXlaLayout>(*layout_);
}
bool PjRtCApiBuffer::has_dynamic_dimensions() const {
PJRT_Buffer_DynamicDimensionIndices_Args args;
args.struct_size = PJRT_Buffer_DynamicDimensionIndices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
api->PJRT_Buffer_DynamicDimensionIndices(&args),
pjrt::MakeErrorDeleter(api));
if (error &&
pjrt::GetErrorCode(error.get(), api) == PJRT_Error_Code_UNIMPLEMENTED) {
return false;
}
return args.num_dynamic_dims > 0;
}
absl::Span<const bool> PjRtCApiBuffer::is_dynamic_dimension() const {
{
absl::MutexLock lock(&mu_);
if (!is_dynamic_dimension_.has_value()) {
absl::InlinedVector<bool, InlineRank()>& is_dynamic_dimension_value =
is_dynamic_dimension_.emplace();
is_dynamic_dimension_value.assign(dimensions().size(), false);
PJRT_Buffer_DynamicDimensionIndices_Args args;
args.struct_size = PJRT_Buffer_DynamicDimensionIndices_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
api->PJRT_Buffer_DynamicDimensionIndices(&args),
pjrt::MakeErrorDeleter(api));
if (error && pjrt::GetErrorCode(error.get(), api) ==
PJRT_Error_Code_UNIMPLEMENTED) {
return *is_dynamic_dimension_;
}
for (int i = 0; i < args.num_dynamic_dims; ++i) {
is_dynamic_dimension_value[args.dynamic_dim_indices[i]] = true;
}
}
}
return *is_dynamic_dimension_;
}
absl::StatusOr<std::vector<int64_t>> PjRtCApiBuffer::logical_dimensions() {
PJRT_Buffer_UnpaddedDimensions_Args args;
args.struct_size = PJRT_Buffer_UnpaddedDimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Buffer_UnpaddedDimensions(&args), pjrt_c_api());
return std::vector<int64_t>(args.unpadded_dims,
args.unpadded_dims + args.num_dims);
}
PjRtFuture<> PjRtCApiBuffer::LazyToLiteral(
absl::AnyInvocable<absl::StatusOr<MutableLiteralBase*>() &&> generator) {
auto buffer = std::move(generator)();
if (!buffer.ok()) {
return PjRtFuture<>(buffer.status());
}
return ToLiteral(buffer.value());
}
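// Copies the buffer into the literal's host memory via
// PJRT_Buffer_ToHostBuffer, forwarding the literal's layout when present.
// Only array shapes are supported.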
PjRtFuture<> PjRtCApiBuffer::ToLiteral(MutableLiteralBase* literal) {
PJRT_Buffer_ToHostBuffer_Args args;
args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.src = buffer_.get();
const xla::Shape& shape = literal->shape();
if (!shape.IsArray()) {
return PjRtFuture<>(
Unimplemented("PjRtCApiBuffer::ToLiteral: Shapes other than array are"
"not supported."));
}
args.dst_size = ShapeUtil::ByteSizeOfElements(shape);
args.dst = literal->untyped_data();
absl::StatusOr<pjrt::BufferMemoryLayoutData> c_layout_data;
if (literal->shape().has_layout()) {
c_layout_data =
pjrt::ConvertToBufferMemoryLayoutData(literal->shape().layout());
if (!c_layout_data.ok()) {
return PjRtFuture<>(c_layout_data.status());
}
args.host_layout = &(c_layout_data->c_layout);
} else {
args.host_layout = nullptr;
}
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, ::pjrt::PJRT_ErrorDeleter> error{
pjrt_c_api()->PJRT_Buffer_ToHostBuffer(&args),
::pjrt::MakeErrorDeleter(api)};
if (error != nullptr) {
return PjRtFuture<>(::pjrt::PjrtErrorToStatus(error.get(), api));
}
return pjrt::ConvertCEventToCppFuture(args.event, api);
}
absl::StatusOr<size_t> PjRtCApiBuffer::GetOnDeviceSizeInBytes() const {
PJRT_Buffer_OnDeviceSizeInBytes_Args args;
args.struct_size = PJRT_Buffer_OnDeviceSizeInBytes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
RETURN_STATUS_IF_PJRT_ERROR(
client_->pjrt_c_api()->PJRT_Buffer_OnDeviceSizeInBytes(&args),
client_->pjrt_c_api());
return args.on_device_size_in_bytes;
}
PjRtMemorySpace* PjRtCApiBuffer::memory_space() const {
PJRT_Buffer_Memory_Args args;
args.struct_size = PJRT_Buffer_Memory_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> error(
api->PJRT_Buffer_Memory(&args), pjrt::MakeErrorDeleter(api));
if (error == nullptr && args.memory != nullptr) {
return client_->GetCppMemory(args.memory);
} else if (error != nullptr && pjrt::GetErrorCode(error.get(), api) !=
PJRT_Error_Code_UNIMPLEMENTED) {
pjrt::LogFatalIfPjrtError(error.get(), api);
}
return nullptr;
}
PjRtDevice* PjRtCApiBuffer::device() const {
PJRT_Buffer_Device_Args args;
args.struct_size = PJRT_Buffer_Device_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_Device(&args), api);
return client_->GetCppDevice(args.device);
}
void PjRtCApiBuffer::Delete() {
PJRT_Buffer_Delete_Args args;
args.struct_size = PJRT_Buffer_Delete_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_Delete(&args), api);
}
bool PjRtCApiBuffer::IsDeleted() {
PJRT_Buffer_IsDeleted_Args args;
args.struct_size = PJRT_Buffer_IsDeleted_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_IsDeleted(&args), api);
return args.is_deleted;
}
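// Same-client copies use PJRT_Buffer_CopyToDevice; cross-client copies
// round-trip through a host literal.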
absl::StatusOr<std::unique_ptr<PjRtBuffer>> PjRtCApiBuffer::CopyToDevice(
PjRtDevice* dst_device) {
if (dst_device->client() == client_) {
PJRT_Buffer_CopyToDevice_Args args;
args.struct_size = PJRT_Buffer_CopyToDevice_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
args.dst_device =
tensorflow::down_cast<PjRtCApiDevice*>(dst_device)->c_device();
const PJRT_Api* api = pjrt_c_api();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Buffer_CopyToDevice(&args), api);
return std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(client_, args.dst_buffer));
} else {
TF_ASSIGN_OR_RETURN(std::shared_ptr<Literal> literal, ToLiteralSync());
absl::InlinedVector<int64_t, 4> byte_strides(
literal->shape().dimensions_size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(literal->shape(), absl::MakeSpan(byte_strides)));
Literal* literal_pointer = literal.get();
return dst_device->client()->BufferFromHostBuffer(
literal_pointer->untyped_data(),
literal_pointer->shape().element_type(),
literal_pointer->shape().dimensions(), byte_strides,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
        [literal{std::move(literal)}]() { /* frees literal */ }, dst_device);
}
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> PjRtCApiBuffer::CopyToMemorySpace(
PjRtMemorySpace* dst_memory) {
const PJRT_Api* api = pjrt_c_api();
if (dst_memory->client() == client_) {
PJRT_Buffer_CopyToMemory_Args args;
args.struct_size = PJRT_Buffer_CopyToMemory_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
args.dst_memory =
tensorflow::down_cast<PjRtCApiMemorySpace*>(dst_memory)->c_memory();
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Buffer_CopyToMemory(&args), api);
return std::unique_ptr<PjRtBuffer>(
std::make_unique<PjRtCApiBuffer>(client_, args.dst_buffer));
} else {
TF_ASSIGN_OR_RETURN(std::shared_ptr<Literal> literal, ToLiteralSync());
absl::InlinedVector<int64_t, 4> byte_strides(
literal->shape().dimensions_size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(literal->shape(), absl::MakeSpan(byte_strides)));
Literal* literal_pointer = literal.get();
return dst_memory->client()->BufferFromHostBuffer(
literal_pointer->untyped_data(),
literal_pointer->shape().element_type(),
literal_pointer->shape().dimensions(), byte_strides,
PjRtClient::HostBufferSemantics::kImmutableZeroCopy,
        [literal{std::move(literal)}]() { /* frees literal */ }, dst_memory,
nullptr);
}
}
bool PjRtCApiBuffer::IsOnCpu() const {
PJRT_Buffer_IsOnCpu_Args args;
args.struct_size = PJRT_Buffer_IsOnCpu_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
const PJRT_Api* api = pjrt_c_api();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_IsOnCpu(&args), api);
return args.is_on_cpu;
}
PJRT_Event* PjRtCApiBuffer::GetReadyEvent() {
if (readiness_event_ == nullptr) {
const PJRT_Api* api = pjrt_c_api();
PJRT_Buffer_ReadyEvent_Args args;
args.struct_size = PJRT_Buffer_ReadyEvent_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_.get();
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_ReadyEvent(&args), api);
readiness_event_.reset(args.event);
}
return readiness_event_.get();
}
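// Registers an on-ready callback on the buffer's ready event that fulfills
// readiness_promise_; the heap-allocated std::function is deleted by the
// trampoline after it runs.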
void PjRtCApiBuffer::MakePromiseTrackEvent() {
CHECK(readiness_promise_ != nullptr);
const PJRT_Api* api = pjrt_c_api();
PJRT_Event_OnReady_Args args;
args.struct_size = PJRT_Event_OnReady_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.event = GetReadyEvent();
args.user_arg = new std::function<void(PJRT_Error*)>(
[promise = readiness_promise_, api](PJRT_Error* error) -> void {
promise->Set(::pjrt::PjrtErrorToStatus(error, api));
::pjrt::MakeErrorDeleter(api)(error);
});
args.callback = [](PJRT_Error* error, void* callback_ptr) {
auto callback =
static_cast<std::function<void(PJRT_Error*)>*>(callback_ptr);
CHECK(callback != nullptr);
(*callback)(error);
delete callback;
};
std::unique_ptr<PJRT_Error, ::pjrt::PJRT_ErrorDeleter> error{
api->PJRT_Event_OnReady(&args), ::pjrt::MakeErrorDeleter(api)};
if (error != nullptr) {
readiness_promise_->Set(::pjrt::PjrtErrorToStatus(error.get(), api));
}
}
PjRtFuture<> PjRtCApiBuffer::GetReadyFuture() {
if (readiness_promise_ == nullptr) {
readiness_promise_ =
std::make_shared<PjRtFuture<>::Promise>(PjRtFuture<>::CreatePromise());
MakePromiseTrackEvent();
}
return PjRtFuture<>{*readiness_promise_};
}
absl::StatusOr<std::unique_ptr<PjRtBuffer::ExternalReference>>
PjRtCApiBuffer::AcquireExternalReference() {
PJRT_Buffer_IncreaseExternalReferenceCount_Args increase_reference_count_args;
increase_reference_count_args.buffer = c_buffer();
increase_reference_count_args.struct_size =
PJRT_Buffer_IncreaseExternalReferenceCount_Args_STRUCT_SIZE;
increase_reference_count_args.extension_start = nullptr;
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Buffer_IncreaseExternalReferenceCount(
&increase_reference_count_args),
pjrt_c_api());
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args
opaque_device_memory_data_pointer_args;
opaque_device_memory_data_pointer_args.struct_size =
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
opaque_device_memory_data_pointer_args.extension_start = nullptr;
opaque_device_memory_data_pointer_args.buffer = c_buffer();
RETURN_STATUS_IF_PJRT_ERROR(
pjrt_c_api()->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(
&opaque_device_memory_data_pointer_args),
pjrt_c_api());
void* device_memory_ptr =
opaque_device_memory_data_pointer_args.device_memory_ptr;
return std::make_unique<PjRtCApiExternalReference>(client_, this,
device_memory_ptr);
}
PjRtCApiExternalReference::~PjRtCApiExternalReference() {
PJRT_Buffer_DecreaseExternalReferenceCount_Args args;
args.struct_size =
PJRT_Buffer_DecreaseExternalReferenceCount_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer_->c_buffer();
pjrt::LogFatalIfPjrtError(
client_->pjrt_c_api()->PJRT_Buffer_DecreaseExternalReferenceCount(&args),
client_->pjrt_c_api());
}
absl::Status PjRtCApiExternalReference::WaitUntilBufferReadyOnStream(
std::intptr_t stream) {
const PJRT_Api* c_api = buffer_->pjrt_c_api();
PJRT_Stream_Extension* extension = pjrt::FindExtension<PJRT_Stream_Extension>(
c_api, PJRT_Extension_Type::PJRT_Extension_Type_Stream);
if (extension == nullptr) {
return absl::UnimplementedError(
"Stream extension not implemented in this PJRT plugin.");
}
PJRT_Wait_Until_Buffer_Ready_On_Stream_Args args;
args.struct_size = PJRT_Wait_Until_Buffer_Ready_On_Stream_Args_STRUCT_SIZE;
args.stream = stream;
args.buffer = buffer_->c_buffer();
RETURN_STATUS_IF_PJRT_ERROR(extension->wait_stream(&args), c_api);
return absl::OkStatus();
}
PjRtCApiTopologyDescription::PjRtCApiTopologyDescription(
const PJRT_Api* c_api, PJRT_TopologyDescription* c_topology, bool owned)
: compiler_(std::make_unique<PjRtCApiCompiler>(c_api)),
c_api_(c_api),
c_topology_(c_topology) {
if (owned) {
owned_c_topology_ = std::unique_ptr<PJRT_TopologyDescription,
pjrt::PJRT_TopologyDescriptionDeleter>(
c_topology, pjrt::MakeTopologyDescriptionDeleter(c_api));
}
InitAttributes();
}
absl::string_view PjRtCApiTopologyDescription::platform_name() const {
PJRT_TopologyDescription_PlatformName_Args args;
args.topology = c_topology_;
args.struct_size = PJRT_TopologyDescription_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_TopologyDescription_PlatformName(&args), c_api_);
return absl::string_view(args.platform_name, args.platform_name_size);
}
absl::string_view PjRtCApiTopologyDescription::platform_version() const {
PJRT_TopologyDescription_PlatformVersion_Args args;
args.struct_size = PJRT_TopologyDescription_PlatformVersion_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_TopologyDescription_PlatformVersion(&args), c_api_);
return absl::string_view(args.platform_version, args.platform_version_size);
}
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
PjRtCApiTopologyDescription::DeviceDescriptions() const {
PJRT_TopologyDescription_GetDeviceDescriptions_Args args;
args.struct_size =
PJRT_TopologyDescription_GetDeviceDescriptions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
pjrt::LogFatalIfPjrtError(
c_api_->PJRT_TopologyDescription_GetDeviceDescriptions(&args), c_api_);
std::vector<std::unique_ptr<const PjRtDeviceDescription>> out;
out.reserve(args.num_descriptions);
for (PJRT_DeviceDescription* device_desc :
absl::Span<PJRT_DeviceDescription* const>(args.descriptions,
args.num_descriptions)) {
out.push_back(
std::make_unique<PjRtCApiDeviceDescription>(c_api_, device_desc));
}
return out;
}
absl::StatusOr<std::string> PjRtCApiTopologyDescription::Serialize() const {
PJRT_TopologyDescription_Serialize_Args args;
args.struct_size = PJRT_TopologyDescription_Serialize_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
RETURN_STATUS_IF_PJRT_ERROR(c_api_->PJRT_TopologyDescription_Serialize(&args),
c_api_);
auto out = std::string(args.serialized_bytes, args.serialized_bytes_size);
args.serialized_topology_deleter(args.serialized_topology);
return out;
}
void PjRtCApiTopologyDescription::InitAttributes() {
PJRT_TopologyDescription_Attributes_Args args;
args.struct_size = PJRT_TopologyDescription_Attributes_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = c_topology_;
pjrt::LogFatalIfPjrtError(c_api_->PJRT_TopologyDescription_Attributes(&args),
c_api_);
attributes_ =
pjrt::ConvertFromPjRtNamedValueList(args.attributes, args.num_attributes);
}
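// Shared ahead-of-time compilation path: serializes the compile options and
// program, then invokes PJRT_Compile (client may be null when compiling
// against a bare topology).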
static absl::StatusOr<std::unique_ptr<PjRtExecutable>>
InitializeArgsAndCompileAot(const PJRT_Api* c_api, PjRtClient* client,
const CompileOptions& options,
const PjRtTopologyDescription& topology,
const std::string& code,
const std::string& format) {
PJRT_Compile_Args args;
args.struct_size = PJRT_Compile_Args_STRUCT_SIZE;
args.extension_start = nullptr;
if (client == nullptr) {
args.client = nullptr;
} else {
args.client =
tensorflow::down_cast<PjRtCApiClient*>(client)->pjrt_c_client();
}
args.topology =
tensorflow::down_cast<const PjRtCApiTopologyDescription*>(&topology)
->c_topology();
TF_ASSIGN_OR_RETURN(const CompileOptionsProto options_proto,
options.ToProto());
std::string options_str = options_proto.SerializeAsString();
args.compile_options = options_str.c_str();
args.compile_options_size = options_str.size();
PJRT_Program program;
program.struct_size = PJRT_Program_STRUCT_SIZE;
program.extension_start = nullptr;
program.code = const_cast<char*>(code.c_str());
program.code_size = code.size();
program.format = format.c_str();
program.format_size = format.size();
args.program = &program;
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Compile(&args), c_api);
std::unique_ptr<PjRtExecutable> ret =
std::make_unique<PjRtCApiExecutable>(c_api, args.executable);
return ret;
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCApiCompiler::Compile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) {
std::string module_str = computation.proto().SerializeAsString();
std::string format(pjrt::kHloFormat);
return InitializeArgsAndCompileAot(c_api_, client, options, topology,
module_str, format);
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCApiCompiler::Compile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) {
std::optional<int64_t> plugin_version;
if (client) {
plugin_version = client->plugin_attributes()->pjrt_c_api_minor_version;
}
TF_ASSIGN_OR_RETURN(
std::string serialized,
xla::Serialize(module, xla::GetDefaultStablehloVersion(plugin_version)));
std::string format(pjrt::kMlirFormat);
return InitializeArgsAndCompileAot(c_api_, client, options, topology,
serialized, format);
}
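// Creates a PjRtCApiClient for the given device type, forwarding create
// options and optional key-value store callbacks to PJRT_Client_Create.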
absl::StatusOr<std::unique_ptr<PjRtClient>> GetCApiClient(
absl::string_view device_type,
const absl::flat_hash_map<std::string, PjRtValueType>& create_options,
std::shared_ptr<KeyValueStoreInterface> kv_store) {
TF_ASSIGN_OR_RETURN(const PJRT_Api* c_api, pjrt::PjrtApi(device_type));
if (c_api == nullptr) {
return Internal("PJRT C API is nullptr for %s", device_type);
}
PJRT_Client_Create_Args init_args;
init_args.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
init_args.extension_start = nullptr;
TF_ASSIGN_OR_RETURN(std::vector<PJRT_NamedValue> c_options,
pjrt::ConvertToPjRtNamedValueList(create_options));
init_args.create_options = c_options.data();
init_args.num_options = c_options.size();
std::unique_ptr<pjrt::PJRT_KeyValueCallbackData> kv_callback_data;
if (kv_store) {
kv_callback_data = pjrt::ConvertToCKeyValueCallbacks(kv_store);
init_args.kv_get_callback = kv_callback_data->c_kv_get;
init_args.kv_get_user_arg = &kv_callback_data->kv_get_c_func;
init_args.kv_put_callback = kv_callback_data->c_kv_put;
init_args.kv_put_user_arg = &kv_callback_data->kv_put_c_func;
}
RETURN_STATUS_IF_PJRT_ERROR(c_api->PJRT_Client_Create(&init_args), c_api);
PJRT_Client* c_client = init_args.client;
return std::unique_ptr<PjRtClient>(std::make_unique<PjRtCApiClient>(
c_api, c_client, std::move(kv_callback_data)));
}
absl::StatusOr<std::unique_ptr<PjRtTopologyDescription>> GetCApiTopology(
absl::string_view device_type, absl::string_view topology_name,
const absl::flat_hash_map<std::string, PjRtValueType>& create_options) {
TF_ASSIGN_OR_RETURN(const PJRT_Api* c_api, pjrt::PjrtApi(device_type));
if (c_api == nullptr) {
return Internal("PJRT C API is nullptr for %s", device_type);
}
return GetCApiTopology(c_api, topology_name, create_options);
}
absl::StatusOr<std::unique_ptr<PjRtTopologyDescription>> GetCApiTopology(
const PJRT_Api* c_api, absl::string_view topology_name,
const absl::flat_hash_map<std::string, PjRtValueType>& create_options) {
PJRT_TopologyDescription_Create_Args init_args;
init_args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
init_args.extension_start = nullptr;
TF_ASSIGN_OR_RETURN(std::vector<PJRT_NamedValue> c_options,
pjrt::ConvertToPjRtNamedValueList(create_options));
init_args.create_options = c_options.data();
init_args.num_options = c_options.size();
init_args.topology_name = topology_name.data();
init_args.topology_name_size = topology_name.size();
RETURN_STATUS_IF_PJRT_ERROR(
c_api->PJRT_TopologyDescription_Create(&init_args), c_api);
PJRT_TopologyDescription* c_topology = init_args.topology;
return std::unique_ptr<PjRtTopologyDescription>(
std::make_unique<PjRtCApiTopologyDescription>(c_api, c_topology,
true));
}
} | #include "xla/pjrt/pjrt_c_api_client.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/client/xla_builder.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api_cpu_internal.h"
#include "xla/pjrt/pjrt_api.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
static void SetUpCpuPjRtApi() {
std::string device_type = "cpu";
auto status = ::pjrt::PjrtApi(device_type);
if (!status.ok()) {
TF_ASSERT_OK(
pjrt::SetPjrtApi(device_type, ::pjrt::cpu_plugin::GetCpuPjrtApi()));
}
}
TEST(PjRtCApiClientTest, IsDynamicDimension) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
std::vector<int32_t> data0{1, 2, 3, 4, 5, 6};
Shape shape0 = ShapeUtil::MakeShape(S32, {2, 3});
TF_ASSERT_OK_AND_ASSIGN(
auto param0,
client->BufferFromHostBuffer(
data0.data(), shape0.element_type(), shape0.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
std::vector<int32_t> data1{2};
Shape shape1 = ShapeUtil::MakeShape(S32, {});
TF_ASSERT_OK_AND_ASSIGN(
auto param1,
client->BufferFromHostBuffer(
data1.data(), shape1.element_type(), shape1.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
XlaBuilder builder("DynamicReshape");
auto inp_0 = Parameter(&builder, 0, shape0, "input0");
auto inp_1 = Parameter(&builder, 1, shape1, "input1");
std::vector<bool> dims_are_dynamic = {false, true};
auto reshaped =
DynamicReshape(inp_0, {inp_1, inp_1}, {2, 3}, dims_are_dynamic);
auto computation = builder.Build(reshaped).value();
std::unique_ptr<PjRtLoadedExecutable> executable =
client->Compile(computation, CompileOptions()).value();
ExecuteOptions execute_options;
execute_options.non_donatable_input_indices = {0};
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> results =
executable->Execute({{param0.get(), param1.get()}}, execute_options)
.value();
ASSERT_EQ(results[0].size(), 1);
auto* result_buffer = results[0][0].get();
auto is_dynamic_dimension = result_buffer->is_dynamic_dimension();
EXPECT_THAT(is_dynamic_dimension,
::testing::ElementsAreArray(dims_are_dynamic));
}
TEST(PjRtCApiClientTest, PlatformId) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
EXPECT_EQ(client->platform_name(), xla::CpuName());
EXPECT_EQ(client->platform_id(), xla::CpuId());
}
TEST(PjRtCApiClientTest, EmptyExecutableFingerprint) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
Shape shape = ShapeUtil::MakeShapeWithType<float>({4});
XlaBuilder builder("sum");
auto inp_0 = Parameter(&builder, 0, shape, "input0");
auto inp_1 = Parameter(&builder, 1, shape, "input1");
auto sum = Add(inp_0, inp_1);
builder.SetUpAlias({}, 0, {});
auto computation = builder.Build(sum).value();
std::unique_ptr<PjRtLoadedExecutable> executable =
client->Compile(computation, CompileOptions()).value();
PjRtCApiClient* c_client = dynamic_cast<PjRtCApiClient*>(client.get());
ASSERT_NE(c_client, nullptr);
if (c_client->pjrt_c_api()->pjrt_api_version.minor_version >= 35) {
EXPECT_FALSE(executable->FingerprintExecutable().ok());
} else {
EXPECT_EQ(executable->FingerprintExecutable().status().code(),
absl::StatusCode::kUnimplemented);
}
}
TEST(PjRtClientTest, CreateViewAndCopyToDeviceAsyncExternalCpuOnly) {
SetUpCpuPjRtApi();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetCApiClient("cpu"));
ASSERT_GT(client->addressable_devices().size(), 1);
std::vector<int32_t> data(4, 0);
auto* data_ptr = data.data();
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateViewOfDeviceBuffer(
data_ptr, shape, client->addressable_devices()[0],
          [data = std::move(data)]() mutable { /* frees data */ }));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> result,
buffer->CopyToDevice(client->addressable_devices()[1]));
buffer.reset();
ASSERT_TRUE(result);
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected(4, 0);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_c_api_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_c_api_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a63a3bc-e6b2-4a16-8ff2-d35b6996183f | cpp | tensorflow/tensorflow | pjrt_stream_executor_client | third_party/xla/xla/pjrt/pjrt_stream_executor_client.cc | third_party/xla/xla/pjrt/pjrt_stream_executor_client_test.cc | #include "xla/pjrt/pjrt_stream_executor_client.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinOps.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_computation.h"
#include "xla/executable_run_options.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/pjrt/event_pool.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/local_device_state.h"
#include "xla/pjrt/metrics.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/semaphore.h"
#include "xla/pjrt/tracked_device_buffer.h"
#include "xla/pjrt/transpose.h"
#include "xla/pjrt/utils.h"
#include "xla/primitive_util.h"
#include "xla/service/compiler.h"
#include "xla/service/computation_layout.h"
#include "xla/service/executable.h"
#include "xla/service/generic_transfer_manager.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
PjRtStreamExecutorMemorySpace::PjRtStreamExecutorMemorySpace(
int id, PjRtDevice* device, absl::string_view kind, int kind_id)
: id_(id), device_(device), kind_(kind), kind_id_(kind_id) {
DCHECK(device_ != nullptr && device_->client() != nullptr);
auto* client = device_->client();
to_string_ = absl::StrFormat("MEMORY_SPACE_%i", id_);
debug_string_ = absl::StrFormat(
"PjRtStreamExecutorMemory(id=%i, process_index=%i, client=%s)", id_,
client->process_index(), client->platform_name());
}
PjRtPlatformId PjRtStreamExecutorDevice::platform_id() const {
return client_->platform_id();
}
absl::string_view PjRtStreamExecutorDevice::platform_name() const {
return client_->platform_name();
}
absl::StatusOr<LocalDeviceState*>
PjRtStreamExecutorDevice::GetLocalDeviceState() const {
if (local_device_state_) {
return local_device_state_.get();
}
return InvalidArgument("Device %s is not a local device.", DebugString());
}
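// Builds a DeviceAssignment from a replica-major device matrix, checking that
// every replica has the same partition count and that all devices belong to
// the same platform.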
absl::StatusOr<DeviceAssignment> DevicesToDeviceAssignment(
absl::Span<const std::vector<PjRtDevice*>> devices) {
if (devices.empty()) {
return InvalidArgument(
"Device assignment passed to Compile() must be non-empty.");
}
if (devices[0].empty()) {
return InvalidArgument(
"Device assignment passed to Compile() must have a nonzero number of "
"partitions per replica; replica 0 had 0 partitions.");
}
DeviceAssignment xla_assignment(devices.size(), devices[0].size());
for (int replica = 0; replica < devices.size(); ++replica) {
if (devices[replica].size() != devices[0].size()) {
return InvalidArgument(
"Device assignment passed to Compile() has different numbers of "
"partitions between replicas; %d partitions for replica %d versus %d "
"partitions for replica 0.",
devices[replica].size(), replica, devices[0].size());
}
for (int partition = 0; partition < devices[replica].size(); ++partition) {
if (devices[0][0]->client()->platform_id() !=
devices[replica][partition]->client()->platform_id()) {
return InvalidArgument(
"Device assignment passed to Compile() must have devices of a "
"single kind, got %s for replica 0 partition 0 and %s for replica "
"%d partition %d.",
devices[0][0]->client()->platform_name(),
devices[replica][partition]->client()->platform_name(), replica,
partition);
}
xla_assignment(replica, partition) = devices[replica][partition]->id();
}
}
return xla_assignment;
}
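// Minimal aligned host allocator used when no host memory allocator is
// supplied by the caller.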
class CpuAllocator : public tsl::Allocator {
public:
CpuAllocator() = default;
std::string Name() override { return "cpu"; }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return tsl::port::AlignedMalloc(num_bytes, alignment);
}
void DeallocateRaw(void* ptr) override { return tsl::port::AlignedFree(ptr); }
};
PjRtStreamExecutorClient::PjRtStreamExecutorClient(
std::string platform_name, LocalClient* client,
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices,
int process_index, std::unique_ptr<se::DeviceMemoryAllocator> allocator,
std::unique_ptr<tsl::Allocator> host_memory_allocator,
bool should_stage_host_to_device_transfers,
std::unique_ptr<gpu::GpuExecutableRunOptions> gpu_run_options)
: platform_id_(tsl::Fingerprint64(platform_name)),
platform_name_(std::move(platform_name)),
client_(client),
host_memory_allocator_(std::move(host_memory_allocator)),
owned_allocator_(std::move(allocator)),
owned_devices_(std::move(devices)),
process_index_(process_index),
should_stage_host_to_device_transfers_(
should_stage_host_to_device_transfers),
gpu_run_options_(std::move(gpu_run_options)),
thread_pool_(
tsl::Env::Default(), "pjrt_thread_pool",
std::max<int>(DefaultThreadPoolSize(), client->device_count())),
transpose_cache_(1024) {
if (owned_allocator_ != nullptr) {
allocator_ = owned_allocator_.get();
} else {
allocator_ = client_->backend().memory_allocator();
}
if (!host_memory_allocator_) {
host_memory_allocator_ = std::make_unique<CpuAllocator>();
}
for (const std::unique_ptr<PjRtStreamExecutorDevice>& device :
owned_devices_) {
devices_.push_back(device.get());
CHECK(id_to_device_.insert({device->id(), device.get()}).second)
<< "Duplicate device id: " << device->id();
if (device->IsAddressable()) {
addressable_devices_.push_back(device.get());
}
device->SetClient(this);
}
absl::c_sort(addressable_devices_,
[](const PjRtDevice* a, const PjRtDevice* b) {
return a->local_device_id() < b->local_device_id();
});
}
absl::StatusOr<DeviceAssignment>
PjRtStreamExecutorClient::GetDefaultDeviceAssignment(int num_replicas,
int num_partitions) const {
return client_->backend().computation_placer()->AssignDevices(num_replicas,
num_partitions);
}
absl::StatusOr<Layout> PjRtStreamExecutorClient::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) {
Shape shape = ShapeUtil::MakeShape(element_type, dims);
TF_ASSIGN_OR_RETURN(
shape,
client()->backend().transfer_manager()->ChooseCompactLayoutForShape(
shape));
return shape.layout();
}
absl::StatusOr<std::unique_ptr<HloCostAnalysis>>
PjRtStreamExecutorClient::GetHloCostAnalysis() const {
return std::make_unique<HloCostAnalysis>(
client_->backend().compiler()->ShapeSizeBytesFunction());
}
namespace {
void StallStreamOnError(LocalDeviceState* local_device, se::Stream* stream) {
switch (local_device->allocation_model()) {
case LocalDeviceState::kAsynchronous:
break;
case LocalDeviceState::kComputeSynchronized:
if (stream != local_device->compute_stream()) {
auto status = local_device->compute_stream()->WaitFor(stream);
if (!status.ok()) {
LOG(ERROR) << "Stalling compute stream failed: " << status;
}
}
break;
case LocalDeviceState::kSynchronous:
TF_CHECK_OK(stream->BlockHostUntilDone());
break;
}
}
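// Converts a usage hold into a usage event on `usage_stream`, optionally
// retaining a reference to the buffer until the stream's work completes,
// depending on the allocation model.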
void RecordUsage(PjRtStreamExecutorBuffer::ScopedHold device_buffer,
LocalDeviceState* buffer_local_device,
LocalDeviceState* stream_local_device,
std::shared_ptr<BufferSequencingEvent> event,
se::Stream* usage_stream, bool prefer_to_retain_reference,
std::vector<std::shared_ptr<TrackedDeviceBuffer>>*
buffers_to_release = nullptr) {
tsl::profiler::TraceMe traceme("RecordUsage");
bool retain_buffer_until_completion =
(stream_local_device != buffer_local_device) ||
(stream_local_device->allocation_model() ==
LocalDeviceState::kSynchronous) ||
(stream_local_device->allocation_model() ==
LocalDeviceState::kComputeSynchronized &&
prefer_to_retain_reference);
if (retain_buffer_until_completion) {
if (buffers_to_release) {
buffers_to_release->push_back(device_buffer.buffer());
} else {
buffer_local_device->ThenRelease(usage_stream, device_buffer.buffer())
.IgnoreError();
}
}
device_buffer.ConvertUsageHold(usage_stream, event,
retain_buffer_until_completion);
}
absl::Status AddDestinationBufferSynchronization(
LocalDeviceState* local_device,
PjRtStreamExecutorBuffer::ScopedHold device_buffer,
std::shared_ptr<BufferSequencingEvent> definition_event,
se::Stream* copy_stream) {
absl::StatusOr<EventPool::Handle> event_or =
local_device->event_pool().ThenAllocateAndRecordEvent(copy_stream);
if (!event_or.ok()) {
StallStreamOnError(local_device, copy_stream);
return event_or.status();
}
definition_event->SetSequencingEvent(std::move(event_or).value(),
copy_stream);
RecordUsage(std::move(device_buffer), local_device, local_device,
definition_event, copy_stream,
false);
return absl::OkStatus();
}
void MaybeWaitForEventOnStream(BufferSequencingEvent* event,
LocalDeviceState* local_device_state,
se::Stream*& stream) {
if (!event->IsPredeterminedErrorOrDefinedOn(
local_device_state->compute_stream()) &&
!event->IsComplete()) {
if (stream == nullptr) {
stream = local_device_state->GetFixedSizePoolUsageStream();
}
VLOG(2) << "Waiting for event: " << event
<< "; is_predetermined_error: " << event->IsPredeterminedError()
<< "; on stream: " << stream;
event->WaitForEventOnStream(stream);
}
}
}
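// Allocates a destination buffer on `device` and sets up its definition
// events; tuple buffers also get their index tables written on the
// host-to-device stream.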
absl::StatusOr<std::unique_ptr<PjRtStreamExecutorBuffer>>
AllocateDestinationBuffer(
const Shape& on_host_shape, PjRtDevice* device,
LocalDeviceState* local_device, se::Stream* copy_stream,
bool is_uninitialized_create, PjRtStreamExecutorClient* client,
std::shared_ptr<BufferSequencingEvent> definition_event,
PjRtMemorySpace* memory_space) {
if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) {
return InvalidArgument("Can't make a buffer from an empty tuple");
}
PjRtMemorySpace* default_memory_space =
device->default_memory_space().value_or(nullptr);
if (!memory_space) {
memory_space = default_memory_space;
}
bool is_pinned_host_memory =
memory_space && (memory_space->kind() == PinnedHostMemorySpace::kKind);
if (memory_space != default_memory_space && !is_pinned_host_memory) {
return InvalidArgument("Buffer allocation: invalid memory space");
}
auto* se_client = tensorflow::down_cast<PjRtStreamExecutorClient*>(client);
TransferManager* transfer_manager =
se_client->client()->backend().transfer_manager();
auto memory_space_shape_fn = [is_pinned_host_memory,
transfer_manager](const Shape& shape) {
Shape result = transfer_manager->HostShapeToDeviceShape(shape);
if (is_pinned_host_memory) {
result.mutable_layout()->set_memory_space(Layout::kHostMemorySpace);
}
return result;
};
TF_ASSIGN_OR_RETURN(
ScopedShapedBuffer dst_buffer,
transfer_manager->AllocateScopedShapedBuffer(
on_host_shape, se_client->allocator(),
local_device->local_device_id().value(),
local_device->local_hardware_id().value(), memory_space_shape_fn));
if (local_device->allocation_model() ==
LocalDeviceState::kComputeSynchronized) {
if (copy_stream == nullptr) {
CHECK(is_uninitialized_create);
} else {
CHECK(copy_stream->WaitFor(local_device->compute_stream()).ok());
}
} else {
DCHECK(transfer_manager->CanShapedBufferBeAccessedNow(
local_device->compute_stream()->parent(), dst_buffer));
}
Shape on_device_shape = dst_buffer.on_device_shape();
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 2>
definition_events;
if (is_uninitialized_create) {
if (definition_event) {
definition_events.emplace_back(definition_event);
}
if (local_device->allocation_model() ==
LocalDeviceState::kComputeSynchronized) {
definition_events.emplace_back(
std::make_shared<BufferSequencingEvent>(client->thread_pool()));
TF_ASSIGN_OR_RETURN(EventPool::Handle event,
local_device->event_pool().ThenAllocateAndRecordEvent(
local_device->compute_stream()));
definition_events.back()->SetSequencingEvent(
std::move(event), local_device->compute_stream());
}
} else {
if (definition_event) {
definition_events.emplace_back(definition_event);
} else {
definition_events.emplace_back(
std::make_shared<BufferSequencingEvent>(client->thread_pool()));
}
}
se::Stream* tuple_table_stream = local_device->host_to_device_stream();
if (on_device_shape.IsTuple()) {
if (tuple_table_stream != copy_stream) {
if (local_device->allocation_model() ==
LocalDeviceState::kComputeSynchronized) {
DCHECK(
tuple_table_stream->WaitFor(local_device->compute_stream()).ok());
} else {
DCHECK(transfer_manager->CanShapedBufferBeAccessedNow(
local_device->compute_stream()->parent(), dst_buffer));
}
}
TF_RETURN_IF_ERROR(transfer_manager->WriteTupleIndexTablesAsync(
tuple_table_stream, dst_buffer));
definition_events.emplace_back(
std::make_shared<BufferSequencingEvent>(client->thread_pool()));
absl::StatusOr<EventPool::Handle> event_or =
local_device->event_pool().ThenAllocateAndRecordEvent(
tuple_table_stream);
if (!event_or.ok()) {
StallStreamOnError(local_device, tuple_table_stream);
return event_or.status();
}
definition_events.back()->SetSequencingEvent(std::move(event_or).value(),
tuple_table_stream);
}
std::shared_ptr<TrackedDeviceBuffer> dst_device_buffer =
TrackedDeviceBuffer::FromScopedShapedBuffer(&dst_buffer,
definition_events, device);
auto py_buffer = std::make_unique<PjRtStreamExecutorBuffer>(
on_device_shape, std::move(dst_device_buffer), client, device,
memory_space);
if (on_device_shape.IsTuple()) {
RecordUsage(py_buffer->GetBufferWithUsageHold(), local_device, local_device,
definition_events.back(), tuple_table_stream,
false);
}
return py_buffer;
}
PjRtStreamExecutorBuffer::ScopedHold::~ScopedHold() {
if (ok()) {
parent_->DropHold(type_, buffer().get());
}
}
PjRtStreamExecutorBuffer::ScopedHold::ScopedHold(ScopedHold&& other)
: parent_(other.parent_),
type_(other.type_),
state_(other.state_),
status_(std::move(other.status_)),
buffer_(std::move(other.buffer_)) {
other.SetState(kMoved);
}
void PjRtStreamExecutorBuffer::ScopedHold::Acquire(
absl::StatusOr<std::shared_ptr<TrackedDeviceBuffer>>&& buffer_or) {
CHECK(!ok());
if (buffer_or.ok()) {
buffer_ = buffer_or.value();
SetState(kValid);
} else {
status_ = buffer_or.status();
buffer_ = nullptr;
SetState(kError);
}
CHECK(!ok() || buffer_ != nullptr);
}
PjRtStreamExecutorBuffer::ScopedHold::ForClosure
PjRtStreamExecutorBuffer::ScopedHold::ToClosure() {
CHECK(ok());
ForClosure for_closure(parent_, type_, state_, std::move(status_),
std::move(buffer_));
SetState(kReleased);
return for_closure;
}
void PjRtStreamExecutorBuffer::ScopedHold::ConvertUsageHold(
se::Stream* usage_stream, std::shared_ptr<BufferSequencingEvent> event,
bool reference_held) {
CHECK(ok());
CHECK_EQ(type_, kUsage);
parent_->ConvertUsageHold(buffer().get(), usage_stream, std::move(event),
reference_held);
SetState(kConverted);
}
void PjRtStreamExecutorBuffer::ScopedHold::ConfirmDonation() {
CHECK(ok());
CHECK_EQ(type_, kDonation);
parent_->ConfirmDonation(buffer().get());
SetState(kDonated);
}
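// Appends this hold's buffer to `execution_input`: donation holds transfer
// ownership of the device memory to the execution, while usage holds add the
// buffer as an immutable alias.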
void PjRtStreamExecutorBuffer::ScopedHold::AddToInput(
ShapeTree<MaybeOwningDeviceMemory>::iterator* iterator,
const ShapeTree<MaybeOwningDeviceMemory>::iterator& end,
ExecutionInput* execution_input,
se::DeviceMemoryAllocator* allocator) const {
CHECK(ok());
if (type_ == kDonation) {
buffer()->AddToInputAsDonated(iterator, end, execution_input, allocator);
} else {
CHECK_EQ(type_, kUsage);
buffer()->AddToInputAsImmutable(iterator, end);
}
}
bool PjRtStreamExecutorBuffer::IsOnCpu() const {
return memory_space() != nullptr &&
memory_space()->kind() == PinnedHostMemorySpace::kKind;
}
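// Returns the static on-device shape directly; for dynamic shapes, reads the
// dynamic dimension metadata back from the device on the device-to-host
// stream under a usage hold.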
absl::StatusOr<Shape> PjRtStreamExecutorBuffer::logical_on_device_shape() {
if (on_device_shape_.is_static()) {
return on_device_shape_;
}
auto* local_device = device_->local_device_state();
auto* stream = local_device->GetDeviceToHostStream();
ScopedHold device_buffer(this, ScopedHold::kUsage);
{
absl::MutexLock lock(&mu_);
WaitForOutstandingDonationHold();
if (device_buffer_ == nullptr) {
return InvalidArgument(
"logical_on_device_shape() called on deleted or donated buffer");
}
AcquireHoldLocked(&device_buffer);
}
WaitForBufferDefinitionEventsOnStream(*device_buffer, stream);
ShapedBuffer shaped_buffer = device_buffer->AsShapedBuffer(on_device_shape_);
absl::StatusOr<EventPool::Handle> event_or =
local_device->event_pool().AllocateEvent(stream->parent());
if (!event_or.ok()) {
return event_or.status();
}
Shape ret_shape = on_device_shape_;
TransferManager* transfer_manager =
client_->client()->backend().transfer_manager();
TF_RETURN_IF_ERROR(
transfer_manager->ReadDynamicShapes(stream, &shaped_buffer, &ret_shape));
return ret_shape;
}
namespace {
class ScopedHoldAsExternalReference : public PjRtBuffer::ExternalReference {
public:
explicit ScopedHoldAsExternalReference(
PjRtStreamExecutorBuffer::ScopedHold hold)
: external_reference_(std::move(hold)) {
CHECK(external_reference_.type() ==
PjRtStreamExecutorBuffer::ScopedHold::kExternalReference);
data_ptr_ = external_reference_->device_memory().front().opaque();
}
~ScopedHoldAsExternalReference() override = default;
absl::Status WaitUntilBufferReadyOnStream(std::intptr_t stream) override {
for (const std::shared_ptr<BufferSequencingEvent>& event :
external_reference_->definition_events()) {
TF_RETURN_IF_ERROR(event->WaitForEventOnExternalStream(stream));
}
return absl::OkStatus();
}
private:
PjRtStreamExecutorBuffer::ScopedHold external_reference_;
};
}
absl::StatusOr<std::unique_ptr<PjRtBuffer::ExternalReference>>
PjRtStreamExecutorBuffer::AcquireExternalReference() {
ScopedHold hold = GetBufferWithExternalReference();
absl::Status hold_status = hold.status();
if (!hold_status.ok()) return hold_status;
return std::unique_ptr<ExternalReference>(
std::make_unique<ScopedHoldAsExternalReference>(std::move(hold)));
}
class TrackedDeviceBufferExternalReference
: public PjRtBuffer::ExternalReference {
public:
explicit TrackedDeviceBufferExternalReference(
std::shared_ptr<TrackedDeviceBuffer> tracked_device_buffer)
: tracked_device_buffer_(std::move(tracked_device_buffer)) {
data_ptr_ = tracked_device_buffer_->device_memory()[0].opaque();
}
~TrackedDeviceBufferExternalReference() override = default;
private:
std::shared_ptr<TrackedDeviceBuffer> tracked_device_buffer_;
};
absl::StatusOr<std::unique_ptr<PjRtBuffer::ExternalReference>>
PjRtStreamExecutorBuffer::ReleaseDeviceMemoryOwnership(
bool wait_for_operations_to_complete) {
if (on_device_shape_.IsTuple()) {
return InvalidArgument(
"ReleaseDeviceMemoryOwnership allowed only for non-tuple");
}
TF_ASSIGN_OR_RETURN(
std::shared_ptr<TrackedDeviceBuffer> tracked_device_buffer,
Release(wait_for_operations_to_complete));
std::unique_ptr<PjRtBuffer::ExternalReference> ref;
if (tracked_device_buffer) {
ref = std::make_unique<TrackedDeviceBufferExternalReference>(
std::move(tracked_device_buffer));
}
return ref;
}
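// Donates the underlying device memory to a new buffer whose readiness also
// depends on `dependency`: an extra definition event is prepended to the
// original definition events and is only recorded, on a borrowed pool stream,
// once the dependency future resolves.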
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorBuffer::DonateWithControlDependency(PjRtFuture<> dependency) {
VLOG(1) << "PjRtStreamExecutorBuffer::DonateWithControlDependency";
std::unique_ptr<PjRtBuffer> new_buffer;
auto tracked_buffer =
GetBufferWithHold(PjRtStreamExecutorBuffer::ScopedHold::kDonation);
if (!tracked_buffer.ok()) {
return InvalidArgument(
"Invalid buffer passed to DonateWithControlDependency: %s",
tracked_buffer.status().ToString());
}
absl::InlinedVector<se::DeviceMemoryBase, 4> buffers(
tracked_buffer->device_memory().begin(),
tracked_buffer->device_memory().end());
auto original_definition_events = tracked_buffer->definition_events();
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events;
auto definition_event_for_status =
std::make_shared<BufferSequencingEvent>(client()->thread_pool());
definition_events.push_back(definition_event_for_status);
definition_events.insert(definition_events.end(),
original_definition_events.begin(),
original_definition_events.end());
auto new_device_buffer = std::make_shared<TrackedDeviceBuffer>(
tracked_buffer->allocator(), device(), std::move(buffers),
std::move(definition_events),
nullptr);
new_buffer =
std::unique_ptr<PjRtBuffer>(std::make_unique<PjRtStreamExecutorBuffer>(
on_device_shape(), std::move(new_device_buffer), client(), device(),
device()->default_memory_space().value_or(nullptr)));
PjRtStreamExecutorDevice* device = this->device();
LocalDeviceState* local_device = device->local_device_state();
dependency.OnReady(
[definition_event_for_status = std::move(definition_event_for_status),
local_device](absl::Status status) mutable {
auto stream = local_device->BorrowStreamFromPool();
auto event =
local_device->event_pool().ThenAllocateAndRecordEvent(stream.get());
TF_CHECK_OK(event.status());
definition_event_for_status->SetSequencingEvent(
std::move(event).value(), stream.get());
local_device->ReturnStreamToPool(std::move(stream));
});
tracked_buffer.ConfirmDonation();
return new_buffer;
}
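// Copies a host array into a freshly allocated device buffer. The copy may go
// through a staging buffer (for layout transposes, sub-byte packing, or
// kImmutableOnlyDuringCall semantics) and is enqueued on the host-to-device
// stream from a worker thread while the buffer is returned immediately.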
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostBufferInternal(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer, PjRtDevice* device,
const Layout* device_layout, PjRtMemorySpace* memory_space) {
tsl::profiler::TraceMe traceme(
"PjRtStreamExecutorClient::BufferFromHostBuffer");
Shape device_shape = ShapeUtil::MakeShape(type, dims);
VLOG(1) << "PjRtStreamExecutorClient::BufferFromHostBuffer: shape: "
<< device_shape.ToString() << " device: " << device->DebugString();
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->GetLocalDeviceState());
absl::InlinedVector<int64_t, 4> tmp_strides;
if (!byte_strides) {
tmp_strides.resize(dims.size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(device_shape, absl::MakeSpan(tmp_strides)));
byte_strides = tmp_strides;
}
int64_t size = ShapeUtil::ByteSizeOf(device_shape);
TransferManager* transfer_manager = client()->backend().transfer_manager();
if (device_layout != nullptr) {
*(device_shape.mutable_layout()) = *device_layout;
} else {
TF_ASSIGN_OR_RETURN(
device_shape,
transfer_manager->ChooseCompactLayoutForShape(device_shape));
}
absl::InlinedVector<int64_t, 4> shape_strides(device_shape.dimensions_size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(device_shape, absl::MakeSpan(shape_strides)));
bool host_and_device_strides_equal =
(size == 0 || *byte_strides == shape_strides);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtStreamExecutorBuffer> py_buffer,
AllocateDestinationBuffer(device_shape, device, local_device,
local_device->host_to_device_stream(),
false, this,
nullptr, memory_space));
PjRtStreamExecutorBuffer::ScopedHold device_buffer(
py_buffer->GetBufferWithUsageHold());
CHECK(device_buffer.ok());
std::shared_ptr<TransposePlan> transpose;
if (!host_and_device_strides_equal) {
absl::InlinedVector<int64_t, 4> permutation(dims.size());
absl::c_reverse_copy(device_shape.layout().minor_to_major(),
permutation.begin());
TransposePlan::Options options;
options.elem_size_in_bytes = primitive_util::ByteWidth(type);
options.dims = dims;
options.permutation = permutation;
options.input_layout = TransposePlan::Striding{*byte_strides};
absl::MutexLock lock(&transpose_mu_);
TF_ASSIGN_OR_RETURN(transpose, transpose_cache_.GetOrCreate(options));
}
bool should_pack = primitive_util::IsSubByteNonPredType(type) &&
transfer_manager->PackSubbyteTypes();
int64_t packed_size;
if (should_pack) {
packed_size =
CeilOfRatio<int64_t>(size, 8 / primitive_util::BitWidth(type));
} else {
packed_size = size;
}
std::shared_ptr<void> staging_buffer;
bool must_use_staging_buffer =
host_buffer_semantics == HostBufferSemantics::kImmutableOnlyDuringCall ||
!host_and_device_strides_equal || packed_size != size;
if (must_use_staging_buffer || (should_stage_host_to_device_transfers() &&
packed_size < (int64_t{1} << 30))) {
void* ptr = host_memory_allocator()->AllocateRaw(
tsl::Allocator::kAllocatorAlignment, transpose ? size : packed_size);
staging_buffer = std::shared_ptr<void>(
ptr, [host_memory_allocator = host_memory_allocator()](void* ptr) {
host_memory_allocator->DeallocateRaw(ptr);
});
}
if (host_buffer_semantics == HostBufferSemantics::kImmutableOnlyDuringCall) {
if (transpose) {
transpose->Execute(data, staging_buffer.get());
if (should_pack) {
primitive_util::PackIntN(
type,
absl::MakeConstSpan(static_cast<const char*>(staging_buffer.get()),
size),
absl::MakeSpan(static_cast<char*>(staging_buffer.get()),
packed_size));
}
} else {
if (should_pack) {
primitive_util::PackIntN(
type, absl::MakeConstSpan(static_cast<const char*>(data), size),
absl::MakeSpan(static_cast<char*>(staging_buffer.get()),
packed_size));
} else {
std::memcpy(staging_buffer.get(), data, size);
}
}
if (on_done_with_host_buffer) {
std::move(on_done_with_host_buffer)();
on_done_with_host_buffer = nullptr;
}
}
auto transfer_h2d =
[local_client = client(), transfer_manager, local_device, data, size,
type, packed_size, movable_device_buffer{device_buffer.ToClosure()},
device_shape, should_pack, py_buffer{py_buffer.get()},
on_device_shape{py_buffer->on_device_shape()},
staging_buffer{std::move(staging_buffer)},
on_done_with_host_buffer =
on_done_with_host_buffer
? std::make_shared<absl::AnyInvocable<void() &&>>(
std::move(on_done_with_host_buffer))
: nullptr,
host_buffer_semantics, transpose{std::move(transpose)}]() {
PjRtStreamExecutorBuffer::ScopedHold device_buffer(
movable_device_buffer);
se::DeviceMemoryBase device_memory = device_buffer->device_memory()[0];
if (staging_buffer) {
if (host_buffer_semantics !=
HostBufferSemantics::kImmutableOnlyDuringCall) {
if (transpose) {
transpose->Execute(data, staging_buffer.get());
if (should_pack) {
primitive_util::PackIntN(
type,
absl::MakeConstSpan(
static_cast<const char*>(staging_buffer.get()), size),
absl::MakeSpan(static_cast<char*>(staging_buffer.get()),
packed_size));
}
} else {
if (should_pack) {
primitive_util::PackIntN(
type,
absl::MakeConstSpan(static_cast<const char*>(data), size),
absl::MakeSpan(static_cast<char*>(staging_buffer.get()),
packed_size));
} else {
std::memcpy(staging_buffer.get(), data, size);
}
}
}
TF_CHECK_OK(local_device->host_to_device_stream()->Memcpy(
&device_memory, staging_buffer.get(), packed_size));
} else {
TF_CHECK_OK(local_device->host_to_device_stream()->Memcpy(
&device_memory, data, packed_size));
}
std::shared_ptr<BufferSequencingEvent> event =
device_buffer->definition_events()[0];
TF_CHECK_OK(AddDestinationBufferSynchronization(
local_device, std::move(device_buffer), event,
local_device->host_to_device_stream()));
TF_CHECK_OK(local_device->ThenExecuteCallback(
local_device->host_to_device_stream(),
[staging_buffer{std::move(staging_buffer)},
on_done_with_host_buffer{
std::move(on_done_with_host_buffer)}]() mutable {
if (on_done_with_host_buffer) {
                  std::move(*on_done_with_host_buffer)();
}
}));
};
thread_pool()->Schedule(transfer_h2d);
return std::unique_ptr<PjRtBuffer>(std::move(py_buffer));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer, PjRtDevice* device,
const Layout* device_layout) {
return BufferFromHostBufferInternal(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), device, device_layout,
nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtDevice* device) {
return BufferFromHostBufferInternal(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), device, nullptr,
nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtMemorySpace* memory_space, const Layout* device_layout) {
return BufferFromHostBufferInternal(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), memory_space->devices()[0],
device_layout, memory_space);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::CreateUninitializedBuffer(const Shape& shape,
PjRtDevice* device) {
return CreateUninitializedBuffer(shape, device, nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::CreateUninitializedBuffer(
const Shape& shape, PjRtDevice* device,
std::shared_ptr<BufferSequencingEvent> definition_event) {
tsl::profiler::TraceMe traceme(
"PjRtStreamExecutorClient::CreateUninitializedBuffer");
VLOG(1) << "PjRtStreamExecutorClient::CreateUninitializedBuffer: shape: "
<< shape.ToString() << " device: " << device->DebugString();
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->GetLocalDeviceState());
TransferManager* transfer_manager = client()->backend().transfer_manager();
TF_ASSIGN_OR_RETURN(Shape compact_shape,
transfer_manager->ChooseCompactLayoutForShape(shape));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtStreamExecutorBuffer> py_buffer,
AllocateDestinationBuffer(compact_shape, device, local_device,
nullptr,
true, this,
definition_event));
return std::unique_ptr<PjRtBuffer>(std::move(py_buffer));
}
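// Creates a buffer with no device memory whose single definition event is
// immediately set to `error`, so any consumer of the buffer observes that
// error status.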
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::CreateErrorBuffer(absl::Status error,
const Shape& shape,
PjRtMemorySpace* memory) {
if (memory->client() != this) {
return absl::InvalidArgumentError(
"Memory space is not attached to this client");
}
auto* device = memory->devices()[0];
VLOG(1) << "PjRtStreamExecutorClient::CreateErrorBuffer: shape: "
<< shape.ToString() << " device: " << device->DebugString()
<< " error: " << error;
auto definition_event =
std::make_shared<BufferSequencingEvent>(this->thread_pool());
definition_event->SetDefinedStatus(error);
auto* se_client = tensorflow::down_cast<PjRtStreamExecutorClient*>(this);
absl::Span<se::DeviceMemoryBase> buffers;
auto dummy_device_buffer = std::make_shared<TrackedDeviceBuffer>(
se_client->allocator(), device, buffers,
absl::MakeSpan(&definition_event, 1),
nullptr);
auto py_buffer = std::make_unique<PjRtStreamExecutorBuffer>(
shape, std::move(dummy_device_buffer), this, device,
device->default_memory_space().value_or(nullptr));
return py_buffer;
}
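// Transfers `literal` into a newly allocated device buffer using the
// TransferManager on the host-to-device stream; the transfer runs on a worker
// thread while the buffer is returned immediately.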
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostLiteral(const LiteralSlice& literal,
PjRtDevice* device) {
tsl::profiler::TraceMe traceme(
"PjRtStreamExecutorClient::BufferFromHostLiteral");
VLOG(1) << "PjRtStreamExecutorClient::BufferFromHostLiteral: shape: "
<< literal.shape().ToString() << " device: " << device->DebugString();
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->GetLocalDeviceState());
TransferManager* transfer_manager = client()->backend().transfer_manager();
TF_ASSIGN_OR_RETURN(
Shape compact_shape,
transfer_manager->ChooseCompactLayoutForShape(literal.shape()));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtStreamExecutorBuffer> py_buffer,
AllocateDestinationBuffer(compact_shape, device, local_device,
local_device->host_to_device_stream(),
false, this));
PjRtStreamExecutorBuffer::ScopedHold device_buffer(
py_buffer->GetBufferWithUsageHold());
CHECK(device_buffer.ok());
auto transfer_h2d = [local_client = client(), transfer_manager, local_device,
movable_device_buffer{device_buffer.ToClosure()},
literal, py_buffer{py_buffer.get()},
on_device_shape{py_buffer->on_device_shape()}]() {
PjRtStreamExecutorBuffer::ScopedHold device_buffer(movable_device_buffer);
se::Stream* h2d_stream = local_device->host_to_device_stream();
ShapedBuffer buffer = device_buffer->AsShapedBuffer(on_device_shape);
TF_CHECK_OK(transfer_manager->TransferLiteralToDeviceAsync(
h2d_stream, literal, buffer));
std::shared_ptr<BufferSequencingEvent> event =
device_buffer->definition_events()[0];
TF_CHECK_OK(AddDestinationBufferSynchronization(
local_device, std::move(device_buffer), event, h2d_stream));
    h2d_stream->RefreshStatus().IgnoreError();
QCHECK(h2d_stream->ok());
};
thread_pool()->Schedule(transfer_h2d);
return std::unique_ptr<PjRtBuffer>(std::move(py_buffer));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostLiteral(const LiteralSlice& literal,
PjRtMemorySpace* memory_space) {
if (memory_space->devices().size() == 1) {
return BufferFromHostLiteral(literal, memory_space->devices()[0]);
}
return absl::UnimplementedError(absl::StrCat(
"BufferFromHostLiteral with PjRtMemorySpace is not implemented on "
"platform: ",
platform_name()));
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtStreamExecutorClient::MakeCrossHostReceiveBuffers(
absl::Span<const Shape> shapes, PjRtDevice* device,
PjRtCrossHostRecvNotifier notifier) {
if (shapes.empty()) {
return InvalidArgument(
"shapes parameter empty in MakeCrossHostReceiveBuffers");
}
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->GetLocalDeviceState());
std::shared_ptr<BufferSequencingEvent> definition_event =
std::make_shared<BufferSequencingEvent>(this->thread_pool());
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
buffers.reserve(shapes.size());
for (const auto& shape : shapes) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtBuffer> buffer,
AllocateDestinationBuffer(shape, device, local_device,
nullptr,
false, this,
definition_event));
buffers.push_back(std::move(buffer));
}
TF_RETURN_IF_ERROR(EnqueueCrossHostReceive(
buffers, std::move(definition_event), std::move(notifier), std::nullopt));
return buffers;
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtStreamExecutorClient::MakeCrossHostReceiveBuffersForGather(
absl::Span<const Shape> shapes, std::vector<GatherDetails> gather_details,
PjRtDevice* device, PjRtCrossHostRecvNotifier notifier) {
VLOG(2) << "Making " << gather_details.size()
<< " cross host receive buffers for gather";
if (gather_details.empty()) {
return InvalidArgument(
"gather_details parameter empty in "
"MakeCrossHostReceiveBuffersForGather");
}
if (shapes.size() != gather_details.size()) {
return InvalidArgument(
"gather_details parameter has length %lld but shapes "
"parameter has length %lld in "
"MakeCrossHostReceiveBuffersForGather",
gather_details.size(), shapes.size());
}
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->GetLocalDeviceState());
std::shared_ptr<BufferSequencingEvent> definition_event =
std::make_shared<BufferSequencingEvent>(this->thread_pool());
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
buffers.reserve(shapes.size());
for (int i = 0; i < shapes.size(); ++i) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtBuffer> buffer,
AllocateDestinationBuffer(shapes[i], device, local_device,
nullptr,
false, this,
definition_event));
buffers.push_back(std::move(buffer));
}
TF_RETURN_IF_ERROR(
EnqueueCrossHostReceive(buffers, std::move(definition_event),
std::move(notifier), gather_details));
return buffers;
}
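// Wraps caller-owned device memory in a non-owning PjRtBuffer. The definition
// event is recorded on the compute stream, or on the stream corresponding to
// the externally supplied handle when `stream` is provided, and
// `on_delete_callback` runs when the buffer is destroyed.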
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::CreateViewOfDeviceBuffer(
void* device_ptr, const Shape& shape, PjRtDevice* device,
std::function<void()> on_delete_callback,
std::optional<std::intptr_t> stream) {
se::DeviceMemoryBase buffer(device_ptr, ShapeUtil::ByteSizeOf(shape));
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->GetLocalDeviceState());
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 2>
definition_events;
definition_events.emplace_back(
std::make_shared<BufferSequencingEvent>(this->thread_pool()));
se::Stream* definition_stream;
if (!stream) {
definition_stream = local_device->compute_stream();
} else {
TF_ASSIGN_OR_RETURN(definition_stream,
local_device->GetStreamFromExternalStream(*stream));
}
TF_ASSIGN_OR_RETURN(
EventPool::Handle event,
local_device->event_pool().ThenAllocateAndRecordEvent(definition_stream));
definition_events.back()->SetSequencingEvent(std::move(event),
definition_stream);
auto device_buffer = std::make_shared<TrackedDeviceBuffer>(
nullptr, device,
std::initializer_list<se::DeviceMemoryBase>{buffer}, definition_events,
std::move(on_delete_callback));
return std::unique_ptr<PjRtBuffer>(std::make_unique<PjRtStreamExecutorBuffer>(
shape, std::move(device_buffer), this, device,
device->default_memory_space().value_or(nullptr)));
}
absl::Status PjRtStreamExecutorDevice::TransferToInfeed(
const LiteralSlice& literal) {
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device, GetLocalDeviceState());
return local_device->client()->TransferToInfeedLocal(
literal, local_device->local_hardware_id().value());
}
absl::Status PjRtStreamExecutorDevice::TransferFromOutfeed(
MutableBorrowingLiteral literal) {
VLOG(1) << "PjRtStreamExecutorDevice::TransferFromOutfeed";
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device, GetLocalDeviceState());
return local_device->client()->TransferFromOutfeedLocal(
local_device->local_hardware_id().value(), literal);
}
void PjRtStreamExecutorDevice::AttachMemorySpace(
PjRtMemorySpace* memory_space) {
CHECK(memory_space != nullptr);
CHECK(client_ == memory_space->client()) << absl::StrFormat(
"Could not attach a PjRtStreamExecutorDevice to a PjRtMemorySpace owned "
"by a different client, the device's client: %s, the memory space's "
"client: %s.",
client_->platform_name(), memory_space->client()->platform_name());
memory_spaces_.push_back(memory_space);
memory_spaces_by_id_.emplace(memory_space->kind_id(), memory_space);
}
absl::Span<PjRtMemorySpace* const> PjRtStreamExecutorDevice::memory_spaces()
const {
return memory_spaces_;
}
absl::StatusOr<PjRtMemorySpace*>
PjRtStreamExecutorDevice::default_memory_space() const {
return Unimplemented("default_memory_space is not supported.");
}
absl::StatusOr<PjRtMemorySpace*> PjRtStreamExecutorDevice::memory_space_by_kind(
absl::string_view memory_space_kind) const {
auto it =
absl::c_find_if(memory_spaces_, [memory_space_kind](PjRtMemorySpace* ms) {
return ms->kind() == memory_space_kind;
});
if (it != memory_spaces_.end()) {
return *it;
}
return absl::InternalError(
absl::StrCat("No memory space found (kind: ", memory_space_kind, ")"));
}
absl::StatusOr<PjRtMemorySpace*>
PjRtStreamExecutorDevice::memory_space_by_kind_id(int id) const {
auto it = memory_spaces_by_id_.find(id);
if (it == memory_spaces_by_id_.end()) {
return absl::InternalError(
absl::StrCat("No memory space found (kind_id: ", id, ")"));
}
return it->second;
}
absl::StatusOr<std::intptr_t>
PjRtStreamExecutorDevice::GetStreamForExternalReadyEvents() const {
TF_ASSIGN_OR_RETURN(LocalDeviceState * local_device, GetLocalDeviceState());
se::Stream* stream = local_device->GetExternalReadyEventStream();
void* raw_stream = stream->platform_specific_handle().stream;
if (raw_stream == nullptr) {
return Unimplemented(
"GetStreamForExternalReadyEvents not implemented for platform '%s'.",
platform_name());
}
return absl::bit_cast<std::intptr_t>(raw_stream);
}
absl::StatusOr<PjRtDevice*> PjRtStreamExecutorClient::LookupAddressableDevice(
xla::PjRtLocalDeviceId local_device_id) const {
for (auto* device : addressable_devices_) {
if (local_device_id == device->local_device_id()) {
return device;
}
}
return InvalidArgument("No matching device found for local_device_id %d",
local_device_id.value());
}
absl::Span<PjRtMemorySpace* const> PjRtStreamExecutorClient::memory_spaces()
const {
return memory_spaces_;
}
PjRtStreamExecutorBuffer::PjRtStreamExecutorBuffer(
Shape on_device_shape, std::shared_ptr<TrackedDeviceBuffer> device_buffer,
PjRtClient* client, PjRtDevice* device, PjRtMemorySpace* memory_space)
: client_(tensorflow::down_cast<PjRtStreamExecutorClient*>(client)),
on_device_shape_(std::move(on_device_shape)),
device_(tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)),
memory_space_(memory_space),
device_buffer_(std::move(device_buffer)) {
for (int i = 0; i < ScopedHold::Type::kMaxValue; ++i) {
holds_[i] = 0;
}
}
PjRtStreamExecutorBuffer::~PjRtStreamExecutorBuffer() {
Delete();
for (int i = 0; i < ScopedHold::Type::kMaxValue; ++i) {
CHECK_EQ(holds_[i], 0);
}
}
void PjRtStreamExecutorBuffer::WaitForOutstandingUsageHolds() {
auto not_in_usage_hold = [&]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return holds_[ScopedHold::kUsage] == 0;
};
mu_.Await(absl::Condition(¬_in_usage_hold));
}
void PjRtStreamExecutorBuffer::WaitForOutstandingDonationHold() {
auto not_in_donation_hold = [&]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return holds_[ScopedHold::kDonation] == 0;
};
mu_.Await(absl::Condition(¬_in_donation_hold));
}
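// Detaches the TrackedDeviceBuffer from this PjRtBuffer. If
// `wait_for_operations_to_complete` is true the host blocks until all usage
// events are complete; otherwise, under kComputeSynchronized, outstanding
// events are waited for on a borrowed stream, or on the cleanup thread for
// events that may not be fulfilled yet.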
absl::StatusOr<std::shared_ptr<TrackedDeviceBuffer>>
PjRtStreamExecutorBuffer::Release(bool wait_for_operations_to_complete) {
tsl::profiler::TraceMe trace_me("PjRtStreamExecutorBuffer::Release");
std::shared_ptr<TrackedDeviceBuffer> device_buffer;
TrackedDeviceBuffer::StreamAndEventContainer events;
{
absl::MutexLock lock(&mu_);
WaitForOutstandingDonationHold();
if (device_buffer_ == nullptr) {
return std::shared_ptr<TrackedDeviceBuffer>();
}
std::swap(device_buffer_, device_buffer);
WaitForOutstandingUsageHolds();
events = device_buffer->LockUseAndTransferUsageEvents();
}
LocalDeviceState* local_device_state = device_->local_device_state();
if (wait_for_operations_to_complete) {
std::unique_ptr<se::Stream> stream;
for (const auto& stream_and_event : events) {
if (!stream_and_event.event->IsComplete()) {
if (stream == nullptr) {
stream = local_device_state->BorrowStreamFromPool();
}
stream_and_event.event->WaitForEventOnStream(stream.get());
}
}
if (stream != nullptr) {
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
local_device_state->ReturnStreamToPool(std::move(stream));
}
} else {
if (local_device_state->allocation_model() ==
LocalDeviceState::kComputeSynchronized) {
se::Stream* block_stream = nullptr;
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 5>
events_to_wait_for_in_a_different_thread;
auto maybe_wait_for_event_on_block_stream_or_add_to_events_to_wait =
[&events_to_wait_for_in_a_different_thread, local_device_state,
&block_stream](const std::shared_ptr<BufferSequencingEvent>& event) {
if (local_device_state->allow_delete_before_fulfill() &&
!event->IsDefined()) {
events_to_wait_for_in_a_different_thread.push_back(event);
} else {
MaybeWaitForEventOnStream(event.get(), local_device_state,
block_stream);
}
};
for (const auto& stream_and_event : events) {
VLOG(2)
<< "Checking whether need to wait for stream_and_event: stream: "
<< stream_and_event.stream
<< "; event: " << stream_and_event.event.get()
<< "; reference_held: " << stream_and_event.reference_held
<< "; is_predetermined_error: "
<< stream_and_event.event->IsPredeterminedError();
if (!stream_and_event.reference_held) {
maybe_wait_for_event_on_block_stream_or_add_to_events_to_wait(
stream_and_event.event);
}
}
for (const auto& definition_event : device_buffer->definition_events()) {
VLOG(2) << "Checking whether need to wait for definition_event: "
<< definition_event.get() << "; is_predetermined_error: "
<< definition_event->IsPredeterminedError();
maybe_wait_for_event_on_block_stream_or_add_to_events_to_wait(
definition_event);
}
if (!events_to_wait_for_in_a_different_thread.empty()) {
VLOG(1) << "Going to wait for "
<< events_to_wait_for_in_a_different_thread.size()
<< " events in a different thread.";
local_device_state->cleanup_thread()->Schedule(
[events_to_wait_for_in_a_different_thread =
std::move(events_to_wait_for_in_a_different_thread),
local_device_state, device_buffer, block_stream]() mutable {
for (const auto& event :
events_to_wait_for_in_a_different_thread) {
MaybeWaitForEventOnStream(event.get(), local_device_state,
block_stream);
}
if (block_stream != nullptr) {
TF_CHECK_OK(local_device_state->ThenExecuteCallback(
                  block_stream, [device_buffer]() {
                    // Holds the TrackedDeviceBuffer reference until the
                    // stream has passed this point, then drops it.
                  }));
}
});
} else if (block_stream != nullptr) {
TF_RETURN_IF_ERROR(local_device_state->ThenExecuteCallback(
          block_stream, [device_buffer]() {
            // Holds the TrackedDeviceBuffer reference until the stream has
            // passed this point, then drops it.
          }));
}
}
}
return device_buffer;
}
void PjRtStreamExecutorBuffer::Delete() {
VLOG(1) << "PjRtStreamExecutorBuffer::Delete";
TF_CHECK_OK(Release(false).status());
}
bool PjRtStreamExecutorBuffer::IsDeleted() {
absl::MutexLock lock(&mu_);
return device_buffer_ == nullptr;
}
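// Registers a hold of the given type. Donation holds wait for outstanding
// usage holds to drain and are rejected if an external reference exists;
// usage and external-reference holds only require the buffer to still be
// live.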
absl::StatusOr<std::shared_ptr<TrackedDeviceBuffer>>
PjRtStreamExecutorBuffer::GetBufferForHoldLocked(ScopedHold::Type type) {
CHECK_EQ(holds_[ScopedHold::kDonation], 0);
if (type == ScopedHold::kDonation) {
if (device_buffer_ == nullptr) {
return InvalidArgument("Donation requested for invalid buffer");
}
if (holds_[ScopedHold::kExternalReference] > 0) {
return InvalidArgument(
"Donation requested for buffer with external reference");
}
++holds_[type];
WaitForOutstandingUsageHolds();
CHECK(device_buffer_ != nullptr);
} else {
if (device_buffer_ == nullptr) {
return InvalidArgument("Buffer has been deleted or donated.");
} else {
++holds_[type];
}
}
return device_buffer_;
}
void PjRtStreamExecutorBuffer::AcquireHoldLocked(ScopedHold* hold) {
hold->Acquire(GetBufferForHoldLocked(hold->type()));
}
void PjRtStreamExecutorBuffer::ConvertUsageHold(
TrackedDeviceBuffer* buffer, se::Stream* usage_stream,
std::shared_ptr<BufferSequencingEvent> event, bool reference_held) {
absl::MutexLock lock(&mu_);
CHECK(device_buffer_.get() == buffer || device_buffer_ == nullptr);
buffer->AddUsageEvent(usage_stream, std::move(event), reference_held);
CHECK_GT(holds_[ScopedHold::kUsage], 0);
--holds_[ScopedHold::kUsage];
}
void PjRtStreamExecutorBuffer::ConfirmDonation(
TrackedDeviceBuffer* device_buffer) {
{
absl::MutexLock lock(&mu_);
CHECK_EQ(holds_[ScopedHold::kUsage], 0);
CHECK_EQ(holds_[ScopedHold::kExternalReference], 0);
CHECK_EQ(holds_[ScopedHold::kDonation], 1);
holds_[ScopedHold::kDonation] = 0;
CHECK(device_buffer_.get() == device_buffer);
device_buffer->LockUseAndTransferUsageEvents();
device_buffer->ReleaseDeviceMemory();
device_buffer_.reset();
}
}
void PjRtStreamExecutorBuffer::DropHold(ScopedHold::Type type,
TrackedDeviceBuffer* buffer) {
absl::MutexLock lock(&mu_);
CHECK(device_buffer_.get() == buffer || device_buffer_ == nullptr);
CHECK_GT(holds_[type], 0);
--holds_[type];
if (type == ScopedHold::kDonation) {
CHECK_EQ(holds_[ScopedHold::kDonation], 0);
CHECK_EQ(holds_[ScopedHold::kUsage], 0);
CHECK_EQ(holds_[ScopedHold::kExternalReference], 0);
}
}
PjRtFuture<> PjRtStreamExecutorBuffer::LazyToLiteral(
absl::AnyInvocable<absl::StatusOr<MutableLiteralBase*>() &&> generator) {
auto buffer = std::move(generator)();
if (!buffer.ok()) {
return PjRtFuture<>(buffer.status());
}
return ToLiteral(buffer.value());
}
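// Transfers the buffer's contents into `literal` asynchronously. The transfer
// is scheduled once the first definition event is fulfilled, runs on the
// device-to-host stream, and completes the returned future from the
// TransferManager's completion callback.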
PjRtFuture<> PjRtStreamExecutorBuffer::ToLiteral(MutableLiteralBase* literal) {
VLOG(1) << "PjRtStreamExecutorBuffer::ToLiteral";
if (IsEmptyTuple()) {
return PjRtFuture<>(InvalidArgument("ToLiteral called on empty tuple"));
}
LocalDeviceState* local_device = device_->local_device_state();
se::Stream* stream = local_device->GetDeviceToHostStream();
ScopedHold device_buffer(this, ScopedHold::kUsage);
{
absl::MutexLock lock(&mu_);
WaitForOutstandingDonationHold();
if (device_buffer_ == nullptr) {
return PjRtFuture<>(InvalidArgument(
"CopyToHostAsync() called on deleted or donated buffer"));
}
AcquireHoldLocked(&device_buffer);
}
auto promise = PjRtFuture<>::CreatePromise();
auto usage_event =
std::make_shared<BufferSequencingEvent>(client_->thread_pool());
TransferManager* transfer_manager =
client_->client()->backend().transfer_manager();
auto tracked_device_buffer = device_buffer.buffer();
device_buffer.ConvertUsageHold(stream, usage_event, true);
auto async_to_literal = [usage_event, tracked_device_buffer, stream,
transfer_manager = std::move(transfer_manager),
on_device_shape{on_device_shape_}, literal, promise,
local_device]() mutable {
absl::StatusOr<EventPool::Handle> event_or =
local_device->event_pool().AllocateEvent(stream->parent());
if (!event_or.ok()) {
promise.Set(event_or.status());
return;
}
absl::Status defined_status =
tracked_device_buffer->definition_events()[0]->GetDefinedStatus();
if (!defined_status.ok()) {
promise.Set(defined_status);
return;
}
WaitForBufferDefinitionEventsOnStream(*tracked_device_buffer, stream);
ShapedBuffer shaped_buffer =
tracked_device_buffer->AsShapedBuffer(on_device_shape);
GenericTransferManager::LiteralFromDeviceMetadata transfer_metadata;
transfer_metadata.callback_is_host_callback_safe = true;
TransferManager::TransferMetadata* transfer_metadata_ptr =
(dynamic_cast<GenericTransferManager*>(transfer_manager) != nullptr)
? &transfer_metadata
: nullptr;
transfer_manager->TransferLiteralFromDevice(
stream, shaped_buffer, literal,
[promise](absl::Status status) mutable {
promise.Set(std::move(status));
},
transfer_metadata_ptr);
local_device->event_pool().ThenRecordEvent(stream, event_or.value());
usage_event->SetSequencingEvent(std::move(event_or).value(), stream);
defined_status = local_device->ThenRelease(stream, tracked_device_buffer);
if (!defined_status.ok()) {
promise.Set(defined_status);
}
};
tracked_device_buffer->definition_events()[0]->ExecuteOrAddToFutureTasks(
absl::StrFormat("async_to_literal_%p", literal),
std::move(async_to_literal));
return PjRtFuture<>(
std::move(promise),
[]() {
tsl::profiler::TraceMeProducer traceme(
"PjRtStreamExecutorBuffer::ToLiteral");
VLOG(1) << "PjRtStreamExecutorBuffer::ToLiteral";
return PjRtFutureHelpers::ProfilingKeys(
{traceme.GetContextId()});
},
[](PjRtFutureHelpers::ProfilingKeys keys) {
tsl::profiler::TraceMeConsumer traceme(
"PjRtStreamExecutorBuffer::ToLiteral", keys.traceme_context_id);
});
}
absl::StatusOr<size_t> PjRtStreamExecutorBuffer::GetOnDeviceSizeInBytes()
const {
absl::MutexLock lock(&mu_);
if (device_buffer_ == nullptr) {
return InvalidArgument(
"GetOnDeviceSizeInBytes called on deleted or donated buffer");
}
if (device_buffer_->device_memory().size() != 1) {
return InvalidArgument(
"GetOnDeviceSizeInBytes called on tuple-shaped buffer");
}
return device_buffer_->device_memory()[0].size();
}
PjRtFuture<> PjRtStreamExecutorBuffer::CopyRawToHost(void* dst, int64_t offset,
int64_t transfer_size) {
return client_->CopyRawSubBufferToHost(this, PjRtFuture<void*>(dst), offset,
transfer_size);
}
PjRtFuture<> PjRtStreamExecutorBuffer::CopyRawToHostFuture(
PjRtFuture<void*> dst, int64_t offset, int64_t transfer_size) {
return client_->CopyRawSubBufferToHost(this, dst, offset, transfer_size);
}
absl::StatusOr<ShapedBuffer> PjRtStreamExecutorBuffer::AsShapedBuffer() const {
absl::MutexLock lock(&mu_);
if (device_buffer_ == nullptr) {
return InvalidArgument(
"Attempted to fetch value of invalid/deleted buffer.");
}
return device_buffer_->AsShapedBuffer(on_device_shape_);
}
PjRtStreamExecutorBuffer::ScopedHold
PjRtStreamExecutorBuffer::GetBufferWithHold(ScopedHold::Type type) {
absl::MutexLock lock(&mu_);
WaitForOutstandingDonationHold();
ScopedHold hold(this, type);
AcquireHoldLocked(&hold);
return hold;
}
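// Allocates a destination buffer on `dst_device` and enqueues a
// device-to-device copy of every leaf buffer on `transfer_stream` once the
// source buffer's definition event is fulfilled. Returns the new buffer
// together with its definition event so the caller can convert the source
// usage hold.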
absl::StatusOr<std::pair<std::unique_ptr<PjRtBuffer>,
std::shared_ptr<BufferSequencingEvent>>>
PjRtStreamExecutorBuffer::CopyToDeviceHelper(
PjRtDevice* dst_device, LocalDeviceState* dst_local_device,
PjRtMemorySpace* dst_memory_space, LocalDeviceState* transfer_local_device,
LocalDeviceState* src_local_device, se::Stream* transfer_stream,
std::shared_ptr<TrackedDeviceBuffer> src_device_buffer) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtStreamExecutorBuffer> py_buffer,
AllocateDestinationBuffer(
ShapeUtil::DeviceShapeToHostShape(on_device_shape_),
dst_device, dst_local_device, transfer_stream,
false, client_,
nullptr, dst_memory_space));
ScopedHold dst_device_buffer(py_buffer->GetBufferWithUsageHold());
CHECK(dst_device_buffer.ok());
std::shared_ptr<BufferSequencingEvent> copy_event =
dst_device_buffer->definition_events()[0];
auto async_copy_to_device = [src_device_buffer,
dst_device_buffer =
std::move(dst_device_buffer.buffer()),
transfer_stream = std::move(transfer_stream),
copy_event,
on_device_shape{py_buffer->on_device_shape()},
src_local_device = std::move(src_local_device),
transfer_local_device =
std::move(transfer_local_device),
dst_local_device =
std::move(dst_local_device)]() mutable {
tsl::profiler::TraceMe traceme(
"PjRtStreamExecutorBuffer::CopyToDeviceHelper::async_copy_to_"
"device");
VLOG(1)
<< "PjRtStreamExecutorBuffer::CopyToDeviceHelper::async_copy_to_device";
absl::Status defined_status =
src_device_buffer->definition_events()[0]->GetDefinedStatus();
if (defined_status.ok()) {
WaitForBufferDefinitionEventsOnStream(*src_device_buffer,
transfer_stream);
ShapedBuffer src_buffer =
src_device_buffer->AsShapedBuffer(on_device_shape);
ShapedBuffer dst_buffer =
dst_device_buffer->AsShapedBuffer(on_device_shape);
for (const auto& leaf : src_buffer.buffers().leaves()) {
const ShapeIndex& index = leaf.first;
const se::DeviceMemoryBase& input_buffer = leaf.second;
const se::DeviceMemoryBase& output_buffer = dst_buffer.buffer(index);
CHECK_EQ(input_buffer.size(), output_buffer.size());
if (input_buffer.size() != 0) {
auto status = transfer_local_device->ThenMemcpyDeviceToDevice(
transfer_stream, dst_local_device->compute_stream(), input_buffer,
output_buffer);
if (!status.ok()) {
LOG(ERROR) << "D2D memory copy failed due to: " << status;
StallStreamOnError(transfer_local_device, transfer_stream);
if (transfer_local_device == dst_local_device) {
auto status = src_local_device->ThenRelease(
transfer_stream, std::move(src_device_buffer));
if (!status.ok()) {
LOG(ERROR) << "ThenRelease failed due to: " << status;
}
}
return;
}
}
}
absl::StatusOr<EventPool::Handle> event_or =
transfer_local_device->event_pool().ThenAllocateAndRecordEvent(
transfer_stream);
if (!event_or.ok()) {
StallStreamOnError(transfer_local_device, transfer_stream);
LOG(ERROR) << event_or.status();
return;
}
copy_event->SetSequencingEvent(std::move(event_or).value(),
transfer_stream);
} else {
copy_event->SetDefinedStatus(defined_status);
}
auto status = src_local_device->ThenRelease(transfer_stream,
std::move(src_device_buffer));
if (!status.ok()) {
LOG(ERROR) << "ThenRelease failed due to: " << status;
}
};
src_device_buffer->definition_events()[0]->ExecuteOrAddToFutureTasks(
absl::StrFormat("async_copy_to_device_%p",
dst_device_buffer.buffer().get()),
std::move(async_copy_to_device));
RecordUsage(std::move(dst_device_buffer), transfer_local_device,
transfer_local_device, copy_event, transfer_stream,
false);
return std::pair<std::unique_ptr<PjRtBuffer>,
std::shared_ptr<BufferSequencingEvent>>(
std::unique_ptr<PjRtStreamExecutorBuffer>(std::move(py_buffer)),
std::move(copy_event));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorBuffer::CopyToDevice(PjRtDevice* dst_device) {
tsl::profiler::TraceMe traceme("PjRtStreamExecutorBuffer::CopyToDevice");
VLOG(1) << "PjRtStreamExecutorBuffer::CopyToDevice";
if (dst_device == device_) {
return InvalidArgument(
"CopyToDevice cannot accept the same source and destination devices");
}
return CopyToDeviceMemorySpace(dst_device, nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorBuffer::CopyToDeviceMemorySpace(
PjRtDevice* dst_device, PjRtMemorySpace* dst_memory_space) {
if (dst_device == device_ && dst_memory_space == memory_space()) {
return InvalidArgument(
"CopyToDeviceMemorySpace cannot accept the same source and destination "
"devices/memory");
}
if (dst_device->client() != client_) {
TF_ASSIGN_OR_RETURN(std::shared_ptr<Literal> literal, ToLiteralSync());
Literal* literal_pointer = literal.get();
absl::InlinedVector<int64_t, 4> byte_strides(
literal->shape().dimensions_size());
TF_RETURN_IF_ERROR(
ShapeUtil::ByteStrides(literal->shape(), absl::MakeSpan(byte_strides)));
return dst_device->client()->BufferFromHostBuffer(
literal_pointer->untyped_data(),
literal_pointer->shape().element_type(),
literal_pointer->shape().dimensions(), byte_strides,
PjRtStreamExecutorClient::HostBufferSemantics::kImmutableZeroCopy,
        [literal{std::move(literal)}]() {
          // Keeps `literal` alive until the host buffer is no longer needed.
        },
        dst_device);
}
TF_ASSIGN_OR_RETURN(
LocalDeviceState * dst_local_device,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(dst_device)
->GetLocalDeviceState());
LocalDeviceState* transfer_local_device =
client_->EnqueueD2DTransfersOnSrcStream() ? device_->local_device_state()
: dst_local_device;
CHECK_EQ(dst_local_device->allocation_model(),
transfer_local_device->allocation_model());
se::Stream* transfer_stream =
transfer_local_device->GetDeviceToDeviceStream();
ScopedHold src_device_buffer(this, ScopedHold::kUsage);
{
absl::MutexLock lock(&mu_);
WaitForOutstandingDonationHold();
if (device_buffer_ == nullptr) {
return InvalidArgument(
"CopyToDevice called on deleted or donated buffer");
}
AcquireHoldLocked(&src_device_buffer);
}
absl::StatusOr<std::pair<std::unique_ptr<PjRtBuffer>,
std::shared_ptr<BufferSequencingEvent>>>
buffer_and_event_or = CopyToDeviceHelper(
dst_device, dst_local_device, dst_memory_space, transfer_local_device,
device_->local_device_state(), transfer_stream,
src_device_buffer.buffer());
if (!buffer_and_event_or.ok()) {
return buffer_and_event_or.status();
}
auto& buffer_and_event = buffer_and_event_or.value();
std::unique_ptr<PjRtBuffer>& buffer = buffer_and_event.first;
std::shared_ptr<BufferSequencingEvent>& event = buffer_and_event.second;
src_device_buffer.ConvertUsageHold(transfer_stream, event,
true);
return std::move(buffer);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorBuffer::CopyToMemorySpace(PjRtMemorySpace* dst_memory_space) {
if (dst_memory_space->devices().size() == 1) {
return CopyToDeviceMemorySpace(dst_memory_space->devices()[0],
dst_memory_space);
}
return Unimplemented("CopyToMemorySpace is not supported");
}
void PjRtStreamExecutorBuffer::CopyToRemoteDevice(
PjRtFuture<std::string> serialized_descriptor, RemoteSendCallback on_done) {
VLOG(1) << "PjRtStreamExecutorBuffer::CopyToRemoteDevice";
auto desc = serialized_descriptor.Await();
if (desc.ok()) {
client_->CopyToRemoteDevice(this, *desc, std::move(on_done));
} else {
on_done(desc.status(), false);
}
}
void PjRtStreamExecutorBuffer::CopyToRemoteDeviceScattered(
PjRtFuture<std::vector<std::string>> serialized_descriptors,
std::vector<RemoteSendCallback> callbacks,
const ScatterDetails& scatter_details) {
VLOG(1) << "PjRtStreamExecutorBuffer::CopyToRemoteDeviceScattered";
auto res = serialized_descriptors.Await();
if (res.ok()) {
client_->CopyToRemoteDeviceScattered(this, *std::move(res),
std::move(callbacks), scatter_details);
} else {
for (const auto& cb : callbacks) {
cb(res.status(), false);
}
}
}
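// Returns a future that resolves once the buffer's definition events have
// completed, or that propagates a predetermined error. Any outstanding events
// are waited for on a borrowed pool stream, and the promise is fulfilled from
// a host callback on that stream.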
PjRtFuture<> PjRtStreamExecutorBuffer::GetReadyFuture() {
std::shared_ptr<TrackedDeviceBuffer> device_buffer;
PjRtFuture<>::Promise definition_promise;
{
absl::MutexLock lock(&mu_);
if (device_buffer_ == nullptr) {
return PjRtFuture<>(InvalidArgument(
"GetReadyFuture() called on deleted or donated buffer"));
}
if (!definition_promise_) {
device_buffer = device_buffer_;
definition_promise_ = PjRtFuture<>::CreatePromise();
}
definition_promise = definition_promise_;
}
if (device_buffer) {
LocalDeviceState* local_device_state = device_->local_device_state();
auto async_wait_for_events =
[device_buffer, local_device_state = std::move(local_device_state),
definition_promise]() mutable {
std::unique_ptr<se::Stream> stream;
absl::Status defined_status =
device_buffer->definition_events()[0]->GetDefinedStatus();
if (!defined_status.ok()) {
definition_promise.Set(defined_status);
return;
}
for (auto& event : device_buffer->definition_events()) {
if (!event->IsComplete()) {
if (stream == nullptr) {
stream = local_device_state->BorrowStreamFromPool();
}
event->WaitForEventOnStream(stream.get());
}
}
if (stream != nullptr) {
auto* stream_ptr = stream.release();
auto status = stream_ptr->DoHostCallback(
[definition_promise, stream_ptr, local_device_state,
event_with_status =
device_buffer->definition_events()[0]]() mutable {
local_device_state->ReturnStreamToPool(
std::unique_ptr<se::Stream>(stream_ptr));
definition_promise.Set(event_with_status->GetDefinedStatus());
});
if (!status.ok()) {
definition_promise.Set(status);
return;
}
} else {
definition_promise.Set(
device_buffer->definition_events()[0]->GetDefinedStatus());
}
};
device_buffer->definition_events()[0]->ExecuteOrAddToFutureTasks(
absl::StrFormat("async_wait_for_events_%p", &async_wait_for_events),
std::move(async_wait_for_events));
}
return PjRtFuture<>(
std::move(definition_promise),
[]() {
tsl::profiler::TraceMeProducer traceme(
"PjRtStreamExecutorBuffer::Await");
VLOG(1) << "PjRtStreamExecutorBuffer::Await";
return PjRtFutureHelpers::ProfilingKeys(
{traceme.GetContextId()});
},
[](PjRtFutureHelpers::ProfilingKeys keys) {
tsl::profiler::TraceMeConsumer traceme(
"PjRtStreamExecutorBuffer::Await", keys.traceme_context_id);
});
}
namespace {
struct TupleHandle {
ExecutionInput execution_input;
std::shared_ptr<BufferSequencingEvent> event;
};
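// Validates an argument buffer's shape against the executable's expected
// parameter shape. A zero-element PRED[0] buffer is accepted for token
// parameters; otherwise shapes must be compatible under strict checking (and
// always for tuples), while relaxed checking only compares byte size
// requirements.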
absl::Status CheckCompatibleShapes(bool strict_shape_checking,
const Shape& buffer_on_device_shape,
const Shape& execution_shape,
const TransferManager& transfer_manager,
int parameter_index) {
if (execution_shape.IsToken() &&
buffer_on_device_shape.element_type() == PrimitiveType::PRED &&
buffer_on_device_shape.dimensions_size() == 1 &&
buffer_on_device_shape.dimensions(0) == 0) {
return absl::OkStatus();
}
if (strict_shape_checking || buffer_on_device_shape.IsTuple()) {
if (!ShapeUtil::Compatible(buffer_on_device_shape, execution_shape)) {
return InvalidArgument(
"Executable expected shape %s for argument %d but got "
"incompatible "
"shape %s",
ShapeUtil::HumanStringWithLayout(execution_shape), parameter_index,
ShapeUtil::HumanStringWithLayout(buffer_on_device_shape));
}
} else {
const int64_t buffer_size =
transfer_manager.GetByteSizeRequirement(buffer_on_device_shape);
const int64_t execute_size =
transfer_manager.GetByteSizeRequirement(execution_shape);
if (buffer_on_device_shape.is_static() && buffer_size != execute_size) {
return InvalidArgument(
"Executable expected shape %s for argument %d but got "
"incompatible "
"shape %s",
ShapeUtil::HumanStringWithLayout(execution_shape), parameter_index,
ShapeUtil::HumanStringWithLayout(buffer_on_device_shape));
}
if (!buffer_on_device_shape.is_static() && buffer_size < execute_size) {
return InvalidArgument(
"Executable expected shape %s for argument %d but got "
"incompatible "
"shape %s",
ShapeUtil::HumanStringWithLayout(execution_shape), parameter_index,
ShapeUtil::HumanStringWithLayout(buffer_on_device_shape));
}
}
return absl::OkStatus();
}
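// Packs the argument buffers into a single tupled ExecutionInput: allocates a
// root tuple index table, writes it on the host-to-device stream, and returns
// the ExecutionInput together with the event recording that transfer.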
absl::StatusOr<std::unique_ptr<TupleHandle>> MakeTupleHelper(
PjRtStreamExecutorClient* client, LocalDeviceState* local_device,
bool strict_shape_checking, const Shape& tupled_parameter_shape,
absl::Span<PjRtBuffer* const> py_buffers,
absl::Span<const PjRtStreamExecutorBuffer::ScopedHold> device_buffers,
int device_ordinal) {
se::DeviceMemoryAllocator* allocator = client->allocator();
TransferManager* transfer_manager =
client->client()->backend().transfer_manager();
if (tupled_parameter_shape.tuple_shapes_size() != py_buffers.size()) {
return InvalidArgument("Executable expected %lld parameters but got %lld",
tupled_parameter_shape.tuple_shapes_size(),
py_buffers.size());
}
for (int i = 0; i < py_buffers.size(); ++i) {
TF_RETURN_IF_ERROR(CheckCompatibleShapes(
strict_shape_checking, py_buffers[i]->on_device_shape(),
tupled_parameter_shape.tuple_shapes(i), *transfer_manager, i));
}
se::Stream* stream = local_device->host_to_device_stream();
TF_ASSIGN_OR_RETURN(
se::OwningDeviceMemory root_table_memory,
allocator->Allocate(
device_ordinal,
transfer_manager->GetByteSizeRequirement(tupled_parameter_shape)));
if (local_device->allocation_model() ==
LocalDeviceState::kComputeSynchronized) {
TF_RETURN_IF_ERROR(stream->WaitFor(local_device->compute_stream()));
} else {
DCHECK(transfer_manager->CanBufferBeAccessedNow(
local_device->compute_stream()->parent(), root_table_memory.cref()));
}
ExecutionInput execution_input(tupled_parameter_shape);
ShapeTree<MaybeOwningDeviceMemory>::iterator input_iterator =
execution_input.MutableBuffers()->begin();
ShapeTree<MaybeOwningDeviceMemory>::iterator iterator_end =
execution_input.MutableBuffers()->end();
execution_input.SetBuffer(
input_iterator->first,
MaybeOwningDeviceMemory(std::move(root_table_memory)));
++input_iterator;
for (const PjRtStreamExecutorBuffer::ScopedHold& device_buffer :
device_buffers) {
device_buffer.AddToInput(&input_iterator, iterator_end, &execution_input,
allocator);
}
CHECK(input_iterator == iterator_end);
TF_RETURN_IF_ERROR(transfer_manager->WriteRootTupleIndexTable(
stream, execution_input.Buffers()));
absl::StatusOr<EventPool::Handle> event_or =
local_device->event_pool().ThenAllocateAndRecordEvent(stream);
if (!event_or.ok()) {
StallStreamOnError(local_device, stream);
return event_or.status();
}
auto transfer_event =
std::make_shared<BufferSequencingEvent>(client->thread_pool());
transfer_event->SetSequencingEvent(std::move(event_or).value(), stream);
return std::make_unique<TupleHandle>(
TupleHandle({std::move(execution_input), std::move(transfer_event)}));
}
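// Wraps one execution output in a PjRtStreamExecutorBuffer, selecting the
// pinned-host memory space when the output layout requests host memory, and
// records a usage hold on the compute stream so the buffer is retained until
// the execution completes.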
absl::StatusOr<std::unique_ptr<PjRtBuffer>> OutputBufferHelper(
ScopedShapedBuffer* result_buffer,
std::shared_ptr<BufferSequencingEvent> definition_event, PjRtClient* client,
PjRtDevice* device, LocalDeviceState* local_device,
std::vector<std::shared_ptr<TrackedDeviceBuffer>>& buffers_to_release) {
std::shared_ptr<TrackedDeviceBuffer> out_buffer =
TrackedDeviceBuffer::FromScopedShapedBuffer(result_buffer,
{definition_event}, device);
const Shape& shape = result_buffer->on_device_shape();
PjRtMemorySpace* memory_space =
device->default_memory_space().value_or(nullptr);
if (shape.has_layout()) {
switch (shape.layout().memory_space()) {
case Layout::kGenericFastMemorySpace:
case Layout::kDefaultMemorySpace:
break;
case Layout::kHostMemorySpace: {
TF_ASSIGN_OR_RETURN(
memory_space,
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->memory_space_by_kind_id(PinnedHostMemorySpace::kKindId));
break;
}
default:
return absl::InternalError(
absl::StrCat("Unsupported memory space in output layout: ",
shape.layout().memory_space()));
}
}
auto pjrt_buffer = std::make_unique<PjRtStreamExecutorBuffer>(
result_buffer->on_device_shape(), std::move(out_buffer), client, device,
memory_space);
RecordUsage(pjrt_buffer->GetBufferWithUsageHold(), local_device, local_device,
definition_event, local_device->compute_stream(),
false, &buffers_to_release);
return std::unique_ptr<PjRtBuffer>(std::move(pjrt_buffer));
}
bool IsAllZeros(const DeviceAssignment& assignment) {
return std::all_of(
assignment.begin(), assignment.end(),
[](const DeviceAssignment::value_type& v) { return v == 0; });
}
}
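// The constructor caches per-executable on-device parameter shapes,
// fingerprints each executable's HLO module, and checks that the number of
// executables matches the number of partitions in the device assignment.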
PjRtStreamExecutorLoadedExecutable::PjRtStreamExecutorLoadedExecutable(
std::vector<std::unique_ptr<LocalExecutable>> executables,
bool parameter_is_tupled_arguments,
std::shared_ptr<DeviceAssignment> device_assignment,
CompileOptions compile_options,
std::vector<LogicalDeviceIds> addressable_device_logical_ids,
std::vector<PjRtDevice*> addressable_devices,
PjRtStreamExecutorClient* client)
: client_(client),
device_assignment_(std::move(device_assignment)),
compile_options_(std::move(compile_options)),
parameter_is_tupled_arguments_(parameter_is_tupled_arguments),
addressable_device_logical_ids_(
std::move(addressable_device_logical_ids)),
addressable_devices_(std::move(addressable_devices)) {
TransferManager* transfer_manager =
client_->client()->backend().transfer_manager();
executables_.reserve(executables.size());
tsl::Fprint128 fingerprint = tsl::Fingerprint128(fingerprint_);
for (auto& executable : executables) {
const auto& computation_layout =
executable->executable()->module().entry_computation_layout();
std::vector<Shape> parameter_shapes;
parameter_shapes.reserve(computation_layout.parameter_count());
for (int i = 0; i < computation_layout.parameter_count(); ++i) {
parameter_shapes.push_back(transfer_manager->HostShapeToDeviceShape(
computation_layout.parameter_shape(i)));
}
fingerprint = tsl::FingerprintCat128(
fingerprint,
tsl::Fingerprint128(executable->executable()->module().ToString()));
executables_.emplace_back(std::move(executable));
on_device_executable_parameter_shapes_.push_back(
std::move(parameter_shapes));
}
fingerprint_ = absl::StrCat(fingerprint.low64, fingerprint.high64);
int num_partitions;
if (device_assignment_ == nullptr) {
VLOG(3) << "PjRtStreamExecutorLoadedExecutable portable single-core";
num_partitions = 1;
CHECK(addressable_devices_.empty());
} else {
VLOG(3) << "PjRtStreamExecutorLoadedExecutable device_assignment:\n"
<< device_assignment_->ToString();
CHECK_GE(addressable_devices_.size(), 1) << device_assignment_->ToString();
if ((device_assignment_->replica_count() > 1 ||
device_assignment_->computation_count() > 1) &&
IsAllZeros(*device_assignment_)) {
LOG(INFO)
<< "A workaround is in effect to allow compiling multi-device "
"HLOs on machines with fewer devices. Don't run this executable.";
} else {
CHECK_LE(addressable_devices_.size(), client_->addressable_device_count())
<< "Inconsistent local device count.";
}
num_partitions = device_assignment_->computation_count();
}
if (executables_.size() > 1) {
CHECK_EQ(num_partitions, executables_.size())
<< "Number of executables " << executables_.size()
<< " did not match number of partitions " << num_partitions;
}
}
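// Records, for each partition's executable, the parameter indices whose
// buffers must be donated to the computation.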
absl::Status PjRtStreamExecutorLoadedExecutable::SetUpDonation(
bool tuple_inputs) {
parameters_that_must_be_donated_.reserve(executables_.size());
for (auto& executable : executables_) {
TF_ASSIGN_OR_RETURN(std::vector<int> parameters_to_donate,
ComputeParametersThatMustBeDonated(
executable->executable()->module(), tuple_inputs));
parameters_that_must_be_donated_.emplace_back(
std::move(parameters_to_donate));
}
return absl::OkStatus();
}
absl::string_view PjRtStreamExecutorLoadedExecutable::name() const {
Executable* executable = executables_[0]->executable();
if (executable->has_module()) {
return executable->module().name();
} else {
return "<unknown executable>";
}
}
absl::Span<int const>
PjRtStreamExecutorLoadedExecutable::ParametersThatMustBeDonated(
int executable_idx) const {
return parameters_that_must_be_donated_[executable_idx];
}
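// Converts the argument buffers into ExecutionInputs, either by building a
// single on-device tuple (when the executable takes tupled parameters) or by
// shape-checking each argument, and then makes the compute stream wait on all
// collected buffer events.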
absl::StatusOr<std::vector<ExecutionInput>>
PjRtStreamExecutorLoadedExecutable::MakeExecutionInputsAndWaitForEvents(
int device_ordinal, const ExecuteOptions& options,
absl::Span<const Shape> executable_parameter_shapes,
absl::Span<PjRtBuffer* const> argument_handles,
absl::Span<const PjRtStreamExecutorBuffer::ScopedHold> device_buffers,
absl::flat_hash_set<BufferSequencingEvent*>& events) const {
std::vector<ExecutionInput> execution_inputs;
LocalDeviceState* device_state = &(client_->device_state(device_ordinal));
TransferManager* transfer_manager =
client_->client()->backend().transfer_manager();
std::unique_ptr<TupleHandle> tuple_handle;
if (parameter_is_tupled_arguments_ && !options.arguments_are_tupled) {
TF_ASSIGN_OR_RETURN(
tuple_handle,
MakeTupleHelper(client_, device_state, options.strict_shape_checking,
executable_parameter_shapes[0], argument_handles,
device_buffers, device_ordinal));
events.insert(tuple_handle->event.get());
execution_inputs.emplace_back(std::move(tuple_handle->execution_input));
} else {
if (argument_handles.size() != executable_parameter_shapes.size()) {
return InvalidArgument("Executable expected %lld arguments but got %lld",
executable_parameter_shapes.size(),
argument_handles.size());
}
execution_inputs.reserve(argument_handles.size());
for (int i = 0; i < argument_handles.size(); ++i) {
PjRtBuffer* handle = argument_handles[i];
TF_RETURN_IF_ERROR(CheckCompatibleShapes(
options.strict_shape_checking, handle->on_device_shape(),
executable_parameter_shapes[i], *transfer_manager, i));
execution_inputs.emplace_back(executable_parameter_shapes[i]);
ExecutionInput& execution_input = execution_inputs.back();
ShapeTree<MaybeOwningDeviceMemory>::iterator input_iterator =
execution_input.MutableBuffers()->begin();
ShapeTree<MaybeOwningDeviceMemory>::iterator iterator_end =
execution_input.MutableBuffers()->end();
device_buffers[i].AddToInput(&input_iterator, iterator_end,
&execution_input, client_->allocator());
CHECK(input_iterator == iterator_end);
}
}
for (BufferSequencingEvent* event : events) {
event->WaitForEventOnStream(device_state->compute_stream());
}
return execution_inputs;
}
template <typename T>
static const T* FindCallback(int channel_id, absl::Span<const T> callbacks) {
auto it = absl::c_find_if(callbacks, [&](const T& callback) {
return callback.channel_id == channel_id;
});
return it == callbacks.end() ? nullptr : &*it;
}
using tsl::AsyncValueRef;
using tsl::MakeConstructedAsyncValueRef;
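// Adapts the per-replica SendCallbacks from ExecuteOptions into the
// SendDeviceMemoryFunction used by XLA Send ops: the returned function copies
// the device buffer to a host chunk on `thread_pool`, forwards it to the
// matching callback, and signals completion through an async event.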
static SendDeviceMemoryFunction ConvertSendCallbacksToSendFunction(
int replica, const ExecuteOptions& options,
tsl::thread::ThreadPool* thread_pool) {
if (replica >= options.send_callbacks.size()) {
return [replica](int64_t channel_id, se::Stream*, const Shape&,
const se::DeviceMemoryBase&,
const absl::flat_hash_map<std::string, std::string>&) {
return Internal(
"Don't send a buffer to the channel_id=%d, there was no send "
"callbacks registered for the replica=%d",
channel_id, replica);
};
}
absl::Span<const SendCallback> callbacks = options.send_callbacks[replica];
return [callbacks, thread_pool](
int64_t channel_id, se::Stream* stream, const Shape& shape,
const se::DeviceMemoryBase& src,
const absl::flat_hash_map<std::string, std::string>&)
-> absl::StatusOr<AsyncValueRef<std::unique_ptr<se::Event>>> {
VLOG(3) << "Send " << src.size() << " bytes to channel #" << channel_id
<< " (shape=" << shape.ToString() << ")";
const SendCallback* send = FindCallback(channel_id, callbacks);
if (!send) {
return InvalidArgument(
"Failed to send a buffer to the channel_id=%d, callback not found",
channel_id);
}
TF_ASSIGN_OR_RETURN(auto se_event, stream->parent()->CreateEvent());
auto done_event = MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>(
std::move(se_event));
thread_pool->Schedule([done_event, stream, src, channel_id, shape, send] {
tsl::profiler::TraceMe trace([&] {
return tsl::profiler::TraceMeEncode(
"PjRtStreamExecutorLoadedExecutable::Send",
{{"channel_id", channel_id}});
});
PjRtChunk chunk = PjRtChunk::AllocateDefault(src.size());
auto status = stream->Memcpy(chunk.data(), src, src.size());
if (!status.ok()) {
done_event.SetError(status);
return;
}
status = stream->RecordEvent(done_event.get().get());
if (!status.ok()) {
done_event.SetError(status);
return;
}
if (auto st = stream->BlockHostUntilDone(); !st.ok()) {
done_event.SetError(absl::InternalError(absl::StrFormat(
"failed to synchronize send operation with a stream: %s",
st.message())));
return;
}
auto sent = send->callback({shape}, std::move(chunk),
                                 /*total_size_in_bytes=*/src.size(),
                                 /*done=*/true);
if (!sent.ok()) {
done_event.SetError(sent);
} else {
done_event.SetStateConcrete();
}
});
return std::move(done_event);
};
}
namespace {
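// CopyToDeviceStream implementation that writes incoming host chunks into a
// device buffer on `stream_` and records `done_` once all bytes have arrived.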
class StreamExecutorCopyToDeviceStream : public CopyToDeviceStream {
public:
StreamExecutorCopyToDeviceStream(
int64_t channel_id, se::Stream* stream, se::DeviceMemoryBase dst,
AsyncValueRef<std::unique_ptr<se::Event>> done)
: CopyToDeviceStream(dst.size(), 1),
channel_id_(channel_id),
stream_(stream),
dst_(dst),
done_(std::move(done)) {}
PjRtFuture<> AddChunk(PjRtChunk chunk) final {
tsl::profiler::TraceMe trace([&] {
return tsl::profiler::TraceMeEncode(
"StreamExecutorCopyToDeviceStream::AddChunk",
{{"channel_id", channel_id_}});
});
absl::ReleasableMutexLock lock(&mu_);
VLOG(3) << "Add chunk to a H2D channel #" << channel_id_ << ": "
<< "size=" << chunk.size() << ", "
<< "current_bytes=" << current_bytes_ << ", "
<< "total_bytes=" << total_bytes_;
if (chunk.size() % granule_size_in_bytes() != 0) {
done_.SetError(absl::InvalidArgumentError(absl::StrFormat(
"Chunk size (%d) was not a multiple of the granule size (%d)",
chunk.size(), granule_size_in_bytes())));
return PjRtFuture<>(done_.GetError());
}
if (current_bytes_ + chunk.size() > total_bytes_) {
done_.SetError(absl::InvalidArgumentError(
absl::StrFormat("Adding chunk of size %d would overflow buffer of "
"size %d (%d already transferred)",
chunk.size(), total_bytes_, current_bytes_)));
return PjRtFuture<>(done_.GetError());
}
se::DeviceMemoryBase dst(
reinterpret_cast<std::byte*>(dst_.opaque()) + current_bytes_,
dst_.size() - current_bytes_);
current_bytes_ += chunk.size();
bool complete = IsCompleteLocked();
lock.Release();
auto copied = stream_->Memcpy(&dst, chunk.data(), chunk.size());
if (!copied.ok()) {
done_.SetError(copied);
return PjRtFuture<>(done_.GetError());
}
auto* chunk_ptr = std::make_unique<PjRtChunk>(std::move(chunk)).release();
auto deleted = stream_->DoHostCallback([chunk_ptr]() { delete chunk_ptr; });
if (!deleted.ok()) {
done_.SetError(deleted);
return PjRtFuture<>(done_.GetError());
}
if (complete) {
auto recorded = stream_->RecordEvent(done_.get().get());
if (!recorded.ok()) {
done_.SetError(recorded);
return PjRtFuture<>(done_.GetError());
}
done_.SetStateConcrete();
}
return PjRtFuture<>(absl::OkStatus());
}
private:
int64_t channel_id_;
se::Stream* stream_;
se::DeviceMemoryBase dst_;
AsyncValueRef<std::unique_ptr<se::Event>> done_;
};
}
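// Adapts the per-replica RecvCallbacks from ExecuteOptions into the
// RecvDeviceMemoryFunction used by XLA Recv ops; each invocation hands the
// matching callback a CopyToDeviceStream that fills the destination buffer.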
static RecvDeviceMemoryFunction ConvertRecvCallbacksToRecvFunction(
int replica, const ExecuteOptions& options) {
  if (replica >= options.recv_callbacks.size()) {
return [replica](int64_t channel_id, se::Stream*, const Shape&,
se::DeviceMemoryBase*,
const absl::flat_hash_map<std::string, std::string>&) {
return InvalidArgument(
"Failed to receive a buffer from the channel_id=%d, there was no "
"recv callbacks registered for the replica=%d",
channel_id, replica);
};
}
absl::Span<const RecvCallback> callbacks = options.recv_callbacks[replica];
return [callbacks](int64_t channel_id, se::Stream* stream, const Shape& shape,
se::DeviceMemoryBase* dst,
const absl::flat_hash_map<std::string, std::string>&)
-> absl::StatusOr<AsyncValueRef<std::unique_ptr<se::Event>>> {
VLOG(3) << "Recv from channel #" << channel_id
<< " (shape=" << shape.ToString() << ")";
tsl::profiler::TraceMe trace([&] {
return tsl::profiler::TraceMeEncode(
"PjRtStreamExecutorLoadedExecutable::Recv",
{{"channel_id", channel_id}});
});
const RecvCallback* recv = FindCallback(channel_id, callbacks);
if (!recv) {
return InvalidArgument(
"Failed to recv a buffer from the channel_id=%d, callback not found",
channel_id);
}
TF_ASSIGN_OR_RETURN(auto event, stream->parent()->CreateEvent());
auto done_event = MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>(
std::move(event));
recv->callback({shape}, std::make_unique<StreamExecutorCopyToDeviceStream>(
channel_id, stream, *dst, done_event));
return std::move(done_event);
};
}
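// Enqueues a single replica/partition execution on the device's compute
// stream: validates and holds the argument buffers (including donation
// bookkeeping), builds the ExecutionInputs, configures ExecutableRunOptions,
// and runs the LocalExecutable asynchronously.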
absl::StatusOr<ScopedShapedBuffer>
PjRtStreamExecutorLoadedExecutable::EnqueueExecution(
absl::Span<PjRtBuffer* const> argument_handles, int replica, int partition,
int executable_idx, const RunId& run_id, const ExecuteOptions& options,
PjRtDevice* device,
std::vector<PjRtStreamExecutorBuffer::ScopedHold>* device_buffers,
std::shared_ptr<DeviceAssignment> device_assignment,
std::vector<std::function<void()>>& compute_callbacks) const {
int device_ordinal = tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->local_device_state()
->local_device_id()
.value();
LocalDeviceState* device_state = &(client_->device_state(device_ordinal));
tsl::profiler::TraceMeConsumer activity(
"PjRtStreamExecutorLoadedExecutable::EnqueueExecution",
tsl::profiler::ContextType::kPjRt, run_id.ToInt());
VLOG(3) << "Replica " << replica << ", partition " << partition
<< " mapped to device ordinal for execution: " << device_ordinal;
absl::flat_hash_set<BufferSequencingEvent*> events;
device_buffers->reserve(argument_handles.size());
absl::Span<int const> donated_params =
ParametersThatMustBeDonated(executable_idx);
auto donate_it = donated_params.begin();
absl::flat_hash_set<PjRtStreamExecutorBuffer*> used_buffers;
absl::flat_hash_set<PjRtStreamExecutorBuffer*> donated_buffers;
for (int i = 0; i < argument_handles.size(); ++i) {
auto* handle =
tensorflow::down_cast<PjRtStreamExecutorBuffer*>(argument_handles[i]);
if (handle->device() != device) {
return InvalidArgument(
"Buffer passed to Execute() as argument %d to replica %d is on "
"device %s, but replica is assigned to device %s.",
i, replica, handle->device()->DebugString(), device->DebugString());
}
bool donation_denied_at_runtime =
options.non_donatable_input_indices.contains(i);
bool must_donate = donate_it != donated_params.end() && *donate_it == i &&
!donation_denied_at_runtime;
if (must_donate) {
++donate_it;
}
bool already_used = !used_buffers.emplace(handle).second;
bool already_donated =
must_donate ? !donated_buffers.emplace(handle).second
: donated_buffers.find(handle) != donated_buffers.end();
if (must_donate && already_donated) {
return InvalidArgument(
"Attempt to donate the same buffer twice in Execute() (second use: "
"flattened argument %d, replica %d). "
"Toy example for this bug: `f(donate(a), donate(a))`.",
i, replica);
} else if (must_donate && already_used) {
return InvalidArgument(
"Attempt to donate a buffer which is also used by the same call to "
"Execute() (second use: flattened argument %d, replica %d). "
"Toy example for this bug: `f(a, donate(a))`.",
i, replica);
} else if (already_donated) {
return InvalidArgument(
"Attempt to use a buffer that was previously donated in the same "
"call to Execute() (second use: flattened argument %d, replica %d). "
"Toy example for this bug: `f(donate(a), a)`.",
i, replica);
}
device_buffers->emplace_back(handle->GetBufferWithHold(
must_donate ? PjRtStreamExecutorBuffer::ScopedHold::kDonation
: PjRtStreamExecutorBuffer::ScopedHold::kUsage));
PjRtStreamExecutorBuffer::ScopedHold& device_buffer =
device_buffers->back();
if (!device_buffer.ok()) {
return InvalidArgument(
"Invalid buffer passed to Execute() as argument %d to replica %d: "
"%s",
i, replica, device_buffer.status().ToString());
}
if (device_state->allocation_model() ==
LocalDeviceState::kComputeSynchronized) {
      GetDeviceBufferEvents(*device_buffer, /*get_usage_events=*/false,
                            &events);
}
GetDeviceBufferEvents(*device_buffer, must_donate,
&events);
}
if (options.arguments_are_tupled) {
if (!parameter_is_tupled_arguments_) {
return InvalidArgument(
"Arguments may only be supplied as a tuple when the executable was "
"compiled with a single tupled parameter");
}
if (argument_handles.size() != 1) {
return InvalidArgument(
"Option arguments_are_tupled was true but %d buffers were passed to "
"execution",
argument_handles.size());
}
}
TF_ASSIGN_OR_RETURN(
std::vector<ExecutionInput> execution_inputs,
MakeExecutionInputsAndWaitForEvents(
device_ordinal, options,
on_device_executable_parameter_shapes_[executable_idx],
argument_handles, *device_buffers, events));
auto* thread_pool = client_->thread_pool();
SendDeviceMemoryFunction send_device_memory =
ConvertSendCallbacksToSendFunction(replica, options, thread_pool);
RecvDeviceMemoryFunction recv_device_memory =
ConvertRecvCallbacksToRecvFunction(replica, options);
ExecutableRunOptions run_options;
run_options.set_stream(device_state->compute_stream());
run_options.set_device_ordinal(device_state->local_device_id().value());
run_options.set_local_device_count(client_->client()->device_count());
run_options.set_physical_device_ordinal(
device_state->local_hardware_id().value());
run_options.set_host_to_device_stream(device_state->host_to_device_stream());
run_options.set_device_to_host_stream(device_state->GetDeviceToHostStream());
run_options.set_allocator(client_->allocator());
run_options.set_intra_op_thread_pool(
client_->client()->backend().eigen_intra_op_thread_pool_device());
run_options.set_device_assignment(device_assignment.get());
if (options.launch_id != 0) {
run_options.set_run_id(RunId(options.launch_id));
} else {
run_options.set_run_id(run_id);
}
run_options.set_rng_seed(device_state->GetNewPrngSeed());
run_options.set_gpu_executable_run_options(client_->gpu_run_options());
run_options.set_launch_id(options.launch_id);
run_options.set_send_device_memory_function(&send_device_memory);
run_options.set_recv_device_memory_function(&recv_device_memory);
if (run_options.launch_id() != 0) {
VLOG(3) << "launch id for " << name() << ": " << run_options.launch_id();
}
if (options.context != nullptr) {
run_options.set_ffi_execution_context(&options.context->ffi_context());
}
std::shared_ptr<Semaphore::ScopedReservation> compute_reservation;
{
tsl::profiler::TraceMe traceme("ComputeSemaphoreAcquire");
compute_reservation = std::make_shared<Semaphore::ScopedReservation>(
device_state->compute_semaphore().ScopedAcquire(1));
}
absl::StatusOr<ExecutionOutput> result_buffer_or_status =
executables_[executable_idx]->RunAsync(std::move(execution_inputs),
run_options);
VLOG(1) << "Replica " << replica << " partition " << partition
<< " completed; ok=" << result_buffer_or_status.ok();
if (!result_buffer_or_status.ok()) {
return result_buffer_or_status.status();
}
if (device_state->allocation_model() == LocalDeviceState::kSynchronous) {
ExecutionOutput& execution_output = result_buffer_or_status.value();
std::vector<se::OwningDeviceMemory> donated_memory =
execution_output.ConsumeToBeReleased();
absl::InlinedVector<se::DeviceMemoryBase, 3> donated_ptrs;
donated_ptrs.reserve(donated_memory.size());
for (se::OwningDeviceMemory& owning : donated_memory) {
donated_ptrs.push_back(owning.Release());
}
compute_callbacks.push_back(
[references{std::make_tuple(executables_[executable_idx],
compute_reservation, device_assignment)},
donated_ptrs{std::move(donated_ptrs)}, allocator{client_->allocator()},
device_ordinal]() {
for (const auto& ptr : donated_ptrs) {
TF_CHECK_OK(allocator->Deallocate(device_ordinal, ptr));
}
});
} else {
compute_callbacks.push_back(
[to_release{std::make_tuple(executables_[executable_idx],
compute_reservation,
device_assignment)}]() {});
}
return std::move(result_buffer_or_status).value().ConsumeResult();
}
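// Converts the execution's ScopedShapedBuffer into PjRtBuffers, optionally
// untupling the result into one buffer per tuple element.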
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtStreamExecutorLoadedExecutable::MakeOutputBuffers(
int device_ordinal, const ExecuteOptions& options,
ScopedShapedBuffer result_buffer,
std::shared_ptr<BufferSequencingEvent> definition_event, PjRtDevice* device,
std::vector<std::function<void()>>& compute_callbacks,
std::vector<std::shared_ptr<TrackedDeviceBuffer>>& buffers_to_release)
const {
tsl::profiler::TraceMe traceme("MakeOutputBuffers");
std::vector<std::unique_ptr<PjRtBuffer>> outputs;
LocalDeviceState* device_state = &(client_->device_state(device_ordinal));
if (options.untuple_result && result_buffer.on_device_shape().IsTuple()) {
int tuple_count = result_buffer.on_device_shape().tuple_shapes_size();
outputs.reserve(tuple_count);
for (int i = 0; i < tuple_count; ++i) {
ScopedShapedBuffer tuple_buffer = result_buffer.TakeSubTree({i});
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtBuffer> buffer,
OutputBufferHelper(&tuple_buffer, definition_event, client_, device,
device_state, buffers_to_release));
outputs.push_back(std::move(buffer));
}
if (device_state->allocation_model() == LocalDeviceState::kSynchronous) {
ShapedBuffer root_buffer_holder = result_buffer.release();
se::DeviceMemoryBase root_buffer = root_buffer_holder.root_buffer();
compute_callbacks.push_back(
[root_buffer, allocator{client_->allocator()}, device_ordinal]() {
TF_CHECK_OK(allocator->Deallocate(device_ordinal, root_buffer));
});
}
} else {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtBuffer> buffer,
OutputBufferHelper(&result_buffer, definition_event, client_, device,
device_state, buffers_to_release));
outputs.push_back(std::move(buffer));
}
return outputs;
}
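// Returns the first predetermined error found among the arguments'
// definition events, or OK if none of the inputs is in an error state.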
static absl::Status GetFirstInputError(
absl::Span<PjRtBuffer* const> argument_handles) {
for (auto* handle : argument_handles) {
auto* buffer = tensorflow::down_cast<PjRtStreamExecutorBuffer*>(handle);
PjRtStreamExecutorBuffer::ScopedHold hold =
buffer->GetBufferWithUsageHold();
for (const auto& event : hold->definition_events()) {
if (event->IsPredeterminedError()) {
return event->GetDefinedStatus();
}
}
}
return absl::OkStatus();
}
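// Runs the executable for one (replica, partition) pair: resolves the target
// device, enqueues the execution, creates the definition event for the
// outputs, confirms donations, and schedules completion callbacks on the
// compute stream.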
absl::StatusOr<PjRtLoadedExecutable::Result>
PjRtStreamExecutorLoadedExecutable::ExecuteHelper(
absl::Span<PjRtBuffer* const> argument_handles, int replica, int partition,
const RunId& run_id, const ExecuteOptions& options, bool fill_future,
PjRtDevice* device) const {
const uint64_t start_time_usecs = tsl::Env::Default()->NowMicros();
std::shared_ptr<DeviceAssignment> device_assignment;
if (device == nullptr) {
CHECK(device_assignment_ != nullptr);
const int64_t device_id = (*device_assignment_)(replica, partition);
PjRtGlobalDeviceId global_device_id(device_id);
TF_ASSIGN_OR_RETURN(device, client_->LookupDevice(global_device_id));
device_assignment = device_assignment_;
} else {
CHECK(device_assignment_ == nullptr);
CHECK_EQ(replica, 0);
CHECK_EQ(partition, 0);
CHECK(addressable_devices_.empty());
device_assignment = std::make_shared<DeviceAssignment>(1, 1);
(*device_assignment)(0, 0) = device->id();
}
absl::Status input_error = GetFirstInputError(argument_handles);
if (!input_error.ok()) {
TF_ASSIGN_OR_RETURN(PjRtMemorySpace * memory_space,
device->default_memory_space());
std::vector<std::unique_ptr<PjRtBuffer>> outputs;
TF_ASSIGN_OR_RETURN(auto hlo_modules, GetHloModules());
for (const auto& hlo_module : hlo_modules) {
TF_ASSIGN_OR_RETURN(
auto error_buffer,
client_->CreateErrorBuffer(input_error, hlo_module->result_shape(),
memory_space));
outputs.push_back(std::move(error_buffer));
}
auto future = std::make_optional(PjRtFuture<>(input_error));
return Result({std::move(future), std::move(outputs)});
}
CHECK_EQ(device->process_index(), client_->process_index());
int device_ordinal = tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->local_device_state()
->local_device_id()
.value();
tsl::profiler::TraceMe traceme(
"PjRtStreamExecutorLoadedExecutable::ExecuteHelper");
VLOG(1) << "Replica " << replica << ", partition " << partition
<< " mapped to device ordinal for execution: " << device_ordinal;
int executable_idx = executables_.size() > 1 ? partition : 0;
std::vector<std::function<void()>> compute_callbacks;
std::vector<PjRtStreamExecutorBuffer::ScopedHold> device_buffers;
device_buffers.reserve(argument_handles.size());
absl::StatusOr<ScopedShapedBuffer> result_buffer_or_status = EnqueueExecution(
argument_handles, replica, partition, executable_idx, run_id, options,
device, &device_buffers, std::move(device_assignment), compute_callbacks);
if (!result_buffer_or_status.ok()) {
LOG(ERROR) << "Execution of replica " << replica
<< " failed: " << result_buffer_or_status.status();
return result_buffer_or_status.status();
}
ScopedShapedBuffer result_buffer = std::move(result_buffer_or_status).value();
LocalDeviceState* device_state = &(client_->device_state(device_ordinal));
se::Stream* stream = device_state->compute_stream();
absl::StatusOr<EventPool::Handle> event_or =
device_state->event_pool().ThenAllocateAndRecordEvent(stream);
if (!event_or.ok()) {
StallStreamOnError(device_state, stream);
for (PjRtStreamExecutorBuffer::ScopedHold& b : device_buffers) {
if (b.type() == PjRtStreamExecutorBuffer::ScopedHold::kDonation) {
b.ConfirmDonation();
}
}
return event_or.status();
}
auto definition_event =
std::make_shared<BufferSequencingEvent>(client_->thread_pool());
definition_event->SetSequencingEvent(std::move(event_or).value(), stream);
std::vector<std::shared_ptr<TrackedDeviceBuffer>> buffers_to_release;
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<PjRtBuffer>> outputs,
MakeOutputBuffers(device_ordinal, options, std::move(result_buffer),
definition_event, device, compute_callbacks,
buffers_to_release));
for (PjRtStreamExecutorBuffer::ScopedHold& b : device_buffers) {
if (b.type() == PjRtStreamExecutorBuffer::ScopedHold::kUsage) {
RecordUsage(std::move(b), device_state, device_state, definition_event,
stream,
                  /*prefer_to_retain_reference=*/false, &buffers_to_release);
} else {
CHECK(b.type() == PjRtStreamExecutorBuffer::ScopedHold::kDonation);
b.ConfirmDonation();
}
}
std::optional<PjRtFuture<>> future;
if (fill_future) {
auto promise = PjRtFuture<>::CreatePromise();
future = PjRtFuture<>(promise);
compute_callbacks.push_back(
[promise = std::move(promise)]() mutable { promise.Set(); });
}
TF_RETURN_IF_ERROR(device_state->ThenExecuteCallback(
stream, [callbacks{std::move(compute_callbacks)},
buffers_to_release{std::move(buffers_to_release)}]() {
for (auto& fn : callbacks) {
fn();
}
}));
metrics::ReportExecutableEnqueueTime(tsl::Env::Default()->NowMicros() -
start_time_usecs);
return Result({std::move(future), std::move(outputs)});
}
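// Runs one execution per addressable device, dispatching all but the
// single-device case onto per-device execute threads, then gathers the
// per-device output buffers and optional futures.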
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
PjRtStreamExecutorLoadedExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
if (device_assignment_ == nullptr) {
return InvalidArgument("Execute expects a non-null device_assignment");
}
RunId run_id;
tsl::profiler::TraceMeProducer activity(
"PjRtStreamExecutorLoadedExecutable::Execute",
tsl::profiler::ContextType::kPjRt, run_id.ToInt());
const int num_addressable_devices = addressable_devices_.size();
if (argument_handles.size() != num_addressable_devices) {
return InvalidArgument(
"Attempted to execute with %d argument lists when local device "
"count is %d (total replica count: %d, partition count: %d)",
argument_handles.size(), num_addressable_devices, num_replicas(),
num_partitions());
}
VLOG(1) << "Executing computation " << name()
<< "; num_replicas=" << num_replicas()
<< " num_partitions=" << num_partitions()
<< " num_addressable_devices=" << num_addressable_devices;
std::vector<absl::StatusOr<Result>> results(num_addressable_devices);
if (num_addressable_devices == 1 && !ThisThreadIsInsideHostCallback()) {
const int replica = addressable_device_logical_ids_[0].replica;
const int partition = addressable_device_logical_ids_[0].partition;
results[0] = ExecuteHelper(argument_handles[0], replica, partition, run_id,
options, returned_futures.has_value());
} else {
absl::Mutex mu;
int running = num_addressable_devices;
int failed = 0;
absl::Status first_failure_status;
for (int i = 0; i < num_addressable_devices; ++i) {
const int replica = addressable_device_logical_ids_[i].replica;
const int partition = addressable_device_logical_ids_[i].partition;
PjRtDevice* device = addressable_devices_[i];
const LocalDeviceState& device_state =
*tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)
->local_device_state();
device_state.execute_thread()->Schedule([&, replica, partition, i] {
results[i] =
ExecuteHelper(argument_handles[i], replica, partition, run_id,
options, returned_futures.has_value());
absl::MutexLock lock(&mu);
--running;
if (!results[i].ok()) {
if (failed == 0) {
first_failure_status = results[i].status();
}
++failed;
}
});
}
auto done_running_or_failed = [&]() {
mu.AssertHeld();
return running == 0 || failed > 0;
};
absl::MutexLock lock(&mu);
mu.Await(absl::Condition(&done_running_or_failed));
if (failed > 0) {
auto done_running = [&]() {
mu.AssertHeld();
return running == 0;
};
if (!mu.AwaitWithTimeout(absl::Condition(&done_running),
absl::Seconds(10))) {
LOG(FATAL)
<< "Replicated computation launch failed, but not all replicas "
"terminated. Aborting process to work around deadlock. "
"Failure message (there may have been multiple failures, see "
"the error log for all failures): \n\n"
<< first_failure_status.message();
}
}
}
VLOG(1) << "Replicated execution complete.";
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> wrapped_results(
num_addressable_devices);
if (returned_futures.has_value()) {
returned_futures->reserve(num_addressable_devices);
}
for (int i = 0; i < num_addressable_devices; ++i) {
const int replica = addressable_device_logical_ids_[i].replica;
const int partition = addressable_device_logical_ids_[i].partition;
auto& statusor = results[i];
if (!statusor.ok()) {
if (returned_futures.has_value()) {
returned_futures->clear();
}
if (num_addressable_devices == 1) {
return statusor.status();
} else {
return AppendStatus(
statusor.status(),
absl::StrFormat("while running replica %d and partition %d of a "
"replicated computation (other "
"replicas may have failed as well).",
replica, partition));
}
}
wrapped_results[i] = std::move(statusor->buffers);
if (returned_futures.has_value()) {
returned_futures->push_back(*std::move(statusor->future));
}
}
return wrapped_results;
}
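// Runs the executable on a single addressable device that must appear in the
// executable's device assignment.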
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtStreamExecutorLoadedExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
if (device_assignment_ == nullptr) {
return InvalidArgument("ExecuteShard expects a non-null device_assignment");
}
for (int i = 0; i < addressable_devices_.size(); ++i) {
if (addressable_devices_[i] == device) {
VLOG(1) << "ExecuteShard executes computation " << name()
<< " on assigned replica/partition on device "
<< device->DebugString();
TF_ASSIGN_OR_RETURN(
auto result,
ExecuteHelper(argument_handles,
addressable_device_logical_ids_[i].replica,
addressable_device_logical_ids_[i].partition, RunId(),
options, fill_future));
returned_future = std::move(result.future);
return std::move(result.buffers);
}
}
return InvalidArgument(
"ExecuteShard attempted to execute on device id %d which is not "
"addressable by this client",
device->id());
}
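// Runs a portable (single replica, single partition) executable on an
// explicitly provided device; requires that no device assignment was baked
// into the executable.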
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
PjRtStreamExecutorLoadedExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
if (device_assignment_ != nullptr) {
return InvalidArgument("ExecutePortable gets a non-portable executable");
}
if (num_replicas() != 1 || num_partitions() != 1) {
return InvalidArgument(
"ExecutePortable expects a single-core executable but gets "
"one with %d replica %d partition",
num_replicas(), num_partitions());
}
if (device == nullptr) {
return InvalidArgument("ExecutePortable expects a device to be specified");
}
VLOG(1) << "ExecutePortable executes single-core portable executable "
<< name();
TF_ASSIGN_OR_RETURN(auto result, ExecuteHelper(argument_handles,
                                                 /*replica=*/0,
                                                 /*partition=*/0, RunId(),
options, fill_future, device));
returned_future = std::move(result.future);
return std::move(result.buffers);
}
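// Returns the optimized HLO modules backing each local executable.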
absl::StatusOr<std::vector<std::shared_ptr<HloModule>>>
PjRtStreamExecutorLoadedExecutable::GetHloModules() const {
std::vector<std::shared_ptr<HloModule>> modules;
modules.reserve(executables().size());
for (const auto& local_exec : executables()) {
if (!local_exec->executable()->has_module()) {
return InvalidArgument("Executable does not have HLO modules.");
}
modules.push_back(local_exec->executable()->shared_module());
}
return std::move(modules);
}
namespace {
absl::StatusOr<absl::string_view> MemoryKindFromSimpleShape(
const Shape& shape, absl::string_view default_memory_kind) {
if (!shape.has_layout()) {
return default_memory_kind;
}
switch (shape.layout().memory_space()) {
case Layout::kHostMemorySpace:
return PinnedHostMemorySpace::kKind;
case Layout::kGenericFastMemorySpace:
case Layout::kDefaultMemorySpace:
return default_memory_kind;
default:
return InvalidArgument("Unexpected memory space %d in output layout",
shape.layout().memory_space());
}
}
absl::StatusOr<std::vector<absl::string_view>> MemoryKindsFromShape(
const Shape& shape, absl::string_view default_memory_kind) {
if (!shape.IsTuple()) {
TF_ASSIGN_OR_RETURN(absl::string_view memory_kind,
MemoryKindFromSimpleShape(shape, default_memory_kind));
return {{memory_kind}};
}
std::vector<absl::string_view> result;
result.reserve(shape.tuple_shapes_size());
for (const auto& element_shape : shape.tuple_shapes()) {
TF_ASSIGN_OR_RETURN(
absl::string_view element_memory_kind,
MemoryKindFromSimpleShape(element_shape, default_memory_kind));
result.push_back(element_memory_kind);
}
return result;
}
}
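// Maps each output (or tuple element) shape to a memory kind string, using
// the layout's memory space and the default memory space of the first
// addressable device.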
absl::StatusOr<std::vector<std::vector<absl::string_view>>>
PjRtStreamExecutorLoadedExecutable::GetOutputMemoryKinds() const {
TF_ASSIGN_OR_RETURN(auto shapes, GetOutputShapes());
if (addressable_devices().empty()) {
return Unimplemented(
"GetOutputMemoryKinds is not supported when there are no addressable "
"devices in PjRtStreamExecutorLoadedExecutable.");
}
TF_ASSIGN_OR_RETURN(PjRtMemorySpace * default_memory_space,
addressable_devices()[0]->default_memory_space());
std::vector<std::vector<absl::string_view>> out;
out.reserve(shapes.size());
for (const auto& shape : shapes) {
TF_ASSIGN_OR_RETURN(
std::vector<absl::string_view> memory_kind,
MemoryKindsFromShape(shape, default_memory_space->kind()));
out.push_back(memory_kind);
}
return out;
}
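// Fills in compile-time extras: the device assignment derived from the
// compile options, the addressable devices with their logical
// replica/partition ids, and default thread pool, allocator, and layout
// canonicalization callback wired into the build options.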
absl::StatusOr<PjRtStreamExecutorClient::ExecutableExtras>
PjRtStreamExecutorClient::GetExecutableExtras(CompileOptions* options) {
ExecutableExtras extras;
std::shared_ptr<DeviceAssignment>& device_assignment =
extras.device_assignment;
std::vector<PjRtStreamExecutorLoadedExecutable::LogicalDeviceIds>&
addressable_device_logical_ids = extras.addressable_device_logical_ids;
std::vector<PjRtDevice*>& addressable_devices = extras.addressable_devices;
ExecutableBuildOptions& build_options = options->executable_build_options;
if (!build_options.compile_thread_pool()) {
build_options.set_compile_thread_pool(thread_pool());
}
if (!build_options.device_allocator()) {
build_options.set_device_allocator(allocator());
}
auto layout_callback = [local_client = client(),
options](const HloModule& module)
-> absl::StatusOr<std::pair<std::vector<Shape>, Shape>> {
ExecutableBuildOptions build_options = options->executable_build_options;
std::vector<const Shape*> argument_layout_pointers;
std::optional<std::vector<Shape>> argument_layouts =
options->argument_layouts;
Shape result_layout;
TF_RETURN_IF_ERROR(DetermineArgumentLayoutsFromCompileOptions(
XlaComputation(module.ToProto()),
[local_client = local_client](Shape shape) {
return local_client->backend()
.transfer_manager()
->ChooseCompactLayoutForShape(shape);
},
argument_layouts, &build_options, &argument_layout_pointers));
result_layout = *build_options.result_layout();
return std::make_pair(*argument_layouts, result_layout);
};
build_options.set_layout_canonicalization_callback(layout_callback);
int num_replicas;
int num_partitions;
TF_RETURN_IF_ERROR(ParseDeviceAssignmentCompileOptions(
options->compile_portable_executable, &options->executable_build_options,
[this](int num_replicas, int num_partitions) {
return this->GetDefaultDeviceAssignment(num_replicas, num_partitions);
},
&num_replicas, &num_partitions, &device_assignment));
if (device_assignment != nullptr) {
addressable_device_logical_ids.reserve(num_replicas * num_partitions);
addressable_devices.reserve(num_replicas * num_partitions);
for (int replica = 0; replica < num_replicas; ++replica) {
for (int partition = 0; partition < num_partitions; ++partition) {
int64_t device_id = (*device_assignment)(replica, partition);
PjRtGlobalDeviceId global_device_id(device_id);
TF_ASSIGN_OR_RETURN(PjRtDevice * device,
LookupDevice(global_device_id));
if (device->process_index() != process_index()) {
VLOG(3) << "Non-local device: " << device_id;
continue;
}
        PjRtLoadedExecutable::LogicalDeviceIds logical_device_ids;
        logical_device_ids.replica = replica;
        logical_device_ids.partition = partition;
        addressable_device_logical_ids.push_back(std::move(logical_device_ids));
addressable_devices.push_back(device);
}
}
if (addressable_devices.empty()) {
return InvalidArgument(
"Device assignment (%s) does not have any local devices.",
device_assignment->ToString());
}
if (build_options.device_ordinal() < 0) {
build_options.set_device_ordinal(
addressable_devices.front()->local_hardware_id().value());
}
}
return extras;
}
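// Shared compilation path: applies option overrides, resolves the device
// assignment, compiles the XlaComputation with the LocalClient, and wraps the
// result in a PjRtStreamExecutorLoadedExecutable with donation set up.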
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtStreamExecutorClient::CompileInternal(
const XlaComputation& computation,
const std::vector<const Shape*>& argument_layout_pointers,
LayoutCanonicalizationCallback layout_canonicalization_callback,
CompileOptions options) {
tsl::profiler::TraceMe traceme("PjRtStreamExecutorClient::Compile");
VLOG(1) << "PjRtStreamExecutorClient::Compile";
options.executable_build_options.set_process_index(process_index());
TF_RET_CHECK(device_count() % addressable_device_count() == 0)
<< "Each process is expected to have the same number of devices";
options.executable_build_options.set_process_count(
device_count() / addressable_device_count());
auto input_options = options;
TF_RETURN_IF_ERROR(options.ApplyAllOptionOverrides());
TF_ASSIGN_OR_RETURN(ExecutableExtras extras, GetExecutableExtras(&options));
std::shared_ptr<DeviceAssignment>& device_assignment =
extras.device_assignment;
std::vector<PjRtStreamExecutorLoadedExecutable::LogicalDeviceIds>&
addressable_device_logical_ids = extras.addressable_device_logical_ids;
std::vector<PjRtDevice*>& addressable_devices = extras.addressable_devices;
if (layout_canonicalization_callback) {
options.executable_build_options.set_layout_canonicalization_callback(
layout_canonicalization_callback);
}
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<LocalExecutable>> local_executables,
client()->Compile(computation, argument_layout_pointers,
options.executable_build_options));
auto executable = std::make_unique<PjRtStreamExecutorLoadedExecutable>(
std::move(local_executables), options.parameter_is_tupled_arguments,
std::move(device_assignment), std::move(input_options),
std::move(addressable_device_logical_ids), std::move(addressable_devices),
this);
TF_RETURN_IF_ERROR(
executable->SetUpDonation(options.parameter_is_tupled_arguments));
return std::unique_ptr<PjRtLoadedExecutable>(std::move(executable));
}
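// Compiles an MLIR module by lowering it to an XlaComputation; when no
// argument layouts are given, layouts and memory kinds are derived from the
// module's attributes via a layout canonicalization callback.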
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtStreamExecutorClient::Compile(mlir::ModuleOp module,
CompileOptions options) {
XlaComputation xla_computation;
const ExecutableBuildOptions& exec_build_options =
options.executable_build_options;
TF_RETURN_IF_ERROR(MlirToXlaComputation(
module, xla_computation,
      /*use_tuple_args=*/options.parameter_is_tupled_arguments,
      /*return_tuple=*/false, exec_build_options.use_shardy_partitioner()));
if (options.argument_layouts) {
return Compile(xla_computation, options);
}
TF_ASSIGN_OR_RETURN(std::vector<LayoutMode> arg_layout_modes,
GetArgLayoutModes(module));
TF_ASSIGN_OR_RETURN(std::vector<LayoutMode> out_layout_modes,
GetOutputLayoutModes(module));
TF_ASSIGN_OR_RETURN(std::vector<MemorySpaceColor> arg_memory_spaces,
GetArgMemoryKinds(module));
TF_ASSIGN_OR_RETURN(std::vector<MemorySpaceColor> out_memory_spaces,
GetOutputMemoryKinds(module));
auto layout_callback = [local_client = client(), &arg_layout_modes,
&out_layout_modes, &arg_memory_spaces,
&out_memory_spaces](const HloModule& module)
-> absl::StatusOr<std::pair<std::vector<Shape>, Shape>> {
XlaComputation xla_computation(XlaComputation(module.ToProto()));
return LayoutModesToXlaShapes(
xla_computation, arg_layout_modes, out_layout_modes, arg_memory_spaces,
out_memory_spaces,
[local_client](Shape shape) -> absl::StatusOr<Shape> {
return local_client->backend()
.transfer_manager()
->ChooseCompactLayoutForShape(shape);
});
};
TF_ASSIGN_OR_RETURN(auto arg_layouts_and_pointers,
LayoutModesToXla(
xla_computation, arg_layout_modes, out_layout_modes,
arg_memory_spaces, out_memory_spaces,
[this](Shape shape) -> absl::StatusOr<Shape> {
return this->client()
->backend()
.transfer_manager()
->ChooseCompactLayoutForShape(shape);
},
options.executable_build_options));
return CompileInternal(xla_computation, arg_layouts_and_pointers.second,
layout_callback, options);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtStreamExecutorClient::Compile(const XlaComputation& computation,
CompileOptions options) {
std::vector<const Shape*> argument_layout_pointers;
TF_RETURN_IF_ERROR(DetermineArgumentLayoutsFromCompileOptions(
computation,
[local_client = client()](Shape shape) {
return local_client->backend()
.transfer_manager()
->ChooseCompactLayoutForShape(shape);
},
options.argument_layouts, &options.executable_build_options,
&argument_layout_pointers));
return CompileInternal(computation, argument_layout_pointers,
                         /*layout_canonicalization_callback=*/nullptr,
options);
}
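// Serializes a single-partition executable by exporting it through the
// backend compiler and bundling the result with its CompileOptions in an
// ExecutableAndOptionsProto.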
absl::StatusOr<std::string> PjRtStreamExecutorClient::SerializeExecutable(
const PjRtLoadedExecutable& executable) const {
const PjRtStreamExecutorLoadedExecutable* se_executable =
tensorflow::down_cast<const PjRtStreamExecutorLoadedExecutable*>(
&executable);
absl::Span<const std::shared_ptr<LocalExecutable>> local_executables =
se_executable->executables();
if (local_executables.empty()) {
return Internal("No local executable");
}
if (local_executables.size() != 1) {
return Unimplemented(
"PjRtStreamExecutorClient::SerializeExecutable unimplemented for MPMD "
"executables");
}
Executable* built_executable = local_executables[0]->executable();
Compiler* compiler = client_->backend().compiler();
TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result,
compiler->Export(built_executable));
TF_ASSIGN_OR_RETURN(std::string serialized, aot_result->SerializeAsString());
if (serialized.empty()) {
return Internal(
"PjRtStreamExecutorClient::SerializeExecutable proto serialization "
"failed");
}
ExecutableAndOptionsProto proto;
*proto.mutable_serialized_executable() = std::move(serialized);
TF_ASSIGN_OR_RETURN(*proto.mutable_compile_options(),
se_executable->compile_options_.ToProto());
return proto.SerializeAsString();
}
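// Reverses SerializeExecutable: parses the proto, restores (or overrides) the
// compile options, loads the serialized executable into a LocalExecutable,
// and rebuilds the loaded executable with donation set up.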
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtStreamExecutorClient::DeserializeExecutable(
absl::string_view serialized, std::optional<CompileOptions> options) {
ExecutableAndOptionsProto proto;
if (serialized.size() > std::numeric_limits<int>::max()) {
return Internal(
"PjRtStreamExecutorClient::DeserializeExecutable proto too large "
"(>2GB)");
}
if (!proto.ParseFromArray(serialized.data(), serialized.size())) {
return Internal(
"PjRtStreamExecutorClient::DeserializeExecutable proto "
"deserialization "
"failed");
}
CompileOptions compile_options;
if (options.has_value()) {
compile_options = *std::move(options);
} else {
TF_ASSIGN_OR_RETURN(compile_options,
CompileOptions::FromProto(proto.compile_options()));
}
auto input_options = compile_options;
tsl::profiler::TraceMe traceme(
"PjRtStreamExecutorClient::DeserializeExecutable");
VLOG(1) << "PjRtStreamExecutorClient::DeserializeExecutable";
TF_ASSIGN_OR_RETURN(ExecutableExtras extras,
GetExecutableExtras(&compile_options));
std::shared_ptr<DeviceAssignment>& device_assignment =
extras.device_assignment;
std::vector<PjRtStreamExecutorLoadedExecutable::LogicalDeviceIds>&
addressable_device_logical_ids = extras.addressable_device_logical_ids;
std::vector<PjRtDevice*>& addressable_devices = extras.addressable_devices;
std::string str = std::move(*proto.mutable_serialized_executable());
TF_ASSIGN_OR_RETURN(
std::unique_ptr<LocalExecutable> loaded,
client()->Load(str, compile_options.executable_build_options));
std::vector<std::unique_ptr<LocalExecutable>> local_executables;
local_executables.push_back(std::move(loaded));
auto executable = std::make_unique<PjRtStreamExecutorLoadedExecutable>(
std::move(local_executables),
compile_options.parameter_is_tupled_arguments,
std::move(device_assignment), std::move(input_options),
std::move(addressable_device_logical_ids), std::move(addressable_devices),
this);
TF_RETURN_IF_ERROR(
executable->SetUpDonation(compile_options.parameter_is_tupled_arguments));
return std::unique_ptr<PjRtLoadedExecutable>(std::move(executable));
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
PjRtStreamExecutorClient::LoadSerializedExecutable(
absl::string_view serialized, std::optional<CompileOptions> options,
const LoadOptions& load_options) {
return DeserializeExecutable(serialized, options);
}
} | #include "xla/pjrt/pjrt_stream_executor_client.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/mutex.h"
#include "xla/client/client_library.h"
#include "xla/client/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_comparison.h"
#include "xla/literal_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
absl::StatusOr<std::unique_ptr<PjRtStreamExecutorClient>> GetClient() {
LocalClient* local_client = xla::ClientLibrary::LocalClientOrDie();
TF_ASSIGN_OR_RETURN(se::Platform * platform,
PlatformUtil::GetPlatform("Host"));
TF_ASSIGN_OR_RETURN(se::StreamExecutor * executor,
platform->ExecutorForDevice(0));
auto device_state = std::make_unique<LocalDeviceState>(
executor, local_client, LocalDeviceState::kSynchronous,
      /*max_inflight_computations=*/32,
      /*allow_event_reuse=*/false, /*use_callback_stream=*/false);
auto device = std::make_unique<PjRtStreamExecutorDevice>(
0, std::move(device_state), "cpu");
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices;
devices.emplace_back(std::move(device));
return std::make_unique<PjRtStreamExecutorClient>(
"cpu", local_client, std::move(devices),
0, nullptr,
nullptr,
false,
nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> ToyExecutable(
PjRtStreamExecutorClient& client, Shape shape,
absl::AnyInvocable<void(XlaBuilder&)> set_up_aliases) {
CompileOptions compile_options;
XlaBuilder builder("Add");
auto a = Parameter(&builder, 0, shape, "a");
auto b = Parameter(&builder, 1, shape, "b");
auto c = Add(a, b);
auto d = Add(c, c);
Tuple(&builder, {c, d});
set_up_aliases(builder);
TF_ASSIGN_OR_RETURN(auto computation,
builder.Build(true));
TF_ASSIGN_OR_RETURN(auto executable,
client.Compile(computation, compile_options));
return executable;
}
absl::Status ExecuteWithSameInputBuffer(
absl::AnyInvocable<void(XlaBuilder&)> set_up_aliases) {
auto shape = xla::ShapeUtil::MakeScalarShape(xla::F32);
TF_ASSIGN_OR_RETURN(auto client, GetClient());
TF_RET_CHECK(!client->addressable_devices().empty());
auto* device0 = client->addressable_devices().front();
TF_ASSIGN_OR_RETURN(auto buffer,
client->CreateUninitializedBuffer(shape, device0));
TF_ASSIGN_OR_RETURN(auto executable,
ToyExecutable(*client, shape, std::move(set_up_aliases)));
return executable->Execute({{buffer.get(), buffer.get()}}, {})
.status();
}
TEST(PjRtStreamExecutorClientTest, DonateSameBufferTwice) {
auto status = ExecuteWithSameInputBuffer([](XlaBuilder& builder) {});
ASSERT_TRUE(status.ok());
status = ExecuteWithSameInputBuffer(
[](XlaBuilder& builder) { builder.SetUpAlias({0}, 0, {}); });
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), ::testing::HasSubstr("f(donate(a), a)"));
status = ExecuteWithSameInputBuffer(
[](XlaBuilder& builder) { builder.SetUpAlias({0}, 1, {}); });
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), ::testing::HasSubstr("f(a, donate(a))"));
status = ExecuteWithSameInputBuffer([](XlaBuilder& builder) {
builder.SetUpAlias({0}, 0, {});
builder.SetUpAlias({1}, 1, {});
});
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("f(donate(a), donate(a))"));
}
TEST(PjRtStreamExecutorClientTest, DonateWithControlDependency) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetClient());
auto literal = LiteralUtil::CreateR2({{1, 2, 3}, {4, 5, 6}});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
PjRtFuture<>::Promise promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
auto blocked_buffer =
std::move(*(buffer->DonateWithControlDependency(future)));
EXPECT_TRUE(buffer->IsDeleted());
buffer.reset();
absl::Mutex mu;
auto result_literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(blocked_buffer->on_device_shape()));
bool got_literal = false;
blocked_buffer->ToLiteral(result_literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
blocked_buffer.reset();
EXPECT_FALSE(got_literal);
promise.Set();
EXPECT_TRUE(future.IsReady());
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
TF_ASSERT_OK(literal_comparison::Equal(literal, *result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_stream_executor_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_stream_executor_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ca9acea-9d42-468d-b041-88b5b74e6f47 | cpp | tensorflow/tensorflow | semaphore | third_party/xla/xla/pjrt/semaphore.cc | third_party/xla/xla/pjrt/semaphore_test.cc | #include "xla/pjrt/semaphore.h"
#include <cstdint>
#include "absl/synchronization/mutex.h"
#include "tsl/platform/logging.h"
namespace xla {
Semaphore::Semaphore(int64_t capacity)
: value_(capacity), max_capacity_(capacity) {
CHECK_GE(capacity, 0);
}
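// CanAcquire is the predicate used with absl::Condition below; Acquire blocks
// the calling thread until `amount` units are available and then subtracts
// them from value_.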
bool Semaphore::CanAcquire(CanAcquireArgs* args) {
return args->semaphore->value_ >= args->amount;
}
void Semaphore::Acquire(int64_t amount) {
CHECK_GE(amount, 0);
CanAcquireArgs args;
args.semaphore = this;
args.amount = amount;
mu_.LockWhen(absl::Condition(&CanAcquire, &args));
value_ -= amount;
mu_.Unlock();
}
bool Semaphore::TryAcquire(int64_t amount) {
CHECK_GE(amount, 0);
absl::MutexLock lock(&mu_);
if (value_ >= amount) {
value_ -= amount;
return true;
}
return false;
}
void Semaphore::Release(int64_t amount) {
CHECK_GE(amount, 0);
absl::MutexLock lock(&mu_);
value_ += amount;
}
Semaphore::ScopedReservation::~ScopedReservation() {
if (semaphore_) {
semaphore_->Release(amount_);
}
}
Semaphore::ScopedReservation::ScopedReservation(
ScopedReservation&& other) noexcept {
semaphore_ = other.semaphore_;
amount_ = other.amount_;
other.semaphore_ = nullptr;
}
Semaphore::ScopedReservation& Semaphore::ScopedReservation::operator=(
ScopedReservation&& other) noexcept {
semaphore_ = other.semaphore_;
amount_ = other.amount_;
other.semaphore_ = nullptr;
return *this;
}
Semaphore::ScopedReservation Semaphore::ScopedAcquire(int64_t amount) {
Acquire(amount);
return ScopedReservation(this, amount);
}
} | #include "xla/pjrt/semaphore.h"
#include <gtest/gtest.h>
#include "absl/synchronization/notification.h"
#include "xla/test.h"
#include "tsl/platform/env.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
TEST(SemaphoreTest, UnthreadedTests) {
Semaphore semaphore(2);
EXPECT_EQ(semaphore.capacity(), 2);
EXPECT_FALSE(semaphore.TryAcquire(semaphore.capacity() + 1));
EXPECT_TRUE(semaphore.TryAcquire(semaphore.capacity()));
semaphore.Release(semaphore.capacity());
semaphore.Acquire(1);
semaphore.Release(1);
semaphore.Acquire(2);
semaphore.Release(2);
semaphore.Acquire(1);
semaphore.Acquire(1);
semaphore.Release(1);
semaphore.Acquire(1);
semaphore.Release(1);
semaphore.Acquire(1);
semaphore.Release(2);
{
auto a = semaphore.ScopedAcquire(1);
EXPECT_EQ(a.amount(), 1);
{ auto b = semaphore.ScopedAcquire(1); }
{ auto c = semaphore.ScopedAcquire(1); }
}
{
auto d = semaphore.ScopedAcquire(2);
EXPECT_EQ(d.amount(), 2);
}
}
TEST(SemaphoreTest, ConcurrentTest) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "test", 2);
Semaphore semaphore(2);
semaphore.Acquire(1);
absl::Notification a_done;
pool.Schedule([&]() {
semaphore.Acquire(2);
semaphore.Release(2);
a_done.Notify();
});
absl::Notification b_done;
pool.Schedule([&]() {
semaphore.Acquire(1);
semaphore.Release(1);
b_done.Notify();
});
b_done.WaitForNotification();
EXPECT_FALSE(a_done.HasBeenNotified());
semaphore.Release(1);
a_done.WaitForNotification();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/semaphore.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/semaphore_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e5b51f5-887b-4bbd-86cf-cea50fe6fd3e | cpp | tensorflow/tensorflow | pjrt_future | third_party/xla/xla/pjrt/pjrt_future.cc | third_party/xla/xla/pjrt/pjrt_future_test.cc | #include "xla/pjrt/pjrt_future.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
struct State {
explicit State(int32_t size)
: pending_count(size), promise(PjRtFuture<>::CreatePromise()) {}
std::atomic<int32_t> pending_count;
PjRtFuture<>::Promise promise;
absl::Mutex mu;
absl::Status status ABSL_GUARDED_BY(&mu);
};
}
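// Returns a future that becomes ready once all input futures are ready. The
// result carries the first non-OK status observed, or OK if every input
// future succeeded.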
PjRtFuture<> JoinFutures(absl::Span<const PjRtFuture<>> futures) {
if (futures.empty()) {
return PjRtFuture<>(absl::OkStatus());
} else if (futures.size() == 1) {
return futures.front();
}
auto state = std::make_shared<State>(futures.size());
for (const PjRtFuture<>& future : futures) {
future.OnReady([state](absl::Status status) {
if (!status.ok()) {
absl::MutexLock lock(&state->mu);
state->status.Update(status);
}
const int pending_count =
state->pending_count.fetch_sub(1, std::memory_order_acq_rel);
CHECK_GE(pending_count, 1) << "Pending count can't drop below 0";
if (pending_count == 1) {
absl::MutexLock lock(&state->mu);
state->promise.Set(std::move(state->status));
}
});
}
return PjRtFuture<>(state->promise);
}
} | #include "xla/pjrt/pjrt_future.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
TEST(PjRtFutureTest, StatelessFuture) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set();
EXPECT_TRUE(future.IsReady());
EXPECT_EQ(future.Await(), absl::OkStatus());
future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
}
TEST(PjRtFutureTest, CopyableFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
PjRtFuture<int32_t> copy_constructed(future);
PjRtFuture<int32_t> copy_assigned = future;
EXPECT_FALSE(copy_constructed.IsReady());
EXPECT_FALSE(copy_assigned.IsReady());
promise.Set(42);
EXPECT_TRUE(copy_constructed.IsReady());
EXPECT_TRUE(copy_assigned.IsReady());
}
TEST(PjRtFutureTest, MoveConstructedFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
PjRtFuture<std::unique_ptr<int32_t>> move_constructed(std::move(future));
EXPECT_FALSE(move_constructed.IsReady());
promise.Set(std::make_unique<int32_t>(42));
EXPECT_TRUE(move_constructed.IsReady());
}
TEST(PjRtFutureTest, MoveAssignedFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
PjRtFuture<std::unique_ptr<int32_t>> move_assigned = std::move(future);
EXPECT_FALSE(move_assigned.IsReady());
promise.Set(std::make_unique<int32_t>(42));
EXPECT_TRUE(move_assigned.IsReady());
}
TEST(PjRtFutureTest, AwaitMoveOnlyFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
promise.Set(std::make_unique<int32_t>(42));
EXPECT_EQ(**future.Await(), 42);
EXPECT_EQ(**std::move(future).Await(), 42);
}
TEST(PjRtFutureTest, OnReadyRvalueFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
promise.Set(42);
std::move(future).OnReady(
[](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, OnReadyMoveOnlyFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
promise.Set(std::make_unique<int32_t>(42));
std::move(future).OnReady([](absl::StatusOr<std::unique_ptr<int32_t>> value) {
EXPECT_EQ(**value, 42);
});
}
TEST(PjRtFutureTest, StatelessError) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(absl::InternalError("test"));
EXPECT_TRUE(future.IsReady());
absl::Status status = future.Await();
EXPECT_EQ(status, absl::InternalError("test"));
future.OnReady([](absl::Status status) {
EXPECT_EQ(status, absl::InternalError("test"));
});
}
TEST(PjRtFutureTest, StatelessImmediate) {
PjRtFuture<> ok_future(absl::OkStatus());
PjRtFuture<> error_future(absl::InternalError("test"));
EXPECT_TRUE(ok_future.IsReady());
EXPECT_TRUE(error_future.IsReady());
EXPECT_EQ(ok_future.Await(), absl::OkStatus());
EXPECT_EQ(error_future.Await(), absl::InternalError("test"));
ok_future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
error_future.OnReady([](absl::Status status) {
EXPECT_EQ(status, absl::InternalError("test"));
});
}
TEST(PjRtFutureTest, StatefulFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(42);
EXPECT_TRUE(future.IsReady());
future.OnReady([](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, StatusFuture) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(absl::OkStatus());
EXPECT_TRUE(future.IsReady());
future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
}
TEST(PjRtFutureTest, StatusOrFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(42);
EXPECT_TRUE(future.IsReady());
future.OnReady([](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, JoinFutures) {
auto empty_join = JoinFutures({});
EXPECT_TRUE(empty_join.IsReady());
EXPECT_EQ(empty_join.Await(), absl::OkStatus());
auto promise0 = PjRtFuture<>::CreatePromise();
auto promise1 = PjRtFuture<>::CreatePromise();
std::vector<PjRtFuture<>> futures0 = {PjRtFuture<>(promise0)};
std::vector<PjRtFuture<>> futures1 = {PjRtFuture<>(promise0),
PjRtFuture<>(promise1)};
auto join_one = JoinFutures(futures0);
EXPECT_FALSE(join_one.IsReady());
auto join_two = JoinFutures(futures1);
EXPECT_FALSE(join_two.IsReady());
promise0.Set();
EXPECT_TRUE(join_one.IsReady());
EXPECT_FALSE(join_two.IsReady());
EXPECT_EQ(join_one.Await(), absl::OkStatus());
promise1.Set();
EXPECT_TRUE(join_two.IsReady());
EXPECT_EQ(join_two.Await(), absl::OkStatus());
}
TEST(PjRtFutureTest, JoinErrors) {
auto empty_join = JoinFutures({});
EXPECT_TRUE(empty_join.IsReady());
EXPECT_EQ(empty_join.Await(), absl::OkStatus());
auto promise0 = PjRtFuture<>::CreatePromise();
auto promise1 = PjRtFuture<>::CreatePromise();
std::vector<PjRtFuture<>> futures0 = {PjRtFuture<>(promise0)};
std::vector<PjRtFuture<>> futures1 = {PjRtFuture<>(promise0),
PjRtFuture<>(promise1)};
auto join_one = JoinFutures(futures0);
EXPECT_FALSE(join_one.IsReady());
auto join_two = JoinFutures(futures1);
EXPECT_FALSE(join_two.IsReady());
promise0.Set(absl::InternalError("error #0"));
EXPECT_TRUE(join_one.IsReady());
EXPECT_FALSE(join_two.IsReady());
EXPECT_EQ(join_one.Await(), absl::InternalError("error #0"));
promise1.Set(absl::InternalError("error #1"));
EXPECT_TRUE(join_two.IsReady());
EXPECT_EQ(join_two.Await(), absl::InternalError("error #0"));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_future.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_future_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90a3ea44-24b4-466c-848c-48043121493f | cpp | tensorflow/tensorflow | pjrt_executable | third_party/xla/xla/python/pjrt_ifrt/pjrt_executable.cc | third_party/xla/xla/pjrt/pjrt_executable_test.cc | #include "xla/python/pjrt_ifrt/pjrt_executable.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/primitive_util.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/pjrt_array.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/python/pjrt_ifrt/pjrt_dtype.h"
#include "xla/python/pjrt_ifrt/pjrt_host_callback.h"
#include "xla/python/pjrt_ifrt/pjrt_memory.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
absl::StatusOr<const xla::HloInstructionProto*> FindRootInstruction(
const HloModuleProto& proto) {
for (const auto& computation : proto.computations()) {
if (computation.id() == proto.entry_computation_id()) {
for (const auto& instruction : computation.instructions()) {
if (instruction.id() == computation.root_id()) {
return &instruction;
}
}
}
}
return InvalidArgument("Entry computation not found");
}
absl::StatusOr<std::vector<xla::PrimitiveType>>
GetFirstModuleOutputElementTypes(
xla::PjRtLoadedExecutable* pjrt_loaded_executable) {
auto element_types = pjrt_loaded_executable->GetOutputElementTypes();
TF_RETURN_IF_ERROR(element_types.status());
if (element_types->empty()) {
return FailedPrecondition("No output element types found");
}
return element_types->front();
}
absl::StatusOr<std::vector<xla::DimensionVector>>
GetFirstModuleOutputDimensions(
xla::PjRtLoadedExecutable* pjrt_loaded_executable) {
auto dimensions = pjrt_loaded_executable->GetOutputDimensions();
TF_RETURN_IF_ERROR(dimensions.status());
if (dimensions->empty()) {
return FailedPrecondition("No output dimensions found");
}
return dimensions->front();
}
absl::StatusOr<std::optional<HloSharding>> GetFirstModuleOutputSharding(
xla::PjRtLoadedExecutable* pjrt_loaded_executable,
const xla::Shape& shape) {
auto output_shardings = pjrt_loaded_executable->GetOutputShardings();
std::optional<xla::HloSharding> result_hlo_sharding;
if (output_shardings.has_value()) {
std::vector<HloSharding> hlo_shardings;
hlo_shardings.reserve(output_shardings->size());
for (const auto& sharding : *output_shardings) {
TF_ASSIGN_OR_RETURN(auto hlo_sharding, HloSharding::FromProto(sharding));
hlo_shardings.push_back(hlo_sharding);
}
if (shape.IsTuple()) {
return HloSharding::Tuple(shape, hlo_shardings);
} else {
return hlo_shardings.front();
}
}
return std::nullopt;
}
absl::StatusOr<std::optional<std::vector<absl::string_view>>>
GetFirstModuleOutputMemoryKinds(
xla::PjRtLoadedExecutable* pjrt_loaded_executable) {
auto output_memory_kinds = pjrt_loaded_executable->GetOutputMemoryKinds();
if (absl::IsUnimplemented(output_memory_kinds.status())) {
return std::nullopt;
}
TF_RETURN_IF_ERROR(output_memory_kinds.status());
if (output_memory_kinds->empty()) {
return FailedPrecondition("No output memory kinds found");
}
return std::move(output_memory_kinds)->front();
}
struct ShapePartialInfo {
std::vector<xla::PrimitiveType> element_types;
std::vector<xla::DimensionVector> dimensions;
};
absl::StatusOr<ShapePartialInfo> CreateShapePartialInfo(
absl::Span<const xla::Shape> shapes) {
ShapePartialInfo partial_info;
partial_info.element_types.reserve(shapes.size());
partial_info.dimensions.reserve(shapes.size());
for (const auto& shape : shapes) {
if (shape.IsTuple()) {
return FailedPrecondition(
"Tupled shape is not supported in `CreateShapePartialInfo`.");
}
partial_info.element_types.push_back(shape.element_type());
partial_info.dimensions.push_back(
xla::ShapeUtil::CreateDimensionVectorFromShape(shape));
}
return partial_info;
}
}
char PjRtCompatibleExecutable::ID = 0;
char PjRtCompatibleLoadedExecutable::ID = 0;
char PjRtExecutable::ID = 0;
char PjRtLoadedExecutable::ID = 0;
absl::StatusOr<std::unique_ptr<Executable>> PjRtExecutable::Create(
std::shared_ptr<xla::PjRtExecutable> pjrt_executable,
std::unique_ptr<XlaCompileOptions> compile_options) {
return std::unique_ptr<Executable>(new PjRtExecutable(
std::move(pjrt_executable), std::move(compile_options)));
}
absl::StatusOr<std::optional<std::string>> PjRtExecutable::Fingerprint() const {
DCHECK(this);
return pjrt_executable_->FingerprintExecutable();
}
absl::StatusOr<std::string> PjRtExecutable::Serialize() const {
DCHECK(this);
return pjrt_executable_->SerializeExecutable();
}
absl::StatusOr<std::unique_ptr<LoadedExecutable>> PjRtLoadedExecutable::Create(
PjRtCompatibleClient* client,
std::shared_ptr<xla::PjRtLoadedExecutable> pjrt_loaded_executable,
std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks) {
VLOG(3) << "PjRtLoadedExecutable::Create";
VLOG(3) << "Using per-shard shape";
TF_ASSIGN_OR_RETURN(
auto result_element_types,
GetFirstModuleOutputElementTypes(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_dimensions,
GetFirstModuleOutputDimensions(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_memory_kinds,
GetFirstModuleOutputMemoryKinds(pjrt_loaded_executable.get()));
return CreateInternal(client, std::move(pjrt_loaded_executable),
result_element_types, result_dimensions,
                        /*result_hlo_sharding=*/std::nullopt,
result_memory_kinds, loaded_host_callbacks);
}
static absl::StatusOr<std::vector<xla::Shape>> ResultShapesOfModule(
mlir::ModuleOp module) {
auto main = module.lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return InvalidArgument("MLIR module has no main function");
}
auto type = main.getFunctionType();
std::vector<xla::Shape> result_shapes;
result_shapes.reserve(type.getNumResults());
for (unsigned i = 0; i < type.getNumResults(); ++i) {
auto result_type = type.getResult(i);
result_shapes.push_back(xla::TypeToShape(result_type));
}
return result_shapes;
}
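// Compiles `module` and wraps the result as an IFRT LoadedExecutable. With
// auto-SPMD partitioning the compiler may choose per-shard output shapes, so
// they are read back from the compiled executable; otherwise the full result
// shapes come from the module's `main` function and the output sharding is
// queried from the executable.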
absl::StatusOr<std::unique_ptr<LoadedExecutable>> PjRtLoadedExecutable::Create(
PjRtCompatibleClient* client, mlir::ModuleOp module,
xla::CompileOptions compile_options,
std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks) {
VLOG(3) << "PjRtLoadedExecutable::Create";
if (VLOG_IS_ON(3)) {
module.dump();
}
VLOG(3) << compile_options.ToProto()->DebugString();
const auto& build_options = compile_options.executable_build_options;
const bool auto_spmd_partitioning =
build_options.use_spmd_partitioning() &&
build_options.num_partitions() > 1 &&
(build_options.use_auto_spmd_partitioning() ||
build_options.any_allow_spmd_sharding_propagation_to_parameters() ||
build_options.any_allow_spmd_sharding_propagation_to_output());
TF_ASSIGN_OR_RETURN(
auto pjrt_loaded_executable,
client->pjrt_client()->Compile(module, std::move(compile_options)));
if (auto_spmd_partitioning) {
VLOG(3) << "Using per-shard shape";
TF_ASSIGN_OR_RETURN(
auto result_element_types,
GetFirstModuleOutputElementTypes(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_dimensions,
GetFirstModuleOutputDimensions(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_memory_kinds,
GetFirstModuleOutputMemoryKinds(pjrt_loaded_executable.get()));
return CreateInternal(client, std::move(pjrt_loaded_executable),
result_element_types, result_dimensions,
                          /*result_hlo_sharding=*/std::nullopt,
result_memory_kinds,
std::move(loaded_host_callbacks));
} else {
VLOG(3) << "Using full shape";
TF_ASSIGN_OR_RETURN(auto result_shapes, ResultShapesOfModule(module));
bool tuple_output = result_shapes.size() != 1;
xla::Shape result_shape;
std::vector<xla::Shape> output_shapes;
if (tuple_output) {
result_shape = xla::ShapeUtil::MakeTupleShape(result_shapes);
output_shapes = std::move(result_shapes);
} else {
result_shape = result_shapes.front();
output_shapes = result_shape.IsTuple()
? result_shape.tuple_shapes()
: std::vector<xla::Shape>{result_shape};
}
TF_ASSIGN_OR_RETURN(auto shape_partial_info,
CreateShapePartialInfo(output_shapes));
TF_ASSIGN_OR_RETURN(auto result_hlo_sharding,
GetFirstModuleOutputSharding(
pjrt_loaded_executable.get(), result_shape));
TF_ASSIGN_OR_RETURN(
auto result_memory_kinds,
GetFirstModuleOutputMemoryKinds(pjrt_loaded_executable.get()));
return CreateInternal(client, std::move(pjrt_loaded_executable),
shape_partial_info.element_types,
shape_partial_info.dimensions, result_hlo_sharding,
result_memory_kinds,
std::move(loaded_host_callbacks));
}
}
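// Builds the IFRT-level output dtypes, shapes, and shardings from the
// per-output XLA element types and dimensions (plus optional HLO sharding and
// memory kinds), then wraps everything into a PjRtLoadedExecutable.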
absl::StatusOr<std::unique_ptr<LoadedExecutable>>
PjRtLoadedExecutable::CreateInternal(
PjRtCompatibleClient* client,
std::shared_ptr<xla::PjRtLoadedExecutable> pjrt_loaded_executable,
absl::Span<const xla::PrimitiveType> result_element_types,
absl::Span<const xla::DimensionVector> result_dimensions,
const std::optional<xla::HloSharding>& result_hlo_sharding,
const std::optional<std::vector<absl::string_view>>& result_memory_kinds,
std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks) {
BasicDeviceList::Devices ds;
ds.reserve(pjrt_loaded_executable->addressable_devices().size());
for (xla::PjRtDevice* device :
pjrt_loaded_executable->addressable_devices()) {
TF_ASSIGN_OR_RETURN(Device * ifrt_device, client->LookupPjRtDevice(device));
ds.push_back(ifrt_device);
}
tsl::RCReference<DeviceList> devices = BasicDeviceList::Create(std::move(ds));
std::optional<tsl::RCReference<DeviceList>> sharding_devices;
if (devices->devices().empty()) {
sharding_devices =
BasicDeviceList::Create({client->addressable_devices().front()});
} else {
sharding_devices = devices;
}
std::vector<DType> output_dtypes;
std::vector<Shape> output_shapes;
std::vector<std::shared_ptr<const Sharding>> output_shardings;
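  // append_arg records dtype, shape, and a ConcreteEvenSharding for one
  // array-typed output. When an HLO sharding is present, the per-shard shape
  // is the sharding's tile shape; otherwise it equals the full output shape.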
auto append_arg = [&](const xla::PrimitiveType& element_type,
const xla::DimensionVector& dimensions,
const xla::HloSharding* sharding,
MemoryKind memory_kind) -> absl::Status {
TF_ASSIGN_OR_RETURN(auto dtype, ToDType(element_type));
output_dtypes.push_back(dtype);
output_shapes.push_back(Shape(dimensions));
CHECK(xla::primitive_util::IsArrayType(element_type));
xla::DimensionVector tile_shape_dimensions = dimensions;
if (sharding != nullptr) {
CHECK(!sharding->IsTuple());
tile_shape_dimensions =
xla::ShapeUtil::CreateDimensionVectorFromShape(sharding->TileShape(
xla::ShapeUtil::MakeShape(element_type, dimensions)));
}
output_shardings.push_back(ifrt::ConcreteEvenSharding::Create(
*sharding_devices, memory_kind,
ifrt::Shape(dimensions),
ifrt::Shape(tile_shape_dimensions)));
return absl::OkStatus();
};
auto append_token = [&](MemoryKind memory_kind) {
output_dtypes.push_back(DType(DType::kToken));
output_shapes.push_back(Shape({}));
output_shardings.push_back(
ifrt::ConcreteEvenSharding::Create(*sharding_devices, memory_kind,
ifrt::Shape({}),
ifrt::Shape({})));
};
auto check_output_sharding_condition =
[](absl::Span<const xla::PrimitiveType> element_types,
const xla::HloSharding& sharding) {
if (sharding.IsTuple()) {
return element_types.size() == sharding.tuple_elements().size() ||
(element_types.empty() &&
sharding.tuple_elements().size() == 1);
}
return element_types.size() == 1;
};
if (result_memory_kinds.has_value() &&
result_memory_kinds->size() != result_element_types.size()) {
return FailedPrecondition(
"Output memory kinds are inconsistent with the output shape");
}
if (result_hlo_sharding.has_value() &&
!check_output_sharding_condition(result_element_types,
*result_hlo_sharding)) {
return FailedPrecondition(
"Output sharding is inconsistent with the output shape");
}
CHECK_EQ(result_element_types.size(), result_dimensions.size());
output_dtypes.reserve(result_element_types.size());
output_shapes.reserve(result_element_types.size());
output_shardings.reserve(result_element_types.size());
for (int i = 0; i < result_element_types.size(); ++i) {
const auto& element_type = result_element_types[i];
MemoryKind element_memory_kind;
if (result_memory_kinds.has_value()) {
element_memory_kind = MemoryKind((*result_memory_kinds)[i]);
}
if (xla::primitive_util::IsArrayType(element_type)) {
const xla::HloSharding* element_hlo_sharding = nullptr;
if (result_hlo_sharding.has_value()) {
element_hlo_sharding = result_hlo_sharding->IsTuple()
? &result_hlo_sharding->tuple_elements()[i]
: &*result_hlo_sharding;
if (element_hlo_sharding->IsTuple()) {
return FailedPrecondition(
"Nested-tupled output sharding is not supported");
}
}
TF_RETURN_IF_ERROR(append_arg(element_type, result_dimensions[i],
element_hlo_sharding, element_memory_kind));
} else if (element_type == TOKEN) {
append_token(element_memory_kind);
} else {
return FailedPrecondition(
"The element type is not a supported type (array, token)");
}
}
std::vector<PjRtHostSendAndRecvLoadedHostCallback*>
host_send_and_recv_callbacks;
host_send_and_recv_callbacks.reserve(loaded_host_callbacks.size());
for (auto& loaded_host_callback : loaded_host_callbacks) {
auto* host_send_and_recv_callback =
llvm::dyn_cast<PjRtHostSendAndRecvLoadedHostCallback>(
loaded_host_callback.get());
if (host_send_and_recv_callback != nullptr) {
host_send_and_recv_callbacks.push_back(host_send_and_recv_callback);
}
}
std::vector<Device*> addressable_devices;
addressable_devices.reserve(
pjrt_loaded_executable->addressable_devices().size());
for (xla::PjRtDevice* device :
pjrt_loaded_executable->addressable_devices()) {
TF_ASSIGN_OR_RETURN(Device * ifrt_device, client->LookupPjRtDevice(device));
addressable_devices.push_back(ifrt_device);
}
return std::unique_ptr<LoadedExecutable>(new PjRtLoadedExecutable(
client, std::move(pjrt_loaded_executable), std::move(devices),
std::move(addressable_devices), std::move(loaded_host_callbacks),
std::move(host_send_and_recv_callbacks), std::move(output_dtypes),
std::move(output_shapes), std::move(output_shardings)));
}
PjRtLoadedExecutable::PjRtLoadedExecutable(
PjRtCompatibleClient* client,
std::shared_ptr<xla::PjRtLoadedExecutable> pjrt_loaded_executable,
tsl::RCReference<DeviceList> devices,
std::vector<Device*> addressable_devices,
std::vector<tsl::RCReference<LoadedHostCallback>> all_loaded_host_callbacks,
std::vector<PjRtHostSendAndRecvLoadedHostCallback*>
host_send_recv_callbacks,
std::vector<DType> output_dtypes, std::vector<Shape> output_shapes,
std::vector<std::shared_ptr<const Sharding>> output_shardings)
: client_(client),
pjrt_loaded_executable_(std::move(pjrt_loaded_executable)),
devices_(std::move(devices)),
addressable_devices_(std::move(addressable_devices)),
all_loaded_host_callbacks_(
std::make_shared<std::vector<tsl::RCReference<LoadedHostCallback>>>(
std::move(all_loaded_host_callbacks))),
host_send_recv_callbacks_(std::move(host_send_recv_callbacks)),
output_dtypes_(std::move(output_dtypes)),
output_shapes_(std::move(output_shapes)),
output_shardings_(std::move(output_shardings)) {}
PjRtLoadedExecutable::~PjRtLoadedExecutable() = default;
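// Executes the loaded program. When `devices` is provided, a single-shard
// portable execution runs on that device; otherwise the executable runs on
// its addressable devices. Each argument must be a PjRtCompatibleArray whose
// per-device buffers are regrouped into one argument list per computation.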
absl::StatusOr<PjRtLoadedExecutable::ExecuteResult>
PjRtLoadedExecutable::Execute(
absl::Span<tsl::RCReference<Array>> args, const ExecuteOptions& options,
std::optional<tsl::RCReference<DeviceList>> devices) {
DCHECK(this);
std::vector<std::vector<PjRtBuffer*>> argument_handles;
std::vector<std::unique_ptr<PjRtBuffer>> owned_buffers;
int num_computations;
const bool portable_execution = devices.has_value();
PjRtCompatibleDevice* portable_execution_device = nullptr;
if (portable_execution) {
if ((*devices)->size() != 1) {
return InvalidArgument(
"Only single-shard portable execution is supported");
}
num_computations = 1;
portable_execution_device =
static_cast<PjRtDevice*>((*devices)->devices().front());
} else {
if (devices_->devices().empty()) {
return InvalidArgument("No devices provided for portable executable");
}
num_computations = devices_->size();
}
argument_handles.resize(num_computations);
for (int i = 0; i < num_computations; ++i) {
argument_handles[i].reserve(args.size());
}
for (int i = 0; i < args.size(); ++i) {
auto* pjrt_array =
llvm::dyn_cast_or_null<PjRtCompatibleArray>(args[i].get());
if (!pjrt_array) {
      return InvalidArgument(
          "Only PjRtCompatibleArray is supported, but argument %d is %s", i,
          args[i]->DebugString());
}
int j = 0;
for (const auto& pjrt_buffer : pjrt_array->pjrt_buffers()) {
argument_handles[j].push_back(pjrt_buffer.get());
++j;
}
}
const bool returned_future_supported =
pjrt_loaded_executable_->IsReturnedFutureSupported();
xla::ExecuteOptions opts;
opts.untuple_result = true;
opts.launch_id = options.launch_id;
opts.use_major_to_minor_data_layout_for_callbacks = true;
opts.non_donatable_input_indices = options.non_donatable_input_indices;
if (!all_loaded_host_callbacks_->empty() && !returned_future_supported) {
return Internal(
"Host callback not supported without returned future support in "
"runtime: %s",
client_->runtime_type());
}
std::unique_ptr<HostCallbackStates> host_callback_states;
if (!host_send_recv_callbacks_.empty()) {
host_callback_states = std::make_unique<HostCallbackStates>();
for (int i = 0; i < num_computations; ++i) {
auto& contexts = host_callback_states->contexts.emplace_back();
auto& send_callbacks =
host_callback_states->send_callbacks.emplace_back();
auto& recv_callbacks =
host_callback_states->recv_callbacks.emplace_back();
for (const auto& host_send_recv_callback : host_send_recv_callbacks_) {
contexts.push_back(CreateHostCallbackStateAndAppendSendRecvCallbacks(
host_send_recv_callback->host_callback(),
nullptr, send_callbacks,
recv_callbacks, opts.use_major_to_minor_data_layout_for_callbacks));
}
}
opts.send_callbacks = host_callback_states->send_callbacks;
opts.recv_callbacks = host_callback_states->recv_callbacks;
}
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> pjrt_outputs;
xla::ifrt::Future<> status;
if (portable_execution) {
std::optional<PjRtFuture<>> returned_pjrt_future;
TF_RET_CHECK(portable_execution_device->IsAddressable());
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<PjRtBuffer>> single_device_pjrt_results,
pjrt_loaded_executable_->ExecutePortable(
argument_handles.front(), portable_execution_device->pjrt_device(),
opts, returned_pjrt_future,
returned_future_supported));
pjrt_outputs.push_back(std::move(single_device_pjrt_results));
if (returned_future_supported) {
status = *std::move(returned_pjrt_future);
} else {
status = Future<>(absl::OkStatus());
}
} else {
std::optional<std::vector<PjRtFuture<>>> returned_pjrt_futures;
if (returned_future_supported) {
returned_pjrt_futures.emplace();
}
TF_ASSIGN_OR_RETURN(
pjrt_outputs, pjrt_loaded_executable_->Execute(argument_handles, opts,
returned_pjrt_futures));
if (returned_future_supported) {
status = JoinFutures(absl::MakeSpan(*returned_pjrt_futures));
} else {
status = Future<>(absl::OkStatus());
}
}
if (!all_loaded_host_callbacks_->empty()) {
status.OnReady([all_loaded_host_callbacks = all_loaded_host_callbacks_,
host_callback_states =
std::move(host_callback_states)](absl::Status) mutable {
all_loaded_host_callbacks.reset();
});
}
std::vector<tsl::RCReference<Array>> outputs;
if (pjrt_outputs.size() != num_computations) {
return FailedPrecondition(
"Unexpected number of computations in outputs: %d vs. %d",
pjrt_outputs.size(), num_computations);
}
const int num_outputs = pjrt_outputs.front().size();
if (num_outputs != output_dtypes_.size()) {
return FailedPrecondition("Unexpected number of outputs: %d vs. %d",
num_outputs, output_dtypes_.size());
}
outputs.reserve(num_outputs);
absl::flat_hash_map<MemoryKind, std::shared_ptr<const Sharding>>
single_device_shardings;
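  // Regroup the per-computation output buffers into one PjRtArray per output,
  // verifying that every shard of an output uses the same canonicalized
  // memory kind.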
for (int i = 0; i < num_outputs; ++i) {
PjRtArray::PjRtBuffers buffers;
buffers.reserve(num_computations);
const MemoryKind first_memory_kind =
MakeMemoryKindFromPjRtBuffer(pjrt_outputs[0][i].get());
const MemoryKind canonical_first_memory_kind =
CanonicalizeMemoryKindWithPjRtDevice(first_memory_kind,
pjrt_outputs[0][i]->device());
for (int j = 0; j < num_computations; ++j) {
if (j > 0) {
if (auto memory_kind =
MakeMemoryKindFromPjRtBuffer(pjrt_outputs[j][i].get());
canonical_first_memory_kind !=
CanonicalizeMemoryKindWithPjRtDevice(
memory_kind, pjrt_outputs[j][i]->device())) {
return FailedPrecondition(
"Memory kind mismatch between PjRtBuffers. Got one buffer with "
"memory kind '%v' and another with memory_kind '%v'",
first_memory_kind, memory_kind);
}
}
buffers.push_back(
std::shared_ptr<PjRtBuffer>(pjrt_outputs[j][i].release()));
}
std::shared_ptr<const Sharding> sharding;
if (portable_execution) {
if (auto it = single_device_shardings.find(first_memory_kind);
it == single_device_shardings.end()) {
sharding =
single_device_shardings
.insert({first_memory_kind,
SingleDeviceSharding::Create(portable_execution_device,
first_memory_kind)})
.first->second;
} else {
sharding = it->second;
}
} else {
sharding = output_shardings_[i];
}
outputs.push_back(*PjRtArray::Create(client_, output_dtypes_[i],
output_shapes_[i], std::move(sharding),
std::move(buffers)));
}
ExecuteResult result;
if (options.fill_status) {
result.status = status;
}
result.outputs = std::move(outputs);
return result;
}
absl::StatusOr<std::optional<std::string>> PjRtLoadedExecutable::Fingerprint()
const {
DCHECK(this);
absl::StatusOr<std::string> fingerprint =
pjrt_loaded_executable_->FingerprintExecutable();
if (fingerprint.ok()) {
return {fingerprint.value()};
} else if (fingerprint.status().code() == absl::StatusCode::kUnimplemented) {
return std::nullopt;
} else {
return fingerprint.status();
}
}
absl::StatusOr<std::string> PjRtLoadedExecutable::Serialize() const {
DCHECK(this);
return pjrt_loaded_executable_->SerializeExecutable();
}
Future<> PjRtLoadedExecutable::Delete() {
DCHECK(this);
pjrt_loaded_executable_->Delete();
return Future<>(absl::OkStatus());
}
}
} | #include "xla/pjrt/pjrt_executable.h"
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/client/executable_build_options.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
TEST(CompileOptionsTest, Serialization) {
CompileOptions src;
src.compile_portable_executable = true;
src.parameter_is_tupled_arguments = true;
src.profile_version = 1;
src.argument_layouts = {ShapeUtil::MakeShape(S32, {1})};
ExecutableBuildOptions build_option;
build_option.set_device_assignment(DeviceAssignment(1, 1));
src.executable_build_options = build_option;
TF_ASSERT_OK_AND_ASSIGN(CompileOptionsProto proto, src.ToProto());
TF_ASSERT_OK_AND_ASSIGN(CompileOptions output,
CompileOptions::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(CompileOptionsProto output_proto, src.ToProto());
EXPECT_EQ(proto.SerializeAsString(), output_proto.SerializeAsString());
}
TEST(CompileOptionsTest, MultiSliceConfigNotSupported) {
CompileOptionsProto proto;
*proto.mutable_serialized_multi_slice_config() = "multi_size_config";
auto option = CompileOptions::FromProto(proto);
EXPECT_THAT(
option.status(),
StatusIs(
absl::StatusCode::kUnimplemented,
"multi_slice_config not supported in CompileOptions::FromProto."));
}
TEST(ExecuteOptionsTest, Serialization) {
ExecuteOptions src;
src.arguments_are_tupled = true;
src.untuple_result = false;
src.launch_id = 1234;
src.strict_shape_checking = true;
src.execution_mode = ExecuteOptions::ExecutionMode::kAsynchronous;
src.non_donatable_input_indices = {2, 3};
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptionsProto proto, src.ToProto());
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptions output,
ExecuteOptions::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptionsProto output_proto, src.ToProto());
EXPECT_EQ(proto.SerializeAsString(), output_proto.SerializeAsString());
}
TEST(ExecuteOptionsTest, SendRecvNotSupported) {
ExecuteOptions options;
std::vector<std::vector<SendCallback>> send_callbacks(1);
options.send_callbacks = send_callbacks;
std::vector<std::vector<RecvCallback>> recv_callbacks(1);
options.recv_callbacks = recv_callbacks;
EXPECT_THAT(
options.ToProto(),
StatusIs(absl::StatusCode::kUnimplemented,
"ExecuteOptions with send/recv calbacks is not serializable"));
}
TEST(ExecuteOptionsTest, ApplyOptionsCanParseStringsAndEnums) {
using OptionOverride = std::variant<std::string, bool, int64_t, double>;
std::vector<std::pair<std::string, OptionOverride>> env_override_options;
env_override_options = {
{"xla_gpu_use_runtime_fusion", std::string("True")},
{"xla_gpu_graph_min_graph_size", std::string("2")},
{"xla_gpu_disable_async_collectives", std::string("2")},
{"xla_gpu_redzone_scratch_max_megabytes", std::string("3400")},
{"xla_gpu_auto_spmd_partitioning_memory_budget_ratio", 0.9},
{"xla_gpu_pgle_profile_file_or_directory_path", std::string("abc")}};
CompileOptions src;
src.env_option_overrides = env_override_options;
auto s = src.ApplyAllOptionOverrides();
auto& debug_options = src.executable_build_options.debug_options();
EXPECT_EQ(debug_options.xla_gpu_use_runtime_fusion(), true);
EXPECT_EQ(debug_options.xla_gpu_graph_min_graph_size(), 2);
EXPECT_EQ(debug_options.xla_gpu_redzone_scratch_max_megabytes(), 3400);
EXPECT_FLOAT_EQ(
debug_options.xla_gpu_auto_spmd_partitioning_memory_budget_ratio(), 0.9);
EXPECT_EQ(debug_options.xla_gpu_pgle_profile_file_or_directory_path(), "abc");
EXPECT_EQ(debug_options.xla_gpu_disable_async_collectives().size(), 1);
EXPECT_EQ(debug_options.xla_gpu_disable_async_collectives()[0], 2);
}
TEST(CompiledMemoryStatsTest, Serialization) {
CompiledMemoryStats stats;
stats.generated_code_size_in_bytes = 2;
stats.argument_size_in_bytes = 3;
stats.output_size_in_bytes = 5;
stats.alias_size_in_bytes = 7;
stats.temp_size_in_bytes = 11;
stats.host_generated_code_size_in_bytes = 13;
stats.host_argument_size_in_bytes = 17;
stats.host_output_size_in_bytes = 19;
stats.host_alias_size_in_bytes = 23;
stats.host_temp_size_in_bytes = 29;
CompiledMemoryStatsProto serialized = stats.ToProto();
CompiledMemoryStats deserialized = CompiledMemoryStats::FromProto(serialized);
EXPECT_EQ(serialized.SerializeAsString(),
deserialized.ToProto().SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_executable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_executable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95f3e0ae-3580-482c-a923-012cebb9dfb8 | cpp | tensorflow/tensorflow | metrics | tensorflow/cc/saved_model/metrics.cc | tensorflow/cc/saved_model/metrics_test.cc | #include "tensorflow/cc/saved_model/metrics.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "json/config.h"
#include "json/json.h"
#include "json/writer.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
namespace tensorflow {
namespace metrics {
namespace {
auto* saved_model_write_counter = monitoring::Counter<1>::New(
"/tensorflow/core/saved_model/write/count",
"The number of SavedModels successfully written.", "write_version");
auto* saved_model_read_counter = monitoring::Counter<1>::New(
"/tensorflow/core/saved_model/read/count",
"The number of SavedModels successfully loaded.", "write_version");
auto* saved_model_write_api = monitoring::Counter<1>::New(
"/tensorflow/core/saved_model/write/api",
"The API used to write the SavedModel.", "api_label");
auto* saved_model_read_api = monitoring::Counter<1>::New(
"/tensorflow/core/saved_model/read/api",
"The API used to load the SavedModel.", "api_label");
auto* saved_model_write_fingerprint = monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/write/fingerprint",
"The fingerprint (saved_model_checksum) of the exported SavedModel.");
auto* saved_model_write_path = monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/write/path",
"The path (saved_model_path) of the exported SavedModel.");
auto* saved_model_write_path_and_singleprint =
monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/write/path_and_singleprint",
"The path (saved_model_path) and singleprint (concatenation of "
"graph_def_program_hash, signature_def_hash, saved_object_graph_hash, "
"and checkpoint_hash) of the newly written SavedModel.");
auto* saved_model_read_fingerprint = monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/read/fingerprint",
"The fingerprint (saved_model_checksum) of the loaded SavedModel.");
auto* saved_model_read_path = monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/read/path",
"The path (saved_model_path) of the loaded SavedModel.");
auto* saved_model_read_path_and_singleprint =
monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/read/path_and_singleprint",
"The path (saved_model_path) and singleprint (concatenation of "
"graph_def_program_hash, signature_def_hash, saved_object_graph_hash, "
"and checkpoint_hash) of the loaded SavedModel.");
auto* saved_model_found_fingerprint_on_load =
monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/saved_model/found_fingerprint_on_load",
"Whether or not the fingerprint.pb file was found when loading the "
"SavedModel.");
auto* checkpoint_write_durations = monitoring::Sampler<1>::New(
{
"/tensorflow/core/checkpoint/write/write_durations",
"Distribution of the wall time duration in microseconds of the "
"checkpoint write operation.",
"api_label"
},
monitoring::Buckets::Exponential(1000, 1.5, 41));
auto* checkpoint_read_durations = monitoring::Sampler<1>::New(
{
"/tensorflow/core/checkpoint/read/read_durations",
"Distribution of the wall time duration in microseconds of the "
"checkpoint read operation.",
"api_label"
},
monitoring::Buckets::Exponential(1000, 1.5, 41));
auto* async_checkpoint_write_durations = monitoring::Sampler<1>::New(
{
"/tensorflow/core/checkpoint/write/async_write_durations",
"Distribution of the wall time duration in microseconds of the async "
"checkpoint write operation",
"api_label"
},
monitoring::Buckets::Exponential(1000, 1.5, 41));
auto* checkpoint_training_time_saved = monitoring::Counter<1>::New(
"/tensorflow/core/checkpoint/write/training_time_saved",
"Total time in microseconds elapsed between two consecutive write "
"operations in a single job or between Checkpoint construction and the "
"first write operation.",
"api_label");
auto* checkpoint_size = monitoring::Counter<2>::New(
"/tensorflow/core/checkpoint/write/checkpoint_size",
"Size of checkpoint (.index and sharded data files), rounded to the "
"nearest 100 MB.",
"api_label", "filesize");
}
auto* sharding_callback_duration = monitoring::Counter<0>::New(
"/tensorflow/core/checkpoint/sharding/callback_duration",
"Sharding callback execution duration in microseconds.");
auto* num_checkpoint_shards_written = monitoring::Counter<0>::New(
"/tensorflow/core/checkpoint/sharding/num_checkpoint_shards_written",
"Number of checkpoint shard files written during saving.");
auto* sharding_callback_description = monitoring::Gauge<std::string, 0>::New(
"/tensorflow/core/checkpoint/sharding/callback_description",
"Describes the callback used to shard the checkpoint during saving.");
monitoring::CounterCell& SavedModelWriteCount(absl::string_view write_version) {
return *saved_model_write_counter->GetCell(std::string(write_version));
}
monitoring::CounterCell& SavedModelReadCount(absl::string_view write_version) {
return *saved_model_read_counter->GetCell(std::string(write_version));
}
monitoring::CounterCell& SavedModelWriteApi(absl::string_view api_label) {
return *saved_model_write_api->GetCell(std::string(api_label));
}
monitoring::CounterCell& SavedModelReadApi(absl::string_view api_label) {
return *saved_model_read_api->GetCell(std::string(api_label));
}
monitoring::GaugeCell<std::string>& SavedModelReadFingerprint() {
return *saved_model_read_fingerprint->GetCell();
}
monitoring::GaugeCell<std::string>& SavedModelReadPath() {
return *saved_model_read_path->GetCell();
}
monitoring::GaugeCell<std::string>& SavedModelReadPathAndSingleprint() {
return *saved_model_read_path_and_singleprint->GetCell();
}
monitoring::GaugeCell<std::string>& SavedModelWriteFingerprint() {
return *saved_model_write_fingerprint->GetCell();
}
monitoring::GaugeCell<std::string>& SavedModelWritePath() {
return *saved_model_write_path->GetCell();
}
monitoring::GaugeCell<std::string>& SavedModelWritePathAndSingleprint() {
return *saved_model_write_path_and_singleprint->GetCell();
}
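// Renders the fingerprint hashes as a JSON object string keyed by field name.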
std::string MakeFingerprintJson(FingerprintDef fingerprint_def) {
Json::Value fingerprint = Json::objectValue;
fingerprint["saved_model_checksum"] =
Json::UInt64(fingerprint_def.saved_model_checksum());
fingerprint["graph_def_program_hash"] =
Json::UInt64(fingerprint_def.graph_def_program_hash());
fingerprint["signature_def_hash"] =
Json::UInt64(fingerprint_def.signature_def_hash());
fingerprint["saved_object_graph_hash"] =
Json::UInt64(fingerprint_def.saved_object_graph_hash());
fingerprint["checkpoint_hash"] =
Json::UInt64(fingerprint_def.checkpoint_hash());
Json::StreamWriterBuilder json_factory;
return Json::writeString(json_factory, fingerprint);
}
absl::StatusOr<std::string> MakeSavedModelPathAndSingleprint(
std::string path, std::string singleprint) {
if (path.empty()) {
return absl::InvalidArgumentError(
"Invalid path_and_singleprint argument. Empty path.");
}
if (singleprint.empty()) {
return absl::InvalidArgumentError(
"Invalid path_and_singleprint argument. Empty singleprint.");
}
return absl::StrCat(path, ":", singleprint);
}
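// Splits "<path>:<singleprint>" at the last ':' so that paths which
// themselves contain ':' are preserved; both halves must be non-empty.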
absl::StatusOr<std::pair<std::string, std::string>>
ParseSavedModelPathAndSingleprint(std::string path_and_singleprint) {
size_t delimiter = path_and_singleprint.rfind(':');
if (delimiter == std::string::npos) {
return absl::InvalidArgumentError(
"Invalid path_and_singleprint argument. Found no delimeter.");
}
std::string path = path_and_singleprint.substr(0, delimiter);
if (path.empty()) {
return absl::InvalidArgumentError(
"Invalid path_and_singleprint argument. Empty path.");
}
std::string singleprint = path_and_singleprint.substr(delimiter + 1);
if (singleprint.empty()) {
return absl::InvalidArgumentError(
"Invalid path_and_singleprint argument. Empty singleprint.");
}
return std::pair<std::string, std::string>(path, singleprint);
}
monitoring::GaugeCell<std::string>& SavedModelFoundFingerprintOnLoad() {
return *saved_model_found_fingerprint_on_load->GetCell();
}
monitoring::SamplerCell& CheckpointReadDuration(absl::string_view api_label) {
return *checkpoint_read_durations->GetCell(std::string(api_label));
}
monitoring::SamplerCell& CheckpointWriteDuration(absl::string_view api_label) {
return *checkpoint_write_durations->GetCell(std::string(api_label));
}
monitoring::SamplerCell& AsyncCheckpointWriteDuration(
absl::string_view api_label) {
return *async_checkpoint_write_durations->GetCell(std::string(api_label));
}
monitoring::CounterCell& TrainingTimeSaved(absl::string_view api_label) {
return *checkpoint_training_time_saved->GetCell(std::string(api_label));
}
monitoring::CounterCell& CheckpointSize(absl::string_view api_label,
int64_t filesize) {
return *checkpoint_size->GetCell(std::string(api_label),
std::to_string(filesize));
}
monitoring::CounterCell& ShardingCallbackDuration() {
return *sharding_callback_duration->GetCell();
}
monitoring::CounterCell& NumCheckpointShardsWritten() {
return *num_checkpoint_shards_written->GetCell();
}
monitoring::GaugeCell<std::string>& ShardingCallbackDescription() {
return *sharding_callback_description->GetCell();
}
}
} | #include "tensorflow/cc/saved_model/metrics.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "json/json.h"
#include "json/reader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace metrics {
TEST(MetricsTest, TestSavedModelWrite) {
EXPECT_EQ(SavedModelWriteApi("foo").value(), 0);
SavedModelWriteApi("foo").IncrementBy(1);
EXPECT_EQ(SavedModelWriteApi("foo").value(), 1);
EXPECT_EQ(SavedModelWriteCount("1").value(), 0);
SavedModelWriteCount("1").IncrementBy(1);
EXPECT_EQ(SavedModelWriteCount("1").value(), 1);
}
TEST(MetricsTest, TestSavedModelRead) {
SavedModelReadApi("bar").IncrementBy(1);
EXPECT_EQ(SavedModelReadApi("bar").value(), 1);
SavedModelReadCount("2").IncrementBy(1);
EXPECT_EQ(SavedModelReadCount("2").value(), 1);
SavedModelReadApi("baz").IncrementBy(1);
EXPECT_EQ(SavedModelReadApi("baz").value(), 1);
SavedModelReadCount("2").IncrementBy(1);
EXPECT_EQ(SavedModelReadCount("2").value(), 2);
}
TEST(MetricsTest, TestCheckpointRead) {
EXPECT_EQ(CheckpointReadDuration("foo").value().num(), 0);
CheckpointReadDuration("foo").Add(100);
EXPECT_EQ(CheckpointReadDuration("foo").value().num(), 1);
}
TEST(MetricsTest, TestCheckpointWrite) {
EXPECT_EQ(CheckpointWriteDuration("foo").value().num(), 0);
CheckpointWriteDuration("foo").Add(100);
EXPECT_EQ(CheckpointWriteDuration("foo").value().num(), 1);
}
TEST(MetricsTest, TestAsyncCheckpointWrite) {
EXPECT_EQ(AsyncCheckpointWriteDuration("foo").value().num(), 0);
AsyncCheckpointWriteDuration("foo").Add(100);
EXPECT_EQ(AsyncCheckpointWriteDuration("foo").value().num(), 1);
}
TEST(MetricsTest, TestTrainingTimeSaved) {
EXPECT_EQ(TrainingTimeSaved("foo").value(), 0);
TrainingTimeSaved("foo").IncrementBy(100);
EXPECT_EQ(TrainingTimeSaved("foo").value(), 100);
}
TEST(MetricsTest, TestCheckpointSize) {
EXPECT_EQ(CheckpointSize("foo", 10).value(), 0);
CheckpointSize("foo", 10).IncrementBy(1);
EXPECT_EQ(CheckpointSize("foo", 10).value(), 1);
}
TEST(MetricsTest, TestWriteFingerprint) {
EXPECT_EQ(SavedModelWriteFingerprint().value(), "");
SavedModelWriteFingerprint().Set("foo");
EXPECT_EQ(SavedModelWriteFingerprint().value(), "foo");
SavedModelWriteFingerprint().Set("bar");
EXPECT_EQ(SavedModelWriteFingerprint().value(), "bar");
}
TEST(MetricsTest, TestWritePath) {
EXPECT_EQ(SavedModelWritePath().value(), "");
SavedModelWritePath().Set("foo");
EXPECT_EQ(SavedModelWritePath().value(), "foo");
SavedModelWritePath().Set("bar");
EXPECT_EQ(SavedModelWritePath().value(), "bar");
}
TEST(MetricsTest, TestWritePathAndSingleprint) {
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "");
SavedModelWritePathAndSingleprint().Set("foo");
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "foo");
SavedModelWritePathAndSingleprint().Set("bar");
EXPECT_EQ(SavedModelWritePathAndSingleprint().value(), "bar");
EXPECT_EQ(
MakeSavedModelPathAndSingleprint("path", "singleprint").value_or(""),
"path:singleprint");
}
TEST(MetricsTest, TestInvalidMakePathAndSingleprint) {
EXPECT_THAT(MakeSavedModelPathAndSingleprint("", "singleprint"),
testing::StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(MakeSavedModelPathAndSingleprint("path", ""),
testing::StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MetricsTest, TestReadFingerprint) {
EXPECT_EQ(SavedModelReadFingerprint().value(), "");
SavedModelReadFingerprint().Set("foo");
EXPECT_EQ(SavedModelReadFingerprint().value(), "foo");
SavedModelReadFingerprint().Set("bar");
EXPECT_EQ(SavedModelReadFingerprint().value(), "bar");
}
TEST(MetricsTest, TestReadPath) {
EXPECT_EQ(SavedModelReadPath().value(), "");
SavedModelReadPath().Set("foo");
EXPECT_EQ(SavedModelReadPath().value(), "foo");
SavedModelReadPath().Set("bar");
EXPECT_EQ(SavedModelReadPath().value(), "bar");
}
TEST(MetricsTest, TestReadPathAndSingleprint) {
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "");
SavedModelReadPathAndSingleprint().Set("foo");
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "foo");
SavedModelReadPathAndSingleprint().Set("bar");
EXPECT_EQ(SavedModelReadPathAndSingleprint().value(), "bar");
TF_ASSERT_OK_AND_ASSIGN(
auto path_singleprint,
ParseSavedModelPathAndSingleprint("path/model:name:singleprint"));
auto [path, singleprint] = path_singleprint;
EXPECT_EQ(path, "path/model:name");
EXPECT_EQ(singleprint, "singleprint");
}
TEST(MetricsTest, TestMakeFingerprintJson) {
FingerprintDef fingerprint;
fingerprint.set_saved_model_checksum(1);
fingerprint.set_graph_def_program_hash(2);
fingerprint.set_signature_def_hash(3);
fingerprint.set_saved_object_graph_hash(4);
fingerprint.set_checkpoint_hash(5);
std::string serialized_fingerprint_json = MakeFingerprintJson(fingerprint);
EXPECT_EQ(
serialized_fingerprint_json,
"{\n\t\"checkpoint_hash\" : 5,\n\t\"graph_def_program_hash\" : "
"2,\n\t\"saved_model_checksum\" : 1,\n\t\"saved_object_graph_hash\" : "
"4,\n\t\"signature_def_hash\" : 3\n}");
Json::Value fingerprint_json = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(serialized_fingerprint_json, fingerprint_json);
EXPECT_EQ(fingerprint_json["saved_model_checksum"].asUInt64(), 1);
EXPECT_EQ(fingerprint_json["graph_def_program_hash"].asUInt64(), 2);
EXPECT_EQ(fingerprint_json["signature_def_hash"].asUInt64(), 3);
EXPECT_EQ(fingerprint_json["saved_object_graph_hash"].asUInt64(), 4);
EXPECT_EQ(fingerprint_json["checkpoint_hash"].asUInt64(), 5);
}
TEST(MetricsTest, TestFoundFingerprintOnLoad) {
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "");
SavedModelFoundFingerprintOnLoad().Set(kFingerprintFound);
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "FOUND");
SavedModelFoundFingerprintOnLoad().Set(kFingerprintNotFound);
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "NOT_FOUND");
SavedModelFoundFingerprintOnLoad().Set(kFingerprintError);
EXPECT_EQ(SavedModelFoundFingerprintOnLoad().value(), "ERROR");
}
TEST(MetricsTest, TestShardingCallbackDuration) {
EXPECT_EQ(ShardingCallbackDuration().value(), 0);
ShardingCallbackDuration().IncrementBy(100);
EXPECT_EQ(ShardingCallbackDuration().value(), 100);
}
TEST(MetricsTest, TestNumCheckpointShardsWritten) {
EXPECT_EQ(NumCheckpointShardsWritten().value(), 0);
NumCheckpointShardsWritten().IncrementBy(10);
EXPECT_EQ(NumCheckpointShardsWritten().value(), 10);
}
TEST(MetricsTest, TestShardingCallbackDescription) {
EXPECT_EQ(ShardingCallbackDescription().value(), "");
ShardingCallbackDescription().Set("foo");
EXPECT_EQ(ShardingCallbackDescription().value(), "foo");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/metrics.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/metrics_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05b3a5fb-d17e-46f2-a604-6a0d2c9a26bd | cpp | tensorflow/tensorflow | se_gpu_pjrt_client | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client.cc | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client_test.cc | #include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_computation.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/distributed/topology_util.h"
#include "xla/pjrt/event_pool.h"
#include "xla/pjrt/gpu/gpu_helpers.h"
#include "xla/pjrt/gpu/gpu_topology.h"
#include "xla/pjrt/gpu/gpu_topology.pb.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/local_device_state.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/pjrt/stream_executor_executable.h"
#include "xla/pjrt/tracked_device_buffer.h"
#include "xla/service/compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/transfer_manager.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/framework/allocator.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/nvtx_utils.h"
#include "tsl/profiler/lib/traceme.h"
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
#include "xla/debug_options_flags.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/gpu/gpu_metrics.h"
#include "xla/pjrt/gpu/nccl_id_store.h"
#include "xla/pjrt/stream_executor_executable.pb.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/gpu/gpu_memory_space_assignment.h"
#include "xla/xla.pb.h"
#endif
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "xla/stream_executor/gpu/gpu_cudamallocasync_allocator.h"
#elif TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/stream_executor/integrations/device_mem_allocator.h"
#include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "xla/util.h"
namespace xla {
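// Manages asynchronous host-to-device transfers for the SE:GPU client.
// Destination device buffers are allocated up front; callers then stream
// literals or raw bytes into them, and a per-buffer definition event signals
// when each buffer's contents are ready for use.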
class AsyncHostToDeviceTransferManager
: public xla::PjRtClient::AsyncHostToDeviceTransferManager {
public:
static absl::StatusOr<std::unique_ptr<AsyncHostToDeviceTransferManager>>
Create(absl::Span<const PjRtClient::ShapeSpec> shape_specs,
std::optional<absl::Span<const Layout>> device_layouts,
PjRtStreamExecutorDevice* device, PjRtStreamExecutorClient* client,
PjRtMemorySpace* memory_space) {
if (device_layouts != std::nullopt &&
device_layouts->size() != shape_specs.size()) {
return InvalidArgument(
"Number of layouts %d does not match the number of shapes %d",
device_layouts->size(), shape_specs.size());
}
absl::InlinedVector<std::unique_ptr<PjRtBuffer>, 4> buffers;
absl::InlinedVector<std::shared_ptr<TrackedDeviceBuffer>, 4> buffer_ptrs;
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events;
absl::InlinedVector<Shape, 4> device_shapes;
buffers.reserve(shape_specs.size());
buffer_ptrs.reserve(shape_specs.size());
definition_events.reserve(shape_specs.size());
device_shapes.reserve(shape_specs.size());
for (int i = 0; i < shape_specs.size(); ++i) {
const PjRtClient::ShapeSpec& shape_spec = shape_specs[i];
if (shape_spec.element_type == TUPLE) {
return Unimplemented(
"Async buffer transfer of tuples not implemented.");
}
definition_events.push_back(
std::make_shared<BufferSequencingEvent>(client->thread_pool()));
Shape& device_shape = device_shapes.emplace_back(
ShapeUtil::MakeShape(shape_spec.element_type, shape_spec.dims));
if (device_layouts == std::nullopt) {
TF_ASSIGN_OR_RETURN(device_shape,
client->client()
->backend()
.transfer_manager()
->ChooseCompactLayoutForShape(device_shape));
} else {
*device_shape.mutable_layout() = (*device_layouts)[i];
}
LocalDeviceState* local_device = device->local_device_state();
se::Stream* h2d_stream = local_device->host_to_device_stream();
TF_ASSIGN_OR_RETURN(auto buffer,
AllocateDestinationBuffer(
device_shape, device, local_device, h2d_stream,
true, client,
definition_events.back(), memory_space));
auto* se_buffer =
tensorflow::down_cast<PjRtStreamExecutorBuffer*>(buffer.get());
DCHECK(se_buffer);
auto hold = se_buffer->GetBufferWithUsageHold();
buffer_ptrs.push_back(hold.buffer());
buffers.push_back(std::move(buffer));
}
return std::make_unique<AsyncHostToDeviceTransferManager>(
std::move(buffers), std::move(buffer_ptrs),
std::move(definition_events), std::move(device_shapes), device);
}
AsyncHostToDeviceTransferManager(
absl::InlinedVector<std::unique_ptr<PjRtBuffer>, 4> buffers,
absl::InlinedVector<std::shared_ptr<TrackedDeviceBuffer>, 4> buffer_ptrs,
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events,
absl::InlinedVector<Shape, 4> device_shapes,
PjRtStreamExecutorDevice* device)
: buffers_(std::move(buffers)),
buffer_ptrs_(std::move(buffer_ptrs)),
definition_events_(std::move(definition_events)),
device_shapes_(std::move(device_shapes)),
remaining_buffer_count_(buffer_ptrs_.size()),
transfers_in_flight_(0),
device_(device) {
buffer_sizes_.reserve(buffer_ptrs_.size());
for (const auto& ptr : buffer_ptrs_) {
DCHECK_EQ(ptr->device_memory().size(), 1);
buffer_sizes_.push_back(ptr->device_memory()[0].size());
}
last_transfer_started_.resize(buffer_ptrs_.size(), false);
}
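  // Blocks until every in-flight transfer has completed so that callbacks
  // enqueued on the host-to-device stream never outlive this manager.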
~AsyncHostToDeviceTransferManager() override {
auto transfers_finished = [this]() {
mu_.AssertHeld();
return transfers_in_flight_ == 0;
};
{
absl::MutexLock l(&mu_);
mu_.Await(absl::Condition(&transfers_finished));
}
}
  size_t buffer_count() const override { return buffers_.size(); }
size_t buffer_size(int buffer_index) const override {
DCHECK_LT(buffer_index, buffer_sizes_.size());
return buffer_sizes_[buffer_index];
}
PjRtDevice* device() const override { return device_; }
std::unique_ptr<PjRtBuffer> RetrieveBuffer(int buffer_index) override {
DCHECK_LT(buffer_index, buffers_.size());
return std::move(buffers_[buffer_index]);
  }
absl::Status TransferLiteralToBuffer(
int buffer_index, const LiteralSlice& literal,
absl::AnyInvocable<void() &&> on_done) override {
tsl::profiler::TraceMe traceme(
"AsyncHostToDeviceTransferManager::TransferLiteralToBuffer");
auto* stream = device_->local_device_state()->host_to_device_stream();
auto* se_client =
tensorflow::down_cast<PjRtStreamExecutorClient*>(device_->client());
DCHECK(se_client);
TransferManager* transfer_manager =
se_client->client()->backend().transfer_manager();
std::shared_ptr<TrackedDeviceBuffer> buffer;
{
absl::MutexLock l(&mu_);
DCHECK_LT(buffer_index, buffer_ptrs_.size());
if (last_transfer_started_[buffer_index]) {
return InvalidArgument(
"TransferLiteralToBuffer requested for buffer index %d which has "
"already been fully transferred",
buffer_index);
}
last_transfer_started_[buffer_index] = true;
buffer = buffer_ptrs_[buffer_index];
DCHECK(buffer);
if (buffer->device_memory().empty()) {
return InvalidArgument(
"TransferLiteralToBuffer requested for buffer index %d which has "
"been donated. Async transfer of donated buffers is not supported "
"in SE:GPU",
buffer_index);
}
DCHECK_EQ(buffer->device_memory().size(), 1);
++transfers_in_flight_;
}
auto transfer_h2d = [this, buffer_index, stream, transfer_manager, literal,
device_buffer = buffer.get(),
local_device =
std::move(device_->local_device_state()),
on_done = std::move(on_done)]() mutable {
tsl::profiler::TraceMe traceme(
"AsyncHostToDeviceTransferManager::TransferLiteralToBuffer::transfer_"
"h2d");
auto event = local_device->event_pool().AllocateEvent(stream->parent());
ShapedBuffer buffer =
device_buffer->AsShapedBuffer(device_shapes_[buffer_index]);
TF_CHECK_OK(transfer_manager->TransferLiteralToDeviceAsync(
stream, literal, buffer));
local_device->event_pool().ThenRecordEvent(stream, event.value());
auto cleanup = [this, buffer_index, stream, on_done = std::move(on_done),
event = std::move(event).value()]() mutable {
CleanUp(buffer_index, std::move(event), stream,
true, std::move(on_done));
};
auto status = stream->DoHostCallback(std::move(cleanup));
if (!status.ok()) {
LOG(ERROR) << "DoHostCallback failed: " << status;
}
};
se_client->thread_pool()->Schedule(
([ptr = new absl::AnyInvocable<void()>(std::move(transfer_h2d))]() {
(*ptr)();
delete ptr;
}));
return absl::OkStatus();
}
absl::Status TransferRawDataToBuffer(
int buffer_index, absl::string_view data,
absl::AnyInvocable<void() &&> on_done) override {
return TransferRawDataToSubBuffer(buffer_index, data.data(),
0, data.size(),
true,
std::move(on_done));
}
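  // Copies `transfer_size` bytes from `data` into the destination buffer at
  // `offset`, staging through a pinned host buffer when the client requests
  // staged host-to-device transfers.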
absl::Status TransferRawDataToSubBuffer(
int buffer_index, const void* data, int64_t offset, int64_t transfer_size,
bool is_last_transfer, absl::AnyInvocable<void() &&> on_done) override {
auto* stream = device_->local_device_state()->host_to_device_stream();
auto* client =
tensorflow::down_cast<PjRtStreamExecutorClient*>(device_->client());
bool should_stage_host_to_device_transfers =
client->should_stage_host_to_device_transfers();
std::shared_ptr<void> staging_buffer;
if (should_stage_host_to_device_transfers) {
auto* host_memory_allocator = client->host_memory_allocator();
if (host_memory_allocator == nullptr) {
return InvalidArgument(
"host_memory_allocator should be initialized for staging buffer "
"transfer.");
}
void* ptr = host_memory_allocator->AllocateRaw(
tsl::Allocator::kAllocatorAlignment, transfer_size);
staging_buffer = std::shared_ptr<void>(
ptr, [host_memory_allocator = host_memory_allocator](void* ptr) {
host_memory_allocator->DeallocateRaw(ptr);
});
}
absl::ReleasableMutexLock l(&mu_);
DCHECK_LT(buffer_index, buffer_ptrs_.size());
if (last_transfer_started_[buffer_index]) {
return InvalidArgument(
"TransferRawData requested for buffer index %d which has "
"already been fully transferred",
buffer_index);
}
if (is_last_transfer) {
last_transfer_started_[buffer_index] = true;
}
DCHECK(buffer_ptrs_[buffer_index]);
if (buffer_ptrs_[buffer_index]->device_memory().empty()) {
return InvalidArgument(
"TransferRawDataToSubBuffer requested for buffer index %d which has "
"been donated. Async transfer of donated buffers is not supported "
"in SE:GPU",
buffer_index);
}
DCHECK_EQ(buffer_ptrs_[buffer_index]->device_memory().size(), 1);
auto& buffer_memory = buffer_ptrs_[buffer_index]->device_memory()[0];
se::DeviceMemoryBase sub_buffer;
CHECK_LE(offset, buffer_memory.size());
CHECK_LE(transfer_size, buffer_memory.size() - offset);
if (transfer_size < buffer_memory.size()) {
sub_buffer = buffer_memory.GetByteSlice(offset, transfer_size);
} else {
sub_buffer = buffer_memory;
}
++transfers_in_flight_;
l.Release();
auto event = device_->local_device_state()->event_pool().AllocateEvent(
stream->parent());
if (transfer_size != 0) {
if (staging_buffer != nullptr) {
auto copy_to_staging_buffer = [data, transfer_size,
staging_buffer]() mutable {
std::memcpy(staging_buffer.get(), data, transfer_size);
};
if (auto status =
stream->DoHostCallback(std::move(copy_to_staging_buffer));
!status.ok()) {
return status;
}
if (auto status = stream->Memcpy(&sub_buffer, staging_buffer.get(),
transfer_size);
!status.ok()) {
return status;
}
} else if (auto status = stream->Memcpy(&sub_buffer, data, transfer_size);
!status.ok()) {
return status;
}
}
device_->local_device_state()->event_pool().ThenRecordEvent(stream,
event.value());
auto cleanup = [this, buffer_index, event = std::move(event).value(),
stream, is_last_transfer, on_done = std::move(on_done),
staging_buffer = std::move(staging_buffer)]() mutable {
CleanUp(buffer_index, std::move(event), stream, is_last_transfer,
std::move(on_done));
};
return stream->DoHostCallback(std::move(cleanup));
}
void SetBufferError(int buffer_index, absl::Status error) override {
{
absl::MutexLock l(&mu_);
CHECK(!definition_events_[buffer_index]->IsDefined());
definition_events_[buffer_index]->SetDefinedStatus(error);
}
VLOG(1) << "SetBufferError sets the " << buffer_index
<< "th buffer error: " << error;
}
void AddTransferMetadata(const TransferMetadata& meta) override {}
private:
absl::Mutex mu_;
absl::InlinedVector<std::unique_ptr<PjRtBuffer>, 4> buffers_;
absl::InlinedVector<size_t, 4> buffer_sizes_;
absl::InlinedVector<std::shared_ptr<TrackedDeviceBuffer>, 4> buffer_ptrs_
ABSL_GUARDED_BY(mu_);
absl::InlinedVector<bool, 4> last_transfer_started_ ABSL_GUARDED_BY(mu_);
absl::InlinedVector<std::shared_ptr<BufferSequencingEvent>, 4>
definition_events_ ABSL_GUARDED_BY(mu_);
const absl::InlinedVector<Shape, 4> device_shapes_;
size_t remaining_buffer_count_ ABSL_GUARDED_BY(mu_);
int transfers_in_flight_ ABSL_GUARDED_BY(mu_);
PjRtStreamExecutorDevice* device_;
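  // Runs as a stream host callback after each transfer: decrements the
  // in-flight count and, for the last transfer of a buffer, drops the buffer
  // reference and records its definition event on the stream.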
void CleanUp(int buffer_index, EventPool::Handle event, se::Stream* stream,
bool is_last_transfer, absl::AnyInvocable<void() &&> on_done) {
{
absl::MutexLock l(&mu_);
CHECK_GT(transfers_in_flight_, 0);
--transfers_in_flight_;
if (is_last_transfer) {
CHECK(buffer_ptrs_[buffer_index]);
buffer_ptrs_[buffer_index] = nullptr;
CHECK_GT(remaining_buffer_count_, 0);
--remaining_buffer_count_;
definition_events_[buffer_index]->SetSequencingEvent(std::move(event),
stream);
if (remaining_buffer_count_ == 0) {
VLOG(1) << "TransferLiteralToBuffer for all buffers is done.";
}
}
}
std::move(on_done)();
}
};
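// The client owns one HBM memory space and one pinned-host memory space per
// addressable device and attaches both to the device.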
StreamExecutorGpuClient::StreamExecutorGpuClient(
std::string platform_name, LocalClient* client,
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices,
int process_index, std::unique_ptr<se::DeviceMemoryAllocator> allocator,
std::unique_ptr<tsl::Allocator> host_memory_allocator,
bool should_stage_host_to_device_transfers,
std::unique_ptr<gpu::GpuExecutableRunOptions> gpu_run_options,
std::shared_ptr<KeyValueStoreInterface> kv_store,
std::shared_ptr<const GpuTopology> gpu_topology)
: xla::PjRtStreamExecutorClient(
platform_name, client, std::move(devices), process_index,
std::move(allocator), std::move(host_memory_allocator),
should_stage_host_to_device_transfers, std::move(gpu_run_options)),
topology_(xla::StreamExecutorGpuTopologyDescription::Create(
tsl::Fingerprint64(platform_name), platform_name,
std::move(gpu_topology))),
kv_store_(std::move(kv_store)) {
for (auto* device : addressable_devices()) {
const int id = device->id();
auto memory_space =
std::make_unique<StreamExecutorGpuHbmMemorySpace>(id, device);
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)->AttachMemorySpace(
memory_space.get());
owned_memory_spaces_.push_back(std::move(memory_space));
const size_t basePinnedId = devices.size();
auto pinned = std::make_unique<PinnedHostMemorySpace>(basePinnedId, device);
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device)->AttachMemorySpace(
pinned.get());
owned_memory_spaces_.push_back(std::move(pinned));
}
for (const std::unique_ptr<PjRtMemorySpace>& memory_space :
owned_memory_spaces_) {
memory_spaces_.push_back(memory_space.get());
}
absl::c_sort(memory_spaces_,
[](const PjRtMemorySpace* a, const PjRtMemorySpace* b) {
return a->id() < b->id();
});
}
absl::string_view StreamExecutorGpuClient::platform_version() const {
#define STRINGIFY2(X) #X
#define STRINGIFY(X) STRINGIFY2(X)
#if TENSORFLOW_USE_ROCM && defined(TF_ROCM_VERSION)
return "rocm " STRINGIFY(TF_ROCM_VERSION);
#elif GOOGLE_CUDA && defined(CUDART_VERSION)
return "cuda " STRINGIFY(CUDART_VERSION);
#else
return "<unknown>";
#endif
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const PjRtClient::ShapeSpec> shape_specs,
std::optional<absl::Span<const Layout>> device_layouts,
PjRtDevice* device) {
auto* stream_executor_device =
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device);
return xla::AsyncHostToDeviceTransferManager::Create(
shape_specs, std::move(device_layouts), stream_executor_device, this,
nullptr);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const Shape> shapes, PjRtDevice* device) {
absl::InlinedVector<PjRtClient::ShapeSpec, 4> shape_specs;
shape_specs.reserve(shapes.size());
for (const auto& shape : shapes) {
shape_specs.emplace_back(PjRtClient::ShapeSpec{
shape.element_type(),
DimensionVector(shape.dimensions().begin(), shape.dimensions().end())});
}
return CreateBuffersForAsyncHostToDevice(
shape_specs, std::nullopt, device);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const PjRtClient::ShapeSpec> shape_specs,
std::optional<absl::Span<const Layout>> device_layouts,
PjRtMemorySpace* memory_space) {
CHECK_EQ(memory_space->devices().size(), 1);
PjRtDevice* device = memory_space->devices()[0];
auto* stream_executor_device =
tensorflow::down_cast<PjRtStreamExecutorDevice*>(device);
return xla::AsyncHostToDeviceTransferManager::Create(
shape_specs, std::move(device_layouts), stream_executor_device, this,
memory_space);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
StreamExecutorGpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const Shape> shapes, PjRtMemorySpace* memory_space) {
absl::InlinedVector<PjRtClient::ShapeSpec, 4> shape_specs;
shape_specs.reserve(shapes.size());
for (const auto& shape : shapes) {
shape_specs.emplace_back(PjRtClient::ShapeSpec{
shape.element_type(),
DimensionVector(shape.dimensions().begin(), shape.dimensions().end())});
}
return CreateBuffersForAsyncHostToDevice(
shape_specs, std::nullopt, memory_space);
}
absl::StatusOr<xla::DeviceAssignment>
StreamExecutorGpuClient::GetDefaultDeviceAssignment(int num_replicas,
int num_partitions) const {
if (num_partitions == 1 && num_replicas <= addressable_devices().size()) {
xla::DeviceAssignment assignment(num_replicas, 1);
for (int i = 0; i < num_replicas; ++i) {
assignment(i, 0) = addressable_devices().at(i)->id();
}
return assignment;
}
return PjRtStreamExecutorClient::GetDefaultDeviceAssignment(num_replicas,
num_partitions);
}
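// Asynchronously copies `transfer_size` bytes at `offset` from a device
// buffer to host memory supplied via the `dst` future, staging through pinned
// host memory when the client is configured for staged transfers.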
PjRtFuture<> StreamExecutorGpuClient::CopyRawSubBufferToHost(
PjRtBuffer* pjrt_buffer, PjRtFuture<void*> dst, int64_t offset,
int64_t transfer_size) {
auto* buffer = tensorflow::down_cast<PjRtStreamExecutorBuffer*>(pjrt_buffer);
DCHECK(buffer);
PjRtStreamExecutorDevice* device = buffer->device();
LocalDeviceState* local_device = device->local_device_state();
se::Stream* stream = local_device->GetDeviceToHostStream();
PjRtStreamExecutorBuffer::ScopedHold hold(buffer->GetBufferWithUsageHold());
if (!hold.ok()) {
return PjRtFuture<>(hold.status());
}
auto device_buffer = hold.buffer();
if (device_buffer->device_memory().size() != 1) {
return PjRtFuture<>(InvalidArgument("Copy raw buffer called on tuple"));
}
auto promise = PjRtFuture<>::CreatePromise();
auto usage_event =
std::make_shared<BufferSequencingEvent>(this->thread_pool());
hold.ConvertUsageHold(stream, usage_event, true);
auto async_copy = [this, promise, offset, transfer_size, stream, local_device,
device_buffer, usage_event = std::move(usage_event)](
absl::StatusOr<void*> dst) mutable {
absl::StatusOr<EventPool::Handle> event =
local_device->event_pool().AllocateEvent(stream->parent());
if (!event.ok()) {
promise.Set(event.status());
return;
}
absl::Status defined_status =
device_buffer->definition_events()[0]->GetDefinedStatus();
if (!defined_status.ok()) {
promise.Set(defined_status);
return;
}
auto& device_memory = device_buffer->device_memory()[0];
if (offset < 0 || offset > device_memory.size() ||
device_memory.size() - offset < transfer_size) {
promise.Set(
InvalidArgument("Copy raw buffer called on buffer size %lld with "
"invalid offset %lld, transfer size %lld",
device_memory.size(), offset, transfer_size));
return;
}
std::unique_ptr<se::DeviceMemoryBase> sub_buffer;
if (transfer_size < device_memory.size()) {
sub_buffer = std::make_unique<se::DeviceMemoryBase>(
device_memory.GetByteSlice(offset, transfer_size));
} else {
sub_buffer = std::make_unique<se::DeviceMemoryBase>(device_memory);
}
WaitForBufferDefinitionEventsOnStream(*device_buffer, stream);
if (transfer_size != 0) {
if (should_stage_host_to_device_transfers()) {
if (host_memory_allocator() == nullptr) {
promise.Set(InvalidArgument(
"host_memory_allocator should be initialized for staging buffer "
"transfer."));
return;
}
void* ptr = host_memory_allocator()->AllocateRaw(
tsl::Allocator::kAllocatorAlignment, transfer_size);
std::shared_ptr<void> staging_buffer = std::shared_ptr<void>(
ptr, [host_memory_allocator = host_memory_allocator()](void* ptr) {
host_memory_allocator->DeallocateRaw(ptr);
});
if (auto status = stream->Memcpy(staging_buffer.get(), *sub_buffer,
transfer_size);
!status.ok()) {
promise.Set(std::move(status));
return;
}
auto copy_to_staging_buffer = [dst, transfer_size,
staging_buffer]() mutable {
std::memcpy(*dst, staging_buffer.get(), transfer_size);
};
if (auto status = stream->DoHostCallback(copy_to_staging_buffer);
!status.ok()) {
promise.Set(std::move(status));
return;
}
} else {
auto status = stream->Memcpy(*dst, *sub_buffer, transfer_size);
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
}
}
local_device->event_pool().ThenRecordEvent(stream, event.value());
usage_event->SetSequencingEvent(std::move(event).value(), stream);
auto callback_status = local_device->ThenExecuteCallback(
stream, [promise, device_buffer = std::move(device_buffer)]() mutable {
promise.Set();
});
if (!callback_status.ok()) {
promise.Set(std::move(callback_status));
return;
}
};
device_buffer->definition_events()[0]->ExecuteOrAddToFutureTasks(
absl::StrFormat("async_copy_raw_sub_buffer_to_host_%p", &async_copy),
[this, dst, async_copy = std::move(async_copy)]() mutable {
dst.OnReady([this, async_copy = std::move(async_copy)](
absl::StatusOr<void*> dst) {
thread_pool()->Schedule(absl::bind_front(async_copy, std::move(dst)));
});
});
return PjRtFuture<>(
std::move(promise),
[]() {
tsl::profiler::TraceMeProducer traceme(
"StreamExecutorGpuClient::CopyRawSubBufferToHost");
VLOG(1) << "StreamExecutorGpuClient::CopyRawSubBufferToHost";
return PjRtFutureHelpers::ProfilingKeys(
{traceme.GetContextId()});
},
[](PjRtFutureHelpers::ProfilingKeys keys) {
tsl::profiler::TraceMeConsumer traceme(
"StreamExecutorGpuClient::CopyRawSubBufferToHost",
keys.traceme_context_id);
});
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
StreamExecutorGpuClient::Compile(const XlaComputation& computation,
CompileOptions options) {
options.executable_build_options.set_key_value_store(kv_store_);
auto executable = PjRtStreamExecutorClient::Compile(computation, options);
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
for (const PjRtDevice* device : addressable_devices()) {
LocalDeviceState* local_device_state =
tensorflow::down_cast<const PjRtStreamExecutorDevice*>(device)
->local_device_state();
int64_t free_memory, total_memory;
if (local_device_state != nullptr) {
se::StreamExecutor* executor = local_device_state->executor();
int device_ordinal = executor->device_ordinal();
if (executor->DeviceMemoryUsage(&free_memory, &total_memory)) {
gpu_metrics::RecordFreeGpuSystemMemory(device_ordinal, free_memory);
} else {
LOG(ERROR) << "Failed to query available memory for GPU "
<< device_ordinal;
}
}
}
#endif
return executable;
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
StreamExecutorGpuClient::LoadSerialized(absl::string_view serialized,
std::optional<CompileOptions> options,
const LoadOptions& load_options) {
return PjRtStreamExecutorClient::DeserializeExecutable(serialized, options);
}
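// Deserializes each AOT compilation result held by the StreamExecutorExecutable
// into a LocalExecutable and wraps the results in a
// PjRtStreamExecutorLoadedExecutable.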
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
StreamExecutorGpuClient::Load(std::unique_ptr<PjRtExecutable> executable) {
auto se_executable = absl::WrapUnique(
tensorflow::down_cast<StreamExecutorExecutable*>(executable.release()));
CompileOptions compile_options = se_executable->compile_options();
CompileOptions input_options = compile_options;
TF_RETURN_IF_ERROR(compile_options.ApplyAllOptionOverrides());
TF_ASSIGN_OR_RETURN(ExecutableExtras extras,
GetExecutableExtras(&compile_options));
std::vector<std::unique_ptr<LocalExecutable>> local_executables;
local_executables.reserve(se_executable->aot_executables().size());
for (std::unique_ptr<xla::AotCompilationResult>& aot_executable :
se_executable->aot_executables()) {
TF_ASSIGN_OR_RETURN(std::string serialized,
aot_executable->SerializeAsString());
TF_ASSIGN_OR_RETURN(
std::unique_ptr<LocalExecutable> local_executable,
client()->Load(serialized, compile_options.executable_build_options));
local_executables.push_back(std::move(local_executable));
}
bool parameter_is_tupled_arguments =
compile_options.parameter_is_tupled_arguments;
auto ret = std::make_unique<PjRtStreamExecutorLoadedExecutable>(
std::move(local_executables), parameter_is_tupled_arguments,
std::move(extras.device_assignment), std::move(input_options),
std::move(extras.addressable_device_logical_ids),
std::move(extras.addressable_devices), this);
TF_RETURN_IF_ERROR(ret->SetUpDonation(parameter_is_tupled_arguments));
return std::unique_ptr<PjRtLoadedExecutable>(std::move(ret));
}
namespace {
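// Creates a cudaMallocAsync-based allocator limited to `memory_fraction` of
// total device memory; when CUDA >= 11.2 is unavailable, the fallback overload
// below simply returns an error.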
#if defined(GOOGLE_CUDA) && CUDA_VERSION >= 11020
absl::StatusOr<std::unique_ptr<se::GpuCudaMallocAsyncAllocator>>
CreateCudaAsyncAllocator(const LocalDeviceState& device, double memory_fraction,
bool reserve_memory, bool create_new_pool,
bool sync_mode, bool compute_stats = true) {
se::StreamExecutor* executor = device.executor();
int device_ordinal = executor->device_ordinal();
int64_t free_memory;
int64_t total_memory;
if (!executor->DeviceMemoryUsage(&free_memory, &total_memory)) {
return Unavailable("Failed to query available memory from device %i",
device_ordinal);
}
size_t allocator_memory = total_memory * memory_fraction;
if (reserve_memory) {
LOG(INFO) << "XLA backend allocating " << allocator_memory
<< " bytes on device " << device_ordinal
<< " for CudaAsyncAllocator.";
} else {
LOG(INFO) << "XLA backend will use up to " << allocator_memory
<< " bytes on device " << device_ordinal
<< " for CudaAsyncAllocator.";
}
auto allocator = std::make_unique<se::GpuCudaMallocAsyncAllocator>(
tsl::PlatformDeviceId(device_ordinal),
create_new_pool,
allocator_memory,
reserve_memory,
reserve_memory ? allocator_memory : 0,
sync_mode,
compute_stats);
allocator->SetStreamAndPreallocateMemory(
device.compute_stream()->platform_specific_handle().stream);
return allocator;
}
#else
absl::StatusOr<std::unique_ptr<tsl::Allocator>> CreateCudaAsyncAllocator(
const LocalDeviceState& device, double memory_fraction, bool reserve_memory,
bool create_new_pool, bool sync_mode, bool compute_stats = true) {
return FailedPrecondition("CUDA async allocator requires CUDA >= 11.2");
}
#endif
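// Creates one LocalDeviceState per stream executor, keyed by device ordinal.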
absl::StatusOr<std::map<int, std::unique_ptr<LocalDeviceState>>>
BuildLocalDeviceStates(LocalClient* xla_client) {
std::map<int, std::unique_ptr<LocalDeviceState>> addressable_devices;
for (se::StreamExecutor* executor :
xla_client->backend().stream_executors()) {
addressable_devices.emplace(
executor->device_ordinal(),
std::make_unique<LocalDeviceState>(
executor, xla_client, LocalDeviceState::kComputeSynchronized,
32,
true, true));
}
return std::move(addressable_devices);
}
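// Builds per-device allocators according to `allocator_config` (CUDA async,
// BFC, or platform), adds collective-memory and pinned-host allocators, and
// wraps them in a MultiDeviceAdapter; the platform case returns nullptr.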
absl::StatusOr<std::unique_ptr<se::DeviceMemoryAllocator>>
GetStreamExecutorGpuDeviceAllocator(
se::Platform* platform, const GpuAllocatorConfig& allocator_config,
const std::map<int, std::unique_ptr<LocalDeviceState>>&
addressable_devices) {
std::vector<se::MultiDeviceAdapter::AllocatorInfo> allocators;
switch (allocator_config.kind) {
case GpuAllocatorConfig::Kind::kCudaAsync: {
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto async_allocator,
CreateCudaAsyncAllocator(
*(ordinal_and_device.second), allocator_config.memory_fraction,
allocator_config.preallocate, false, false, true));
allocators.emplace_back(std::move(async_allocator),
ordinal_and_device.second->compute_stream(),
0);
}
break;
}
case GpuAllocatorConfig::Kind::kDefault:
case GpuAllocatorConfig::Kind::kBFC: {
LOG(INFO) << "Using BFC allocator.";
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto bfc_allocator,
CreateBFCAllocator(ordinal_and_device.second->executor(),
allocator_config.memory_fraction,
allocator_config.preallocate,
allocator_config.gpu_system_memory_size));
allocators.emplace_back(std::move(bfc_allocator),
ordinal_and_device.second->compute_stream(),
0);
}
break;
}
case GpuAllocatorConfig::Kind::kPlatform:
LOG(INFO) << "Using platform allocator.";
if (allocator_config.collective_memory_size != 0) {
LOG(WARNING)
<< "collective_memory_size is non-zero, but allocator kind is set "
"to \"platform\". Collective memory will not be allocated.";
}
return nullptr;
}
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto collective_bfc_allocator,
CreateCollectiveBFCAllocator(
ordinal_and_device.second->executor(),
1.0 - allocator_config.memory_fraction,
allocator_config.collective_memory_size));
allocators.emplace_back(std::move(collective_bfc_allocator),
ordinal_and_device.second->compute_stream(),
1);
}
for (const auto& ordinal_and_device : addressable_devices) {
auto host_allocator =
GetGpuHostAllocator(ordinal_and_device.second->executor());
allocators.emplace_back(std::move(host_allocator),
ordinal_and_device.second->compute_stream(),
static_cast<int>(se::MemoryType::kHost));
}
#if defined(GOOGLE_CUDA) && CUDA_VERSION >= 11020
const auto& debug_options = xla::GetDebugOptionsFromFlags();
if (debug_options.xla_gpu_temp_buffer_use_separate_color()) {
for (const auto& ordinal_and_device : addressable_devices) {
TF_ASSIGN_OR_RETURN(
auto async_allocator,
CreateCudaAsyncAllocator(*(ordinal_and_device.second), 1.0, false,
true, true, true));
allocators.emplace_back(
std::move(async_allocator),
ordinal_and_device.second->compute_stream(),
gpu::kTempBufferMemorySpaceColor);
}
}
#endif
return std::make_unique<se::MultiDeviceAdapter>(platform,
std::move(allocators));
}
void NameDeviceAndLauncherThread(const LocalTopologyProto& node,
const DeviceProto& device_proto,
WorkerThread* launcher_thread) {
auto suffix = absl::StrFormat(":#global=%d,local=%d,process=%d,slice=%d#",
device_proto.global_device_id(),
device_proto.local_device_ordinal(),
node.node_id(), device_proto.slice_index());
tsl::profiler::NameDevice(device_proto.local_device_ordinal(),
absl::StrCat("Xla", suffix));
launcher_thread->Schedule([name = absl::StrCat("XlaLauncher", suffix)] {
tsl::profiler::NameCurrentThread(name);
});
}
}
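// Exchanges local device topologies across nodes (or synthesizes identical
// ones when mock NCCL is enabled) and creates a StreamExecutorGpuDevice for
// every device in the resulting global topology.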
absl::StatusOr<DeviceTopologyPair> BuildDistributedDevices(
std::string_view platform_name,
std::map<int, std::unique_ptr<LocalDeviceState>> local_device_states,
int node_id, int num_nodes,
gpu::GpuExecutableRunOptions* gpu_executable_run_options,
std::shared_ptr<KeyValueStoreInterface> kv_store, bool enable_mock_nccl,
absl::Duration get_local_topology_timeout,
absl::Duration get_global_topology_timeout) {
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices;
LocalTopologyProto local_topology;
local_topology.set_node_id(node_id);
std::string boot_id_str;
auto boot_id_str_or_status = GetBootIdString();
if (!boot_id_str_or_status.ok()) {
LOG(INFO) << boot_id_str_or_status.status();
} else {
boot_id_str = boot_id_str_or_status.value();
}
local_topology.set_boot_id(boot_id_str);
for (const auto& ordinal_and_device : local_device_states) {
const se::Platform* platform =
ordinal_and_device.second->executor()->GetPlatform();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::se::DeviceDescription> desc,
platform->DescriptionForDevice(
ordinal_and_device.second->local_hardware_id().value()));
DeviceProto* device_proto = local_topology.add_devices();
device_proto->set_local_device_ordinal(ordinal_and_device.first);
device_proto->set_name(desc->name());
device_proto->set_vendor(desc->device_vendor());
device_proto->set_compute_capability(
MakeComputeCapabilityString(desc.get()));
device_proto->set_core_count(desc->core_count());
}
GlobalTopologyProto global_topology;
if (enable_mock_nccl) {
std::vector<LocalTopologyProto> local_topologies(num_nodes, local_topology);
for (int i = 0; i < num_nodes; ++i) {
local_topologies[i].set_node_id(i);
local_topologies[i].set_boot_id(absl::StrCat(i));
}
global_topology = BuildGlobalTopology(absl::MakeSpan(local_topologies),
true);
} else {
TF_RETURN_IF_ERROR(ExchangeTopologies(
platform_name, node_id, num_nodes, get_local_topology_timeout,
get_global_topology_timeout, kv_store.get(), local_topology,
&global_topology, true));
}
std::map<int, GlobalDeviceId> gpu_device_ids;
absl::flat_hash_map<GlobalDeviceId, int> device_to_node;
for (const LocalTopologyProto& node : global_topology.nodes()) {
for (const DeviceProto& device_proto : node.devices()) {
GlobalDeviceId global_device_id(device_proto.global_device_id());
device_to_node[global_device_id] = node.node_id();
std::unique_ptr<LocalDeviceState> local_device;
if (node.node_id() == node_id) {
auto it = local_device_states.find(device_proto.local_device_ordinal());
TF_RET_CHECK(it != local_device_states.end())
<< device_proto.local_device_ordinal();
TF_RET_CHECK(it->second != nullptr);
local_device = std::move(it->second);
gpu_device_ids[device_proto.local_device_ordinal()] = global_device_id;
NameDeviceAndLauncherThread(node, device_proto,
local_device->execute_thread());
}
auto device = std::make_unique<StreamExecutorGpuDevice>(
device_proto.global_device_id(), std::move(local_device),
device_proto.name(), device_proto.vendor(),
device_proto.compute_capability(), device_proto.core_count(),
node.node_id(), device_proto.slice_index());
devices.push_back(std::move(device));
}
}
for (const auto& device : local_device_states) {
TF_RET_CHECK(device.second == nullptr);
}
gpu_executable_run_options->set_gpu_global_device_ids(
std::move(gpu_device_ids));
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
if (num_nodes > 1) {
auto nccl_id_store = std::make_shared<NcclIdStore>(node_id, device_to_node,
std::move(kv_store));
gpu_executable_run_options->set_nccl_clique_id_callback(
[nccl_id_store](const gpu::NcclCliqueKey& key) {
return nccl_id_store->GetNcclUniqueId(key);
});
}
#endif
TF_ASSIGN_OR_RETURN(GpuTopologyProto gpu_topology,
BuildGpuTopology(global_topology));
return std::make_pair(std::move(devices), gpu_topology);
}
std::string MakeComputeCapabilityString(const se::DeviceDescription* desc) {
se::GpuComputeCapability cc = desc->gpu_compute_capability();
if (std::holds_alternative<se::CudaComputeCapability>(cc)) {
auto nvcc = std::get<se::CudaComputeCapability>(cc);
return absl::StrCat(nvcc.major, ".", nvcc.minor);
} else if (std::holds_alternative<se::RocmComputeCapability>(cc)) {
auto rocmcc = std::get<se::RocmComputeCapability>(cc);
return rocmcc.gfx_version();
} else {
return "unknown";
}
}
StreamExecutorGpuDevice::StreamExecutorGpuDevice(
int id, std::unique_ptr<LocalDeviceState> local_device_state,
std::string device_kind, std::string device_vendor,
std::string compute_capability, int core_count, int node_id,
int slice_index)
: PjRtStreamExecutorDevice(id, std::move(local_device_state),
std::move(device_kind), node_id),
device_vendor_(std::move(device_vendor)),
slice_index_(slice_index) {
std::array<int, 1> coords = {local_device_id().value()};
description().SetCoords(coords);
std::vector<int64_t> v_coords(description().coords().begin(),
description().coords().end());
description().SetAttributes(
{{"coords", xla::PjRtDeviceAttribute(v_coords)},
{"device_vendor", device_vendor_},
{"slice_index", static_cast<int64_t>(slice_index)},
{"compute_capability", xla::PjRtDeviceAttribute(compute_capability)},
{"core_count", static_cast<int64_t>(core_count)}});
description().SetToString(absl::StrFormat(
"StreamExecutorGpuDevice(device_kind=%s, id=%i, process_index=%i, "
"slice_index=%i))",
description().device_kind(), id, process_index(), slice_index));
description().SetDebugString(absl::StrFormat("%s_%i(process=%i,(%i))",
description().device_kind(), id,
process_index(), v_coords[0]));
}
int StreamExecutorGpuDevice::slice_index() const { return slice_index_; }
absl::string_view StreamExecutorGpuDevice::device_vendor() const {
return device_vendor_;
}
absl::StatusOr<tsl::AllocatorStats> StreamExecutorGpuDevice::GetAllocatorStats()
const {
if (!IsAddressable()) {
return FailedPrecondition(
"GetAllocatorStats() is allowed only for addressable devices");
}
auto* allocator_adapter = dynamic_cast<se::MultiDeviceAdapter*>(
tensorflow::down_cast<PjRtStreamExecutorClient*>(client())->allocator());
if (!allocator_adapter) {
return Unimplemented(
"GetAllocatorStats() is only implemented with MultiDeviceAdapter "
"allocator");
}
TF_ASSIGN_OR_RETURN(auto allocator, allocator_adapter->GetAllocator(
local_device_id().value()));
auto stats = allocator->GetStats();
TF_RET_CHECK(stats.has_value());
return stats.value();
}
absl::Span<int const> StreamExecutorGpuDevice::coords() const {
return description().coords();
}
absl::StatusOr<PjRtMemorySpace*> StreamExecutorGpuDevice::default_memory_space()
const {
return memory_space_by_kind_id(StreamExecutorGpuHbmMemorySpace::kKindId);
}
const int StreamExecutorGpuHbmMemorySpace::kKindId = []() {
uint32_t kind_id = tsl::Fingerprint32(StreamExecutorGpuHbmMemorySpace::kKind);
return static_cast<int>(kind_id);
}();
StreamExecutorGpuHbmMemorySpace::StreamExecutorGpuHbmMemorySpace(
int id, PjRtDevice* device)
: PjRtStreamExecutorMemorySpace(id, device, kKind, kKindId) {}
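// Top-level factory: builds the local device states, allocators, and
// distributed device topology, then assembles them into a
// StreamExecutorGpuClient.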
absl::StatusOr<std::unique_ptr<PjRtClient>> GetStreamExecutorGpuClient(
const GpuClientOptions& options) {
#if TENSORFLOW_USE_ROCM
auto pjrt_platform_name = xla::RocmName();
#elif TENSORFLOW_USE_SYCL
auto pjrt_platform_name = xla::SyclName();
#else
auto pjrt_platform_name = xla::CudaName();
#endif
TF_ASSIGN_OR_RETURN(
LocalClient * xla_client,
GetGpuXlaClient(options.platform_name, options.allowed_devices));
std::map<int, std::unique_ptr<LocalDeviceState>> local_device_states;
TF_ASSIGN_OR_RETURN(local_device_states, BuildLocalDeviceStates(xla_client));
EnablePeerAccess(xla_client->backend().stream_executors());
TF_ASSIGN_OR_RETURN(auto allocator,
GetStreamExecutorGpuDeviceAllocator(
xla_client->platform(), options.allocator_config,
local_device_states));
auto host_memory_allocator =
GetGpuHostAllocator(local_device_states.begin()->second->executor());
auto gpu_run_options = std::make_unique<gpu::GpuExecutableRunOptions>();
if (options.enable_mock_nccl) {
gpu_run_options->set_enable_mock_nccl_collectives();
}
std::shared_ptr<KeyValueStoreInterface> kv_store = options.kv_store;
if (options.enable_mock_nccl) {
kv_store = std::make_shared<InMemoryKeyValueStore>();
}
TF_RET_CHECK(options.num_nodes == 1 || kv_store != nullptr);
TF_ASSIGN_OR_RETURN(
DeviceTopologyPair device_topology_pair,
BuildDistributedDevices(pjrt_platform_name,
std::move(local_device_states), options.node_id,
options.num_nodes, gpu_run_options.get(),
kv_store, options.enable_mock_nccl));
auto gpu_topology = std::shared_ptr<const GpuTopology>(
GpuTopology::FromProto(device_topology_pair.second));
return std::unique_ptr<PjRtClient>(std::make_unique<StreamExecutorGpuClient>(
pjrt_platform_name, xla_client, std::move(device_topology_pair.first),
options.node_id, std::move(allocator), std::move(host_memory_allocator),
options.should_stage_host_to_device_transfers, std::move(gpu_run_options),
std::move(kv_store), std::move(gpu_topology)));
}
absl::StatusOr<std::string> StreamExecutorGpuTopologyDescription::Serialize()
const {
std::string result;
if (!tsl::SerializeToStringDeterministic(gpu_topology_->ToProto(), &result)) {
return absl::InternalError("Failed to serialize gpu_topology");
}
return result;
}
absl::StatusOr<Layout> StreamExecutorGpuTopologyDescription::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) const {
Shape shape = ShapeUtil::MakeShape(element_type, dims);
return LayoutUtil::GetWithDefaultLayout(shape).layout();
}
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> BuildLocalDevices(
std::map<int, std::unique_ptr<LocalDeviceState>> local_device_states,
int node_id) {
std::vector<std::unique_ptr<PjRtStreamExecutorDevice>> devices;
for (auto& ordinal_and_device : local_device_states) {
const se::DeviceDescription& desc =
ordinal_and_device.second->executor()->GetDeviceDescription();
auto device = std::make_unique<StreamExecutorGpuDevice>(
ordinal_and_device.first, std::move(ordinal_and_device.second),
desc.name(), desc.device_vendor(), MakeComputeCapabilityString(&desc),
desc.core_count(), node_id);
devices.push_back(std::move(device));
}
return devices;
}
} | #include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include <stdlib.h>
#include <array>
#include <cstdint>
#include <cstring>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/client/xla_computation.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/gpu/gpu_topology.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/platform_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FloatEq;
using ::testing::Ge;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
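// Parses an HLO text program and compiles it with the given client.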
absl::StatusOr<std::unique_ptr<xla::PjRtLoadedExecutable>> CompileExecutable(
absl::string_view program, xla::PjRtClient& client,
xla::CompileOptions compile_options = xla::CompileOptions()) {
TF_ASSIGN_OR_RETURN(auto hlo_module,
ParseAndReturnUnverifiedModule(program, {}));
xla::XlaComputation xla_computation(hlo_module->ToProto());
return client.Compile(xla_computation, compile_options);
}
absl::StatusOr<std::shared_ptr<xla::Literal>> ExtractSingleResult(
absl::StatusOr<std::vector<std::vector<std::unique_ptr<xla::PjRtBuffer>>>>&
result) {
TF_RETURN_IF_ERROR(result.status());
TF_RET_CHECK(result->size() == 1);
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = (*result)[0];
TF_RET_CHECK(result_buffers.size() == 1);
auto literal_or = result_buffers[0]->ToLiteralSync();
if (!literal_or.status().ok()) return literal_or.status();
return *literal_or;
}
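// HLO program that sends a constant to the host and receives the result back,
// used by the send/recv tests below.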
static constexpr char const* kProgram = R"(HloModule HostTransfer
ENTRY SendRecvSynchronous() -> f32[2] {
in_chain = token[] after-all()
data = f32[2] constant({2, 3})
send = (f32[2], u32[], token[]) send(data, in_chain),
channel_id=1,
is_host_transfer=true,
frontend_attributes={
_xla_host_transfer_handler_name="undef",
_xla_host_transfer_rendezvous="undef"
}
send-done = token[] send-done(send),
channel_id=1, is_host_transfer=true
recv = (f32[2], u32[], token[]) recv(send-done),
channel_id=2,
is_host_transfer=true,
frontend_attributes={
_xla_host_transfer_handler_name="undef",
_xla_host_transfer_rendezvous="undef"
}
recv-done = (f32[2], token[]) recv-done(recv),
channel_id=2, is_host_transfer=true
ROOT result = f32[2] get-tuple-element(recv-done), index=0
})";
TEST(StreamExecutorGpuClientTest, MemorySpace) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->devices().size(), 1);
for (auto* device : client->devices()) {
TF_ASSERT_OK_AND_ASSIGN(auto* memory_space, device->default_memory_space());
EXPECT_EQ(memory_space->kind(), StreamExecutorGpuHbmMemorySpace::kKind);
EXPECT_EQ(memory_space->kind_id(),
StreamExecutorGpuHbmMemorySpace::kKindId);
EXPECT_THAT(
device->memory_space_by_kind(StreamExecutorGpuHbmMemorySpace::kKind),
IsOkAndHolds(memory_space));
EXPECT_EQ(device->memory_spaces().size(), 2);
auto* pinned = device->memory_spaces()[1];
EXPECT_EQ(pinned->kind_id(), PinnedHostMemorySpace::kKindId);
EXPECT_THAT(device->memory_space_by_kind(PinnedHostMemorySpace::kKind),
IsOkAndHolds(pinned));
}
}
TEST(StreamExecutorGpuClientTest, PropagateError) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto shape = xla::ShapeUtil::MakeScalarShape(xla::F32);
absl::Status input_error = absl::InvalidArgumentError("input error");
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->CreateErrorBuffer(
input_error, shape,
*client->addressable_devices()[0]->default_memory_space()));
static constexpr char const* kAddProgram =
R"(
HloModule Add.6, entry_computation_layout={(f32[], f32[])->(f32[], f32[])}
ENTRY %Add.6 (a.1: f32[], b.2: f32[]) -> (f32[], f32[]) {
%a.1 = f32[] parameter(0)
%b.2 = f32[] parameter(1)
%add.3 = f32[] add(f32[] %a.1, f32[] %b.2)
%add.4 = f32[] add(f32[] %add.3, f32[] %add.3)
ROOT %tuple.5 = (f32[], f32[]) tuple(f32[] %add.3, f32[] %add.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kAddProgram, *client));
TF_ASSERT_OK_AND_ASSIGN(
auto result,
executable->Execute({{buffer.get(), buffer.get()}}, {}));
ASSERT_EQ(result.size(), 1);
ASSERT_EQ(result[0].size(), 1);
EXPECT_EQ(result[0][0]->GetReadyFuture().Await(), input_error);
}
TEST(StreamExecutorGpuClientTest, SendRecvChunked) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
std::array<float, 2> sent_value = {0.0f, 0.0f};
SendCallback send_callback = {
1, [&](const PjRtTransferMetadata& m, PjRtChunk chunk,
int64_t total_size_in_bytes, bool done) {
float* data = reinterpret_cast<float*>(chunk.data());
sent_value[0] = data[0];
sent_value[1] = data[1];
return absl::OkStatus();
}};
RecvCallback recv_callback = {
2, [&](const PjRtTransferMetadata& m,
std::unique_ptr<CopyToDeviceStream> stream) {
auto chunk0 = PjRtChunk::AllocateDefault(sizeof(float));
*reinterpret_cast<float*>(chunk0.data()) = 5.0f;
TF_CHECK_OK(stream->AddChunk(std::move(chunk0)).Await());
auto chunk1 = PjRtChunk::AllocateDefault(sizeof(float));
*reinterpret_cast<float*>(chunk1.data()) = 6.0f;
TF_CHECK_OK(stream->AddChunk(std::move(chunk1)).Await());
return absl::OkStatus();
}};
std::vector<std::vector<SendCallback>> send_callbacks = {{send_callback}};
std::vector<std::vector<RecvCallback>> recv_callbacks = {{recv_callback}};
ExecuteOptions opts;
opts.send_callbacks = send_callbacks;
opts.recv_callbacks = recv_callbacks;
auto result = executable->Execute({{}}, opts);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
ExtractSingleResult(result));
EXPECT_EQ(sent_value[0], 2.0f);
EXPECT_EQ(sent_value[1], 3.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<float>({5.0f, 6.0f}),
*result_literal));
}
TEST(StreamExecutorGpuClientTest, SendErrorNoDeadLock) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
SendCallback send_callback = {
1,
[&](const PjRtTransferMetadata&, PjRtChunk, int64_t, bool) {
return Internal("Uh-oh, can send chunk to host");
}};
RecvCallback recv_callback = {
2, [&](const PjRtTransferMetadata& m,
std::unique_ptr<CopyToDeviceStream> stream) {
return absl::OkStatus();
}};
std::vector<std::vector<SendCallback>> send_callbacks = {{send_callback}};
std::vector<std::vector<RecvCallback>> recv_callbacks = {{recv_callback}};
ExecuteOptions opts;
opts.send_callbacks = send_callbacks;
opts.recv_callbacks = recv_callbacks;
auto result = executable->Execute({{}}, opts);
EXPECT_TRUE(absl::StrContains(result.status().message(),
"Uh-oh, can send chunk to host"));
}
TEST(StreamExecutorGpuClientTest, RecvErrorNoDeadLock) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
SendCallback send_callback = {
1, [&](const PjRtTransferMetadata&, PjRtChunk, int64_t,
bool) { return absl::OkStatus(); }};
RecvCallback recv_callback = {
2, [&](const PjRtTransferMetadata& m,
std::unique_ptr<CopyToDeviceStream> stream) {
auto chunk = PjRtChunk::AllocateDefault(10 * sizeof(float));
stream->AddChunk(std::move(chunk)).Await().IgnoreError();
return absl::OkStatus();
}};
std::vector<std::vector<SendCallback>> send_callbacks = {{send_callback}};
std::vector<std::vector<RecvCallback>> recv_callbacks = {{recv_callback}};
ExecuteOptions opts;
opts.send_callbacks = send_callbacks;
opts.recv_callbacks = recv_callbacks;
auto result = executable->Execute({{}}, opts);
EXPECT_TRUE(absl::StrContains(result.status().message(),
"Adding chunk of size 40 would overflow buffer "
"of size 8 (0 already transferred)"));
}
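// FFI handler used by the test below: fills the f32 result buffer with a value
// supplied through ExecuteContext user data.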
struct MemsetValue {
explicit MemsetValue(float value) : value(value) {}
float value;
};
static absl::Status MemsetFromValue(
se::Stream* stream, ffi::Result<ffi::BufferR1<PrimitiveType::F32>> result,
MemsetValue* memset_value) {
uint32_t pattern;
std::memcpy(&pattern, &memset_value->value, sizeof(pattern));
se::DeviceMemoryBase base = result->device_memory();
return stream->Memset32(&base, pattern, base.size());
}
XLA_FFI_DEFINE_HANDLER(kMemsetFromValue, MemsetFromValue,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Ret<ffi::BufferR1<PrimitiveType::F32>>()
.Ctx<ffi::UserData<MemsetValue>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "MemsetFromValue",
PlatformUtil::CanonicalPlatformName("GPU").value(),
kMemsetFromValue);
TEST(StreamExecutorGpuClientTest, ForwardUserDataToFfiHandler) {
static constexpr char const* kProgram = R"(
HloModule ffi_handler
ENTRY main {
ROOT %custom-call = f32[4] custom-call(),
custom_call_target="MemsetFromValue",
api_version=API_VERSION_TYPED_FFI
})";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kProgram, *client));
ExecuteContext context;
TF_ASSERT_OK(context.ffi_context().Emplace<MemsetValue>(42.0f));
ExecuteOptions opts;
opts.context = &context;
auto result = executable->Execute({{}}, opts);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
ExtractSingleResult(result));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({42.0f, 42.0f, 42.0f, 42.0f}),
*result_literal));
}
TEST(StreamExecutorGpuClientTest, ToLiteralAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
auto src_literal = LiteralUtil::CreateR1<float>({41.0f, 42.0f, 43.0f, 44.0f});
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
{src_literal.shape()}, client->addressable_devices()[0]));
auto buffer = transfer_manager->RetrieveBuffer(0);
absl::Mutex mu;
auto literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
bool got_literal = false;
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, [&]() {}));
buffer->ToLiteral(literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
buffer.reset();
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<float>(),
literal->Relayout(src_literal.shape().layout()).data<float>());
}
TEST(StreamExecutorGpuClientTest, ToLiteralAsyncWithNonCompactLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
xla::Shape transposed_shape = xla::ShapeUtil::MakeShapeWithDenseLayout(
xla::S32, {2, 3}, {0, 1});
xla::Literal src_literal = xla::LiteralUtil::CreateR2WithLayout<int32_t>(
{{3, 14, 25}, {36, 47, 58}}, transposed_shape.layout());
PjRtClient::ShapeSpec spec;
spec.element_type = src_literal.shape().element_type();
spec.dims = DimensionVector(src_literal.shape().dimensions().begin(),
src_literal.shape().dimensions().end());
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
{spec},
std::make_optional<absl::Span<const Layout>>(
{transposed_shape.layout()}),
client->addressable_devices()[0]->memory_spaces()[0]));
auto buffer = transfer_manager->RetrieveBuffer(0);
absl::Mutex mu;
auto literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
bool got_literal = false;
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, [&]() {}));
buffer->ToLiteral(literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
buffer.reset();
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<int32_t>(),
literal->Relayout(src_literal.shape().layout()).data<int32_t>());
}
TEST(StreamExecutorGpuClientTest, ToLiteralAsyncBeforeBufferReady) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
auto src_literal = LiteralUtil::CreateR1<float>({41.0f, 42.0f, 43.0f, 44.0f});
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
{src_literal.shape()}, client->addressable_devices()[0]));
auto buffer = transfer_manager->RetrieveBuffer(0);
absl::Mutex mu;
auto literal = std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
bool got_literal = false;
buffer->ToLiteral(literal.get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
got_literal = true;
});
absl::SleepFor(absl::Milliseconds(10));
ASSERT_FALSE(got_literal);
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, [&]() {}));
buffer.reset();
{
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&got_literal));
}
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<float>(),
literal->Relayout(src_literal.shape().layout()).data<float>());
}
TEST(StreamExecutorGpuClientTest, FromHostAsync) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
std::vector<Literal> src_literals;
std::vector<Shape> src_shapes;
for (int i = 0; i < 4; ++i) {
std::vector<float> data(i + 1);
std::iota(data.begin(), data.end(), static_cast<float>(i + 10));
src_literals.emplace_back(LiteralUtil::CreateR1<float>(data));
src_shapes.push_back(src_literals.back().shape());
}
TF_ASSERT_OK_AND_ASSIGN(auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
src_shapes, client->addressable_devices()[0]));
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
for (int i = 0; i < src_shapes.size(); ++i) {
buffers.emplace_back(transfer_manager->RetrieveBuffer(i));
}
for (int i = 0; i < src_shapes.size(); ++i) {
TF_ASSERT_OK(transfer_manager->TransferRawDataToBuffer(
i,
absl::string_view(static_cast<char*>(src_literals[i].untyped_data()),
src_literals[i].size_bytes()),
[&]() {}));
}
absl::Mutex mu;
std::vector<std::shared_ptr<Literal>> literals;
int got_literal_count = 0;
int got_callback_count = 0;
for (auto& buffer : buffers) {
literals.push_back(std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape())));
buffer->ToLiteral(literals.back().get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_literal_count;
});
buffer->GetReadyFuture().OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_callback_count;
});
buffer.reset();
}
{
auto done = [&]() {
return got_literal_count == src_literals.size() &&
got_callback_count == src_literals.size();
};
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&done));
}
for (int i = 0; i < src_literals.size(); ++i) {
ASSERT_TRUE(
ShapeUtil::Compatible(src_literals[i].shape(), literals[i]->shape()));
ASSERT_EQ(
src_literals[i].data<float>(),
literals[i]->Relayout(src_literals[i].shape().layout()).data<float>());
}
}
TEST(StreamExecutorGpuClientTest, FromHostAsyncPinnedHost) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
TF_ASSERT_OK_AND_ASSIGN(
auto* pinned_memory_space,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
std::vector<Literal> src_literals;
std::vector<Shape> src_shapes;
for (int i = 0; i < 4; ++i) {
std::vector<float> data(i + 1);
std::iota(data.begin(), data.end(), static_cast<float>(i + 10));
src_literals.emplace_back(LiteralUtil::CreateR1<float>(data));
src_shapes.push_back(src_literals.back().shape());
}
TF_ASSERT_OK_AND_ASSIGN(auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
src_shapes, pinned_memory_space));
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
for (int i = 0; i < src_shapes.size(); ++i) {
buffers.emplace_back(transfer_manager->RetrieveBuffer(i));
}
for (int i = 0; i < src_shapes.size(); ++i) {
TF_ASSERT_OK(transfer_manager->TransferRawDataToBuffer(
i,
absl::string_view(static_cast<char*>(src_literals[i].untyped_data()),
src_literals[i].size_bytes()),
[&]() {}));
}
absl::Mutex mu;
std::vector<std::shared_ptr<Literal>> literals;
int got_literal_count = 0;
int got_callback_count = 0;
for (auto& buffer : buffers) {
literals.push_back(std::make_shared<Literal>(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape())));
buffer->ToLiteral(literals.back().get()).OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_literal_count;
});
buffer->GetReadyFuture().OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_callback_count;
});
buffer.reset();
}
{
auto done = [&]() {
return got_literal_count == src_literals.size() &&
got_callback_count == src_literals.size();
};
absl::MutexLock l(&mu);
mu.Await(absl::Condition(&done));
}
for (int i = 0; i < src_literals.size(); ++i) {
ASSERT_TRUE(
ShapeUtil::Compatible(src_literals[i].shape(), literals[i]->shape()));
ASSERT_EQ(
src_literals[i].data<float>(),
literals[i]->Relayout(src_literals[i].shape().layout()).data<float>());
}
}
TEST(StreamExecutorGpuClientTest, FromHostAsyncPinnedHostChunked) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_THAT(client->addressable_devices(), SizeIs(Gt(0)));
TF_ASSERT_OK_AND_ASSIGN(
PjRtMemorySpace * memspace,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
std::vector<float> data{1, 3, 5, 7, 11, 13, 17, 19};
Shape shape = ShapeUtil::MakeShape(F32, {static_cast<int64_t>(data.size())});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager> txm,
client->CreateBuffersForAsyncHostToDevice({shape}, memspace));
std::unique_ptr<PjRtBuffer> buf = txm->RetrieveBuffer(0);
ASSERT_THAT(buf->GetReadyFuture().IsReady(), Eq(false));
absl::string_view raw_view(reinterpret_cast<char*>(data.data()),
data.size() * sizeof(data[0]));
int offset = 0;
while (true) {
int end = offset + 3;
if (end > raw_view.size()) {
end = raw_view.size();
}
int sz = end - offset;
bool reaches_end = end == raw_view.size();
TF_ASSERT_OK(txm->TransferRawDataToSubBuffer(
0, raw_view.data() + offset, offset, sz, reaches_end,
[]() {}));
if (reaches_end) {
break;
}
offset = end;
}
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<Literal> lit, buf->ToLiteralSync());
EXPECT_THAT(lit->data<float>(), ElementsAreArray(data));
}
TEST(StreamExecutorGpuClientTest, DeleteBufferThenFulfillBufferNoDeadLock) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_THAT(client->addressable_devices(), SizeIs(Gt(0)));
TF_ASSERT_OK_AND_ASSIGN(
PjRtMemorySpace * memspace,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
std::vector<float> data{1, 3, 5, 7, 11, 13, 17, 19};
Shape shape = ShapeUtil::MakeShape(F32, {static_cast<int64_t>(data.size())});
std::vector<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
txms;
for (int i = 0; i < 10000; ++i) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager> txm,
client->CreateBuffersForAsyncHostToDevice({shape}, memspace));
std::unique_ptr<PjRtBuffer> buf = txm->RetrieveBuffer(0);
ASSERT_THAT(buf->GetReadyFuture().IsReady(), Eq(false));
txms.push_back(std::move(txm));
}
absl::string_view raw_view(reinterpret_cast<char*>(data.data()),
data.size() * sizeof(data[0]));
for (auto& txm : txms) {
int offset = 0;
while (true) {
int end = offset + 3;
if (end > raw_view.size()) {
end = raw_view.size();
}
int sz = end - offset;
bool reaches_end = end == raw_view.size();
TF_ASSERT_OK(txm->TransferRawDataToSubBuffer(
0, raw_view.data() + offset, offset, sz, reaches_end,
[]() {}));
if (reaches_end) {
break;
}
offset = end;
}
}
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostFullBuffer) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
  // aligned_alloc takes (alignment, size); allocate the full on-device size.
  void* dst =
      aligned_alloc(sizeof(float), buffer->GetOnDeviceSizeInBytes().value());
auto result =
buffer->CopyRawToHost(dst, 0, buffer->GetOnDeviceSizeInBytes().value());
TF_EXPECT_OK(result.Await());
EXPECT_EQ(*(static_cast<float*>(dst)), 41.0f);
EXPECT_EQ(*(static_cast<float*>(dst) + 1), 42.0f);
free(dst);
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostSubBuffer) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
  void* dst =
      aligned_alloc(sizeof(float), buffer->GetOnDeviceSizeInBytes().value());
auto result = buffer->CopyRawToHost(dst, 0, sizeof(float));
TF_EXPECT_OK(result.Await());
EXPECT_EQ(*(static_cast<float*>(dst)), 41.0f);
free(dst);
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostOutOfRange) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
  void* dst =
      aligned_alloc(sizeof(float), buffer->GetOnDeviceSizeInBytes().value());
auto result =
buffer->CopyRawToHost(dst, 1, buffer->GetOnDeviceSizeInBytes().value());
EXPECT_THAT(result.Await(), StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid offset 1")));
free(dst);
}
TEST(StreamExecutorGpuClientTest, CopyRawToHostFuture) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto literal = xla::LiteralUtil::CreateR1<float>({41.0f, 42.0f});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, client->addressable_devices()[0]));
auto dst_promise = xla::PjRtFuture<void*>::CreatePromise();
xla::PjRtFuture<void*> dst_future(dst_promise);
TF_ASSERT_OK_AND_ASSIGN(int64_t size, buffer->GetOnDeviceSizeInBytes());
auto ready = buffer->GetReadyFuture();
auto result = buffer->CopyRawToHostFuture(dst_future, 0, size);
buffer.reset();
ready.OnReady([dst_promise = std::move(dst_promise),
size](absl::Status status) mutable {
    dst_promise.Set(aligned_alloc(sizeof(float), size));
});
TF_EXPECT_OK(result.Await());
TF_ASSERT_OK_AND_ASSIGN(auto* dst, dst_future.Await());
EXPECT_EQ(*(static_cast<float*>(dst)), 41.0f);
EXPECT_EQ(*(static_cast<float*>(dst) + 1), 42.0f);
free(dst);
}
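// Starts a device-to-device copy before the source data has been transferred,
// then checks that the destination receives the source literal.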
TEST(StreamExecutorGpuClientTest, AsyncCopyToDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 2);
auto* d0 = client->addressable_devices()[0];
auto* d1 = client->addressable_devices()[1];
auto src_literal = LiteralUtil::CreateR1<float>({41.0f, 42.0f, 43.0f, 44.0f});
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice({src_literal.shape()}, d0));
auto src_buffer = transfer_manager->RetrieveBuffer(0);
auto local_recv_buffer = *src_buffer->CopyToDevice(d1);
TF_ASSERT_OK(
transfer_manager->TransferLiteralToBuffer(0, src_literal, []() {}));
auto literal = std::make_shared<Literal>(src_literal.shape());
auto local_recv_literal = local_recv_buffer->ToLiteral(literal.get());
TF_EXPECT_OK(local_recv_literal.Await());
ASSERT_TRUE(ShapeUtil::Compatible(src_literal.shape(), literal->shape()));
ASSERT_EQ(src_literal.data<float>(),
literal->Relayout(src_literal.shape().layout()).data<float>());
}
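// Mixes successfully transferred buffers with buffers poisoned via
// SetBufferError and checks that every ready future reports the expected
// status.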
TEST(StreamExecutorGpuClientTest, CreateMixOfErrorBuffers) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 1);
std::vector<Literal> src_literals;
std::vector<Shape> src_shapes;
for (int i = 0; i < 4; ++i) {
std::vector<float> data(i + 1);
std::iota(data.begin(), data.end(), static_cast<float>(i + 10));
src_literals.emplace_back(LiteralUtil::CreateR1<float>(data));
src_shapes.push_back(src_literals.back().shape());
}
TF_ASSERT_OK_AND_ASSIGN(
auto transfer_manager,
client->CreateBuffersForAsyncHostToDevice(
src_shapes, client->addressable_devices()[0]->memory_spaces()[0]));
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
for (int i = 0; i < src_shapes.size(); ++i) {
buffers.emplace_back(transfer_manager->RetrieveBuffer(i));
}
absl::Mutex mu;
int got_callback_count = 0;
for (int i = 0; i < 4; ++i) {
auto& buffer = buffers[i];
if (i == 0 || i == 3) {
TF_ASSERT_OK(transfer_manager->TransferLiteralToBuffer(i, src_literals[i],
[&]() {}));
buffer->GetReadyFuture().OnReady([&](absl::Status s) {
absl::MutexLock l(&mu);
TF_ASSERT_OK(s);
++got_callback_count;
});
} else {
absl::Status error = Internal("error %d", i);
transfer_manager->SetBufferError(i, error);
buffer->GetReadyFuture().OnReady(
[error, &mu, &got_callback_count](absl::Status s) {
absl::MutexLock l(&mu);
ASSERT_EQ(s, error);
++got_callback_count;
});
}
buffer.reset();
}
{
auto done = [&]() { return got_callback_count == src_literals.size(); };
absl::MutexLock l(&mu);
QCHECK(mu.AwaitWithTimeout(absl::Condition(&done), absl::Seconds(60)));
}
}
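// Round-trip coverage for GpuTopology <-> GpuTopologyProto conversion.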
TEST(GpuTopology, FromProto) {
GpuTopologyProto msg;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
device_ids: [ 3, 2, 1 ]
platform_version: "platform_version"
num_slices: 2
num_hosts_per_slice: 1
num_devices_per_host: 3
)pb",
&msg));
std::unique_ptr<const GpuTopology> gpu_topology = GpuTopology::FromProto(msg);
EXPECT_THAT(gpu_topology->device_ids(), ElementsAre(3, 2, 1));
EXPECT_THAT(gpu_topology->platform_version(), "platform_version");
EXPECT_THAT(gpu_topology->num_slices(), 2);
EXPECT_THAT(gpu_topology->num_hosts_per_slice(), 1);
EXPECT_THAT(gpu_topology->num_devices_per_host(), 3);
}
TEST(GpuTopology, ToProto) {
GpuTopology gpu_topology({3, 2, 1},
"platform_version",
2,
1,
3);
GpuTopologyProto msg = gpu_topology.ToProto();
EXPECT_THAT(msg.device_ids(), ElementsAre(3, 2, 1));
EXPECT_THAT(msg.platform_version(), "platform_version");
EXPECT_THAT(msg.num_slices(), 2);
EXPECT_THAT(msg.num_hosts_per_slice(), 1);
EXPECT_THAT(msg.num_devices_per_host(), 3);
}
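// Brings up one client per node over a shared in-memory key-value store and
// checks the per-node and global device counts.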
TEST(StreamExecutorGpuClientTest, DistributedInit) {
auto kv_store = std::make_shared<InMemoryKeyValueStore>();
  tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "DistributedInit", 4);
int num_nodes = 2;
for (int i = 0; i < num_nodes; i++) {
thread_pool.Schedule([kv_store, i, num_nodes] {
GpuClientOptions options;
options.node_id = i;
options.num_nodes = num_nodes;
options.kv_store = kv_store;
TF_ASSERT_OK_AND_ASSIGN(auto client, GetStreamExecutorGpuClient(options));
EXPECT_TRUE(client->platform_name() == "cuda" ||
client->platform_name() == "rocm");
EXPECT_EQ(client->addressable_device_count(), 2);
EXPECT_EQ(client->device_count(), 4);
});
}
}
TEST(StreamExecutorGpuClientTest, GetAllocatorStatsTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_GE(client->addressable_devices().size(), 2);
for (auto device : client->addressable_devices()) {
const xla::Literal literal = xla::LiteralUtil::CreateR0<int32_t>(0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtBuffer> buffer,
client->BufferFromHostLiteral(literal, device));
auto stats = device->GetAllocatorStats();
TF_ASSERT_OK(stats.status());
ASSERT_GT(stats.value().peak_bytes_in_use, 0);
}
}
TEST(StreamExecutorGpuClientTest, GpuDeviceDescriptionTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
for (int device_index = 0; device_index < client->device_count();
device_index++) {
auto coords =
static_cast<PjRtStreamExecutorDevice*>(client->devices()[device_index])
->description()
.coords();
EXPECT_EQ(coords[0], device_index);
}
}
TEST(StreamExecutorGpuClientTest, MockNcclClientTest) {
const int num_nodes = 4;
GpuClientOptions options;
options.num_nodes = num_nodes;
options.enable_mock_nccl = true;
TF_ASSERT_OK_AND_ASSIGN(auto client, GetStreamExecutorGpuClient(options));
auto devices_per_host = client->addressable_device_count();
EXPECT_EQ(devices_per_host, 2);
EXPECT_EQ(client->device_count(), devices_per_host * num_nodes);
for (int i = 0; i < client->device_count(); i++) {
auto device = client->devices()[i];
auto slice_index =
std::get<int64_t>(device->Attributes().at("slice_index"));
auto host_index = device->process_index();
EXPECT_EQ(slice_index, host_index);
}
}
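// Host data uploaded straight into the pinned-host memory space should behave
// as a CPU-resident buffer and round-trip the original values.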
TEST(StreamExecutorGpuClientTest, BufferFromHostBufferPinnedMemory) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto* pinned_memory_space,
client->addressable_devices()[0]->memory_space_by_kind(
PinnedHostMemorySpace::kKind));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
          /*byte_strides=*/std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
          /*on_done_with_host_buffer=*/nullptr, pinned_memory_space,
          /*device_layout=*/nullptr));
EXPECT_EQ(buffer->memory_space()->kind(), "pinned_host");
EXPECT_TRUE(buffer->IsOnCpu());
TF_ASSERT_OK_AND_ASSIGN(auto literal, buffer->ToLiteralSync());
std::vector<int32_t> expected{1, 2, 3, 4};
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
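// Copies a device buffer into the pinned-host memory space and verifies that
// the data survives the transfer.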
TEST(StreamExecutorGpuClientTest, CopyToPinnedHostMemorySpace) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShape(S32, {4});
auto device = client->addressable_devices()[0];
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
device));
EXPECT_EQ(buffer->memory_space()->kind(), "device");
auto* pinned_memory_space = device->memory_spaces()[1];
EXPECT_EQ(pinned_memory_space->kind_id(), PinnedHostMemorySpace::kKindId);
TF_ASSERT_OK_AND_ASSIGN(auto result,
buffer->CopyToMemorySpace(pinned_memory_space));
EXPECT_EQ(result->memory_space()->kind(), "pinned_host");
EXPECT_TRUE(result->IsOnCpu());
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<int32_t> expected{1, 2, 3, 4};
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int32_t>(expected),
*literal));
}
TEST(StreamExecutorGpuClientTest, CopyToPinnedHostMemorySpaceInt4) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
std::vector<int8_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShape(S4, {4});
auto device = client->addressable_devices()[0];
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
device));
EXPECT_EQ(buffer->memory_space()->kind(), "device");
auto* pinned_memory_space = device->memory_spaces()[1];
EXPECT_EQ(pinned_memory_space->kind_id(), PinnedHostMemorySpace::kKindId);
TF_ASSERT_OK_AND_ASSIGN(auto result,
buffer->CopyToMemorySpace(pinned_memory_space));
EXPECT_EQ(result->memory_space()->kind(), "pinned_host");
EXPECT_TRUE(result->IsOnCpu());
TF_ASSERT_OK_AND_ASSIGN(auto literal, result->ToLiteralSync());
std::vector<xla::s4> expected{xla::s4(1), xla::s4(2), xla::s4(3), xla::s4(4)};
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<xla::s4>(expected),
*literal));
}
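// Reads a pinned-host buffer through its opaque device-memory pointer, then
// feeds those raw bytes back into an HBM buffer and checks the round trip.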
TEST(StreamExecutorGpuClientTest, OpaqueDeviceMemoryDataPointer) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtClient> client,
GetStreamExecutorGpuClient(GpuClientOptions()));
ASSERT_THAT(client->addressable_devices(), SizeIs(Gt(0)));
PjRtDevice* device = client->addressable_devices()[0];
TF_ASSERT_OK_AND_ASSIGN(
PjRtMemorySpace * memspace,
device->memory_space_by_kind(PinnedHostMemorySpace::kKind));
std::vector<float> float_data{12.0, 34.0, 56.0, 78.0};
Shape shape = ShapeUtil::MakeShapeWithType<float>({4});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtBuffer> buf,
client->BufferFromHostBuffer(
static_cast<const void*>(float_data.data()), shape.element_type(),
shape.dimensions(), std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, memspace,
nullptr));
ASSERT_THAT(buf->IsOnCpu(), true);
TF_ASSERT_OK_AND_ASSIGN(size_t buf_sz, buf->GetOnDeviceSizeInBytes());
ASSERT_THAT(buf_sz, Ge(sizeof(float) * 4));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<PjRtBuffer::ExternalReference> ref,
buf->AcquireExternalReference());
TF_ASSERT_OK(buf->GetReadyFuture().Await());
const float* float_ptr =
reinterpret_cast<const float*>(ref->OpaqueDeviceMemoryDataPointer());
EXPECT_THAT(*float_ptr, FloatEq(12.0));
EXPECT_THAT(*(float_ptr + 1), FloatEq(34.0));
EXPECT_THAT(*(float_ptr + 2), FloatEq(56.0));
EXPECT_THAT(*(float_ptr + 3), FloatEq(78.0));
TF_ASSERT_OK_AND_ASSIGN(PjRtMemorySpace * default_ms,
device->default_memory_space());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager> txm,
client->CreateBuffersForAsyncHostToDevice({shape}, default_ms));
TF_ASSERT_OK(txm->TransferRawDataToBuffer(
0,
absl::string_view(
static_cast<const char*>(ref->OpaqueDeviceMemoryDataPointer()),
buf_sz),
[]() {}));
std::unique_ptr<PjRtBuffer> hbm_buf = txm->RetrieveBuffer(0);
EXPECT_THAT(hbm_buf->GetOnDeviceSizeInBytes(), IsOkAndHolds(buf_sz));
EXPECT_THAT(hbm_buf->HostShape(), IsOkAndHolds(shape));
TF_ASSERT_OK(hbm_buf->GetReadyFuture().Await());
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> literal,
hbm_buf->ToLiteralSync());
EXPECT_THAT(literal->data<float>(), ElementsAreArray(float_data));
}
namespace {
absl::StatusOr<std::unique_ptr<PjRtBuffer>> CreateDeviceBufferForTest(
xla::PjRtClient* client) {
auto device = client->addressable_devices()[0];
TF_EXPECT_OK(device->default_memory_space());
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(S32, {4}, {0});
TF_ASSIGN_OR_RETURN(
auto input, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, device));
EXPECT_EQ(input->memory_space()->kind(), "device");
return input;
}
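// HLO programs used by the host-offloading and collective-memory tests below.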
constexpr char const* kD2HProgram = R"(
HloModule f
ENTRY main.5 {
p = s32[4]{0} parameter(0)
ROOT cc = s32[4] custom-call(p),
custom_call_target="annotate_device_placement",
frontend_attributes={_xla_buffer_placement="pinned_host"}
}
)";
constexpr char const* kD2HProgramTupleOutput = R"(
HloModule f
ENTRY main.5 {
p = s32[4]{0} parameter(0)
cc = s32[4] custom-call(p),
custom_call_target="annotate_device_placement",
frontend_attributes={_xla_buffer_placement="pinned_host"}
ROOT tuple = (s32[4]{0}, s32[4]{0}) tuple(s32[4]{0} p, s32[4]{0} cc)
}
)";
constexpr char const* kCollectiveMemorySpaceOutput = R"(
HloModule jit__psum, entry_computation_layout={(s32[1,4]{1,0})->s32[4]{0}}
region_0.3 {
Arg_0.0 = s32[] parameter(0)
Arg_1.0 = s32[] parameter(1)
ROOT add.0 = s32[] add(Arg_0.0, Arg_1.0)
}
ENTRY main.10_spmd {
param = s32[1,4]{1,0} parameter(0)
reshape = s32[4]{0} reshape(param)
ROOT all-reduce = s32[4]{0} all-reduce(reshape), channel_id=1, to_apply=region_0.3
}
)";
}
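// Executing kD2HProgram should place the annotated output in pinned host
// memory rather than on the device.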
TEST(StreamExecutorGpuClientTest, ExecutePinnedHostOutputTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto input, CreateDeviceBufferForTest(client.get()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kD2HProgram, *client));
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable->Execute({{input.get()}}, ExecuteOptions()));
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
EXPECT_EQ(result_buffers[0]->memory_space()->kind(), "pinned_host");
TF_ASSERT_OK_AND_ASSIGN(auto memory_stats,
executable->GetCompiledMemoryStats());
EXPECT_EQ(memory_stats.output_size_in_bytes, 0);
EXPECT_EQ(memory_stats.host_output_size_in_bytes, 16);
}
TEST(StreamExecutorGpuClientTest, ExecutePinnedHostOutputTupleTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto input, CreateDeviceBufferForTest(client.get()));
Shape host_shape = input->on_device_shape();
host_shape.mutable_layout()->set_memory_space(Layout::kHostMemorySpace);
Shape out_shape =
ShapeUtil::MakeTupleShape({input->on_device_shape(), host_shape});
xla::CompileOptions options;
options.executable_build_options.set_result_layout(out_shape);
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
CompileExecutable(kD2HProgramTupleOutput, *client, options));
ExecuteOptions execute_options;
execute_options.untuple_result = true;
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable->Execute({{input.get()}}, execute_options));
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
EXPECT_EQ(result_buffers.size(), 2);
EXPECT_EQ(result_buffers[0]->memory_space()->kind(), "device");
EXPECT_EQ(result_buffers[1]->memory_space()->kind(), "pinned_host");
}
TEST(StreamExecutorGpuClientTest, ExecutablePinnedHostOutputMemoryKindTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto executable,
CompileExecutable(kD2HProgram, *client));
TF_ASSERT_OK_AND_ASSIGN(auto memory_kinds,
executable->GetOutputMemoryKinds());
EXPECT_EQ(memory_kinds.size(), 1);
EXPECT_EQ(memory_kinds[0].size(), 1);
EXPECT_EQ(memory_kinds[0][0], "pinned_host");
}
TEST(StreamExecutorGpuClientTest,
ExecutableCollectiveMemoryOutputMemoryKindTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
xla::CompileOptions options;
options.executable_build_options.mutable_debug_options()
->set_xla_gpu_enable_nccl_user_buffers(true);
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
CompileExecutable(kCollectiveMemorySpaceOutput, *client, options));
std::vector<int32_t> data{1, 2, 3, 4};
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(S32, {1, 4},
{1, 0});
shape.mutable_layout()->set_memory_space(Layout::kDefaultMemorySpace);
auto device = client->addressable_devices()[0];
TF_EXPECT_OK(device->default_memory_space());
TF_ASSERT_OK_AND_ASSIGN(
auto input, client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, device));
EXPECT_EQ(input->memory_space()->kind(), "device");
TF_ASSERT_OK_AND_ASSIGN(auto memory_kinds,
executable->GetOutputMemoryKinds());
EXPECT_EQ(memory_kinds.size(), 1);
EXPECT_EQ(memory_kinds[0].size(), 1);
EXPECT_EQ(memory_kinds[0][0], "device");
TF_ASSERT_OK_AND_ASSIGN(
auto result, executable->Execute({{input.get()}}, ExecuteOptions()));
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
EXPECT_EQ(result_buffers[0]->memory_space()->kind(), "device");
Shape result_shape = result_buffers[0]->on_device_shape();
auto memory_space = result_shape.layout().memory_space();
EXPECT_EQ(memory_space, 1);
}
TEST(StreamExecutorGpuClientTest,
ExecutablePinnedHostTupleOutputMemoryKindTest) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(S32, {4}, {0});
Shape host_shape = shape;
host_shape.mutable_layout()->set_memory_space(Layout::kHostMemorySpace);
Shape out_shape = ShapeUtil::MakeTupleShape({shape, host_shape});
xla::CompileOptions options;
options.executable_build_options.set_result_layout(out_shape);
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
CompileExecutable(kD2HProgramTupleOutput, *client, options));
TF_ASSERT_OK_AND_ASSIGN(auto memory_kinds,
executable->GetOutputMemoryKinds());
EXPECT_EQ(memory_kinds.size(), 1);
EXPECT_EQ(memory_kinds[0].size(), 2);
EXPECT_EQ(memory_kinds[0][0], "device");
EXPECT_EQ(memory_kinds[0][1], "pinned_host");
}
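// The mhlo.memory_kind attributes on MLIR parameters and results must be
// reflected in the compiled module's entry computation layout.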
TEST(StreamExecutorGpuClientTest, MlirParameterHostMemorySpaceIsSetInHlo) {
constexpr char kMlirH2D[] =
R"(
func.func public @main(%arg0: tensor<8x2xi32> {
mhlo.layout_mode = "{1,0}",
mhlo.memory_kind = "pinned_host",
mhlo.sharding = "{devices=[2,2]<=[4]}"
}) -> (tensor<8x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default",
mhlo.memory_kind = "device",
mhlo.sharding = "{devices=[2,2]<=[4]}"}) {
%0 = stablehlo.custom_call @annotate_device_placement(%arg0) {
has_side_effect = true,
mhlo.frontend_attributes = {_xla_buffer_placement = "device"}
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
return %0 : tensor<8x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module,
xla::ParseMlirModuleString(kMlirH2D, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout.memory_space(), Layout::kHostMemorySpace);
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout.memory_space(), Layout::kDefaultMemorySpace);
}
TEST(StreamExecutorGpuClientTest, MlirResultHostMemorySpaceIsSetInHlo) {
constexpr char kMlirD2H[] =
R"(
func.func public @main(%arg0: tensor<8x2xi32> {
mhlo.layout_mode = "{1,0}",
mhlo.memory_kind = "device",
mhlo.sharding = "{devices=[2,2]<=[4]}"
}) -> (tensor<8x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default",
mhlo.memory_kind = "pinned_host",
mhlo.sharding = "{devices=[2,2]<=[4]}"}) {
%0 = stablehlo.custom_call @annotate_device_placement(%arg0) {
has_side_effect = true,
mhlo.frontend_attributes = {_xla_buffer_placement = "pinned_host"}
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
return %0 : tensor<8x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module,
xla::ParseMlirModuleString(kMlirD2H, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout.memory_space(), Layout::kDefaultMemorySpace);
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout.memory_space(), Layout::kHostMemorySpace);
}
TEST(StreamExecutorGpuClientTest, MlirAutoResultLayoutIsSet) {
constexpr char kMlirWithParameterLayout[] =
R"(
func.func public @main(%arg0: tensor<2x4x2xi32> {
mhlo.layout_mode = "{2, 1, 0}"
}) -> (tensor<2x2x4xi32> {
jax.result_info = "",
mhlo.layout_mode = "auto"}) {
%0 = stablehlo.transpose %arg0, dims = [0, 2, 1]
: (tensor<2x4x2xi32>) -> tensor<2x2x4xi32>
return %0 : tensor<2x2x4xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module, xla::ParseMlirModuleString(
kMlirWithParameterLayout, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout, Layout({1, 2, 0}));
}
TEST(StreamExecutorGpuClientTest, MlirAutoParameterLayoutIsSet) {
constexpr char kMlirWithParameterLayout[] =
R"(
func.func public @main(%arg0: tensor<2x4x2xi32> {
mhlo.layout_mode = "auto"
}) -> (tensor<2x2x4xi32> {
jax.result_info = "",
mhlo.layout_mode = "{2, 1, 0}"}) {
%0 = stablehlo.transpose %arg0, dims = [0, 2, 1]
: (tensor<2x4x2xi32>) -> tensor<2x2x4xi32>
return %0 : tensor<2x2x4xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module, xla::ParseMlirModuleString(
kMlirWithParameterLayout, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout, Layout({1, 2, 0}));
}
TEST(StreamExecutorGpuClientTest, MlirParameterLayoutIsSetInHlo) {
constexpr char kMlirWithParameterLayout[] =
R"(
func.func public @main(%arg0: tensor<2x2x2xi32> {
mhlo.layout_mode = "{0, 2, 1}"
}) -> (tensor<2x2x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default"}) {
return %arg0 : tensor<2x2x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module, xla::ParseMlirModuleString(
kMlirWithParameterLayout, context));
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, {}));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout, Layout({0, 2, 1}));
}
TEST(StreamExecutorGpuClientTest, MlirParameterLayoutFromOptionsIsSetInHlo) {
constexpr char kMlirCopy[] =
R"(
func.func public @main(%arg0: tensor<2x2x2xi32> {
mhlo.layout_mode = "default"
}) -> (tensor<2x2x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "default"}) {
return %arg0 : tensor<2x2x2xi32>
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(auto module,
xla::ParseMlirModuleString(kMlirCopy, context));
xla::CompileOptions options;
options.argument_layouts = {
{ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2, 2}, {0, 2, 1})}};
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, options));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout, Layout({0, 2, 1}));
}
TEST(StreamExecutorGpuClientTest,
MlirResultHostMemorySpaceIsSetInHloWithShardingPropagation) {
constexpr absl::string_view mlir_mul_explicit_sharding_layout_and_memory =
R"mlir(
module @jit_f attributes {
mhlo.num_partitions = 2 : i32,
mhlo.num_replicas = 1 : i32
} {
func.func public @main(%arg0: tensor<8x2xi32> {
mhlo.layout_mode = "{1,0}",
mhlo.memory_kind = "device",
mhlo.sharding = "{devices=[1,2]<=[2]}"
}) -> (tensor<8x2xi32> {
jax.result_info = "",
mhlo.layout_mode = "{0,1}",
mhlo.memory_kind = "pinned_host"
}) {
%c = stablehlo.constant dense<2> : tensor<i32>
%0 = stablehlo.broadcast_in_dim %c, dims = []
: (tensor<i32>) -> tensor<8x2xi32>
%1 = stablehlo.multiply %arg0, %0 : tensor<8x2xi32>
%2 = stablehlo.custom_call @Sharding(%1) {
mhlo.sharding = "{devices=[1,2]<=[2]}"
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
%3 = stablehlo.custom_call @annotate_device_placement(%2) {
has_side_effect = true,
mhlo.frontend_attributes = {
_xla_buffer_placement = "pinned_host"
}
} : (tensor<8x2xi32>) -> tensor<8x2xi32>
return %3 : tensor<8x2xi32>
}
})mlir";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
auto module, xla::ParseMlirModuleString(
mlir_mul_explicit_sharding_layout_and_memory, context));
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
xla::CompileOptions options;
options.executable_build_options.set_num_partitions(2)
.set_use_spmd_partitioning(true)
.set_allow_spmd_sharding_propagation_to_output({true});
TF_ASSERT_OK_AND_ASSIGN(auto executable, client->Compile(*module, options));
TF_ASSERT_OK_AND_ASSIGN(auto modules, executable->GetHloModules());
auto first_param_layout =
modules[0]->entry_computation_layout().parameter_layout(0).layout();
EXPECT_EQ(first_param_layout.memory_space(), Layout::kDefaultMemorySpace);
auto result_layout =
modules[0]->entry_computation_layout().result_layout().layout();
EXPECT_EQ(result_layout,
Layout({0, 1}).set_memory_space(Layout::kHostMemorySpace));
EXPECT_EQ(executable->GetCompileOptions()
.value()
.executable_build_options.layout_canonicalization_callback(),
nullptr);
}
TEST(StreamExecutorGpuClientTest, GetDefaultLayout) {
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
auto shape = ShapeUtil::MakeShape(S4, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto layout,
client->GetDefaultLayout(shape.element_type(), shape.dimensions()));
EXPECT_EQ(layout.element_size_in_bits(), 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9884785-43c1-4307-b823-e878e158c6f3 | cpp | tensorflow/tensorflow | se_gpu_pjrt_compiler | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler.cc | third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler_test.cc | #include "xla/pjrt/gpu/se_gpu_pjrt_compiler.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/stream_executor/platform/initialize.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/client/local_client.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/stream_executor_executable.h"
#include "xla/pjrt/utils.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/gpu_compiler.h"
#include "xla/service/hlo_module_util.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/service/local_service.h"
#include "xla/service/local_service_utils.h"
#endif
#if GOOGLE_CUDA
#include "xla/service/gpu/nvptx_compiler.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#elif TENSORFLOW_USE_ROCM
#include "xla/service/gpu/amdgpu_compiler.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#endif
namespace xla {
namespace {
bool IsGpuClient(const PjRtClient& client) {
return client.platform_id() == CudaId() || client.platform_id() == RocmId() ||
client.platform_id() == SyclId();
}
bool IsSameTopology(const PjRtTopologyDescription& topology1,
const PjRtTopologyDescription& topology2) {
const StreamExecutorGpuTopologyDescription& gpu_topology1 =
tensorflow::down_cast<const StreamExecutorGpuTopologyDescription&>(
topology1);
const StreamExecutorGpuTopologyDescription& gpu_topology2 =
tensorflow::down_cast<const StreamExecutorGpuTopologyDescription&>(
topology2);
return gpu_topology1 == gpu_topology2;
}
absl::Status IsValidTopologyAndClientForCompile(
const PjRtTopologyDescription& topology, PjRtClient* client) {
if (client == nullptr) {
return absl::UnimplementedError(
"SE:GPU compiler requires non-null client.");
}
if (!IsGpuClient(*client)) {
return absl::InvalidArgumentError(
"SE:GPU compiler requires a GPU PjRtClient.");
}
TF_ASSIGN_OR_RETURN(auto client_topology, client->GetTopologyDescription());
if (!IsSameTopology(topology, *client_topology)) {
return absl::UnimplementedError(
"SE:GPU compiler requires the topology same as the one in the client.");
}
return absl::OkStatus();
}
}
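// Ahead-of-time GPU compilation of an XlaComputation. When a client is
// provided, compilation is delegated to it; otherwise a target config must be
// available from the compile options or the topology attributes.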
absl::StatusOr<std::unique_ptr<PjRtExecutable>>
StreamExecutorGpuCompiler::Compile(CompileOptions options,
const XlaComputation& computation,
const PjRtTopologyDescription& topology,
PjRtClient* client) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if GOOGLE_CUDA
auto gpu_compiler = gpu::NVPTXCompiler();
#else
auto gpu_compiler = gpu::AMDGPUCompiler();
#endif
CompileOptions input_options = options;
if (!options.target_config) {
if (client != nullptr) {
TF_RETURN_IF_ERROR(IsValidTopologyAndClientForCompile(topology, client));
return client->Compile(computation, options);
}
auto attr = topology.Attributes();
if (auto it = attr.find("target_config"); it != attr.end()) {
auto target_config_str = std::get<std::string>(it->second);
stream_executor::GpuTargetConfigProto gpu_target_config_proto;
if (!gpu_target_config_proto.ParseFromString(target_config_str)) {
return FailedPrecondition("Failed to parse GpuTargetConfigProto");
}
options.target_config.emplace(
Compiler::TargetConfig(gpu_target_config_proto));
} else {
return absl::UnimplementedError(
"Compilation without client and without target_config specified is "
"not implemented");
}
}
TF_RETURN_IF_ERROR(options.ApplyAllOptionOverrides());
std::vector<const Shape*> argument_layout_pointers;
TF_RETURN_IF_ERROR(DetermineArgumentLayoutsFromCompileOptions(
computation,
[](Shape shape) { return LayoutUtil::GetWithDefaultLayout(shape); },
options.argument_layouts, &options.executable_build_options,
&argument_layout_pointers));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModuleConfig> hlo_config,
GetHloModuleConfig(computation, argument_layout_pointers,
options.executable_build_options));
HloModuleProto hlo_module_proto = computation.proto();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
HloModule::CreateFromProto(hlo_module_proto, *hlo_config));
UpdateEntryComputationLayout(
hlo_module.get(), std::bind(&Compiler::DefaultDeviceShapeRepresentation,
&gpu_compiler, std::placeholders::_1));
DumpHloModuleIfEnabled(*hlo_module, kBeforeOptimizationsDumpName);
Compiler::CompileOptions opts;
opts.target_config = options.target_config;
AotCompilationOptions aot_options(gpu_compiler.PlatformId());
aot_options.set_target_config(*options.target_config);
aot_options.set_run_backend_only(
options.executable_build_options.run_backend_only());
const int num_replicas = hlo_module->config().replica_count();
const int num_partitions = hlo_module->config().num_partitions();
const std::string name = hlo_module->name();
const std::string fingerprint = hlo_module->GetFingerprint128();
const int num_outputs = hlo_module->result_shape().IsTuple()
? hlo_module->result_shape().tuple_shapes_size()
: 1;
auto unique_module_group =
std::make_unique<HloModuleGroup>(std::move(hlo_module));
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<AotCompilationResult>> aot_results,
gpu_compiler.CompileAheadOfTime(std::move(unique_module_group),
aot_options));
std::vector<std::vector<absl::string_view>> output_memory_kinds(1);
output_memory_kinds[0].resize(num_outputs,
StreamExecutorGpuHbmMemorySpace::kKind);
return std::make_unique<StreamExecutorExecutable>(
std::move(input_options), std::move(aot_results), num_replicas,
num_partitions, name, fingerprint, std::move(output_memory_kinds));
#else
return absl::InternalError(
"GPU Compilation requires the target to be built with CUDA or "
"ROCm.");
#endif
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>>
StreamExecutorGpuCompiler::Compile(CompileOptions options,
mlir::ModuleOp module,
const PjRtTopologyDescription& topology,
PjRtClient* client) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
CompileOptions input_options = options;
XlaComputation xla_computation;
TF_RETURN_IF_ERROR(MlirToXlaComputation(
module, xla_computation,
options.parameter_is_tupled_arguments,
false,
false));
return Compile(std::move(input_options), xla_computation, topology, client);
#else
return absl::InternalError(
"GPU AOT compilation requires the target to be built with CUDA or "
"ROCm.");
#endif
}
#if TENSORFLOW_USE_ROCM
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(pjrt_register_se_gpu_compiler, {
PjRtRegisterCompiler(RocmName(),
std::make_unique<StreamExecutorGpuCompiler>());
});
#else
STREAM_EXECUTOR_REGISTER_MODULE_INITIALIZER(pjrt_register_se_gpu_compiler, {
PjRtRegisterCompiler(CudaName(),
std::make_unique<StreamExecutorGpuCompiler>());
});
#endif
} | #include "xla/pjrt/gpu/se_gpu_pjrt_compiler.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Parser/Parser.h"
#include "xla/client/xla_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/pjrt/gpu/gpu_topology.h"
#include "xla/pjrt/gpu/se_gpu_pjrt_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
constexpr absl::string_view kProgram = R"(HloModule Computation
ENTRY Computation() -> s32[] {
ROOT result = s32[] constant(2)
})";
constexpr absl::string_view mlir_str = R"mlir(
module {
func.func @main() -> tensor<i32> {
%0 = mhlo.constant dense<2> : tensor<i32>
return %0 : tensor<i32>
}
})mlir";
absl::StatusOr<xla::XlaComputation> GetXlaComputation(
absl::string_view program) {
TF_ASSIGN_OR_RETURN(auto hlo_module,
xla::ParseAndReturnUnverifiedModule(program, {}));
return XlaComputation(hlo_module->ToProto());
}
std::shared_ptr<xla::GpuTopology> GetGpuTopology(
std::vector<int> device_ids, absl::string_view platform_version,
int num_slices, int num_hosts_per_slice, int num_devices_per_host,
int core_count_per_chip) {
return std::make_shared<xla::GpuTopology>(device_ids, platform_version,
num_slices, num_hosts_per_slice,
num_devices_per_host);
}
TEST(StreamExecutorGpuCompilerTest, NoClientXla) {
StreamExecutorGpuCompiler compiler;
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
TF_ASSERT_OK_AND_ASSIGN(auto computation, GetXlaComputation(kProgram));
EXPECT_THAT(compiler.Compile(xla::CompileOptions(), computation, topology,
nullptr),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, TopologyNotSameXla) {
StreamExecutorGpuCompiler compiler;
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto computation, GetXlaComputation(kProgram));
EXPECT_THAT(compiler.Compile(xla::CompileOptions(), computation, topology,
client.get()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, SuccessXla) {
StreamExecutorGpuCompiler compiler;
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto computation, GetXlaComputation(kProgram));
TF_ASSERT_OK_AND_ASSIGN(auto topology, client->GetTopologyDescription());
TF_ASSERT_OK_AND_ASSIGN(auto executable,
compiler.Compile(xla::CompileOptions(), computation,
*topology, client.get()));
const LoadOptions load_options;
TF_ASSERT_OK_AND_ASSIGN(auto loaded_executable,
client->Load(std::move(executable), load_options));
TF_ASSERT_OK_AND_ASSIGN(
auto result, loaded_executable->Execute({{}}, {}));
ASSERT_EQ(result.size(), 1);
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
ASSERT_EQ(result_buffers.size(), 1);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
result_buffers[0]->ToLiteralSync());
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR0(2), *result_literal));
}
TEST(StreamExecutorGpuCompilerTest, NoClientMlir) {
StreamExecutorGpuCompiler compiler;
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect, mlir::func::FuncDialect>();
auto mlir_module =
mlir::parseSourceString<mlir::ModuleOp>(mlir_str, &context);
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
EXPECT_THAT(
compiler.Compile(xla::CompileOptions(), mlir_module.get(), topology,
nullptr),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, TopologyNotSameMlir) {
StreamExecutorGpuCompiler compiler;
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect, mlir::func::FuncDialect>();
auto mlir_module =
mlir::parseSourceString<mlir::ModuleOp>(mlir_str, &context);
StreamExecutorGpuTopologyDescription topology(
CudaId(), CudaName(), GetGpuTopology({0, 1}, "Fake_device", 1, 1, 2, 10));
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
EXPECT_THAT(compiler.Compile(xla::CompileOptions(), mlir_module.get(),
topology, client.get()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(StreamExecutorGpuCompilerTest, SuccessMlir) {
StreamExecutorGpuCompiler compiler;
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect, mlir::func::FuncDialect>();
auto mlir_module =
mlir::parseSourceString<mlir::ModuleOp>(mlir_str, &context);
TF_ASSERT_OK_AND_ASSIGN(auto client,
GetStreamExecutorGpuClient(GpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto topology, client->GetTopologyDescription());
TF_ASSERT_OK_AND_ASSIGN(
auto executable,
compiler.Compile(xla::CompileOptions(), mlir_module.get(), *topology,
client.get()));
const LoadOptions load_options;
TF_ASSERT_OK_AND_ASSIGN(auto loaded_executable,
client->Load(std::move(executable), load_options));
TF_ASSERT_OK_AND_ASSIGN(
auto result, loaded_executable->Execute({{}}, {}));
ASSERT_EQ(result.size(), 1);
std::vector<std::unique_ptr<xla::PjRtBuffer>>& result_buffers = result[0];
ASSERT_EQ(result_buffers.size(), 1);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
result_buffers[0]->ToLiteralSync());
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR0(2), *result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/gpu/se_gpu_pjrt_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
880f9741-8dae-41d1-975d-c0afa4e453ed | cpp | tensorflow/tensorflow | cpu_topology | third_party/xla/xla/pjrt/cpu/cpu_topology.cc | third_party/xla/xla/pjrt/cpu/cpu_topology_test.cc | #include "xla/pjrt/cpu/cpu_topology.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/pjrt/cpu/cpu_topology.pb.h"
namespace xla {
std::unique_ptr<const CpuTopology> CpuTopology::FromProto(
const CpuTopologyProto& cpu_topology_proto) {
std::vector<CpuTopology::CpuDevice> devices;
devices.reserve(cpu_topology_proto.cpu_devices_size());
for (size_t i = 0; i < cpu_topology_proto.cpu_devices_size(); ++i) {
auto& cpu_device_proto = cpu_topology_proto.cpu_devices(i);
devices.push_back(CpuDevice{cpu_device_proto.process_index(),
cpu_device_proto.local_hardware_id()});
}
std::vector<std::string> machine_attributes;
machine_attributes.reserve(cpu_topology_proto.machine_attributes_size());
for (size_t i = 0; i < cpu_topology_proto.machine_attributes_size(); ++i) {
machine_attributes.push_back(cpu_topology_proto.machine_attributes(i));
}
return std::make_unique<CpuTopology>(std::move(devices),
std::move(machine_attributes));
}
CpuTopologyProto CpuTopology::ToProto() const {
CpuTopologyProto proto;
for (auto& cpu_device : cpu_devices_) {
auto* cpu_device_proto = proto.add_cpu_devices();
cpu_device_proto->set_process_index(cpu_device.process_id);
cpu_device_proto->set_local_hardware_id(cpu_device.local_device_id);
}
for (const std::string& machine_attribute : machine_attributes_) {
proto.add_machine_attributes(machine_attribute);
}
return proto;
}
} | #include "xla/pjrt/cpu/cpu_topology.h"
#include <memory>
#include "xla/pjrt/cpu/cpu_topology.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(CpuTopology, FromProto) {
CpuTopologyProto msg;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
cpu_devices:
[ { process_index: 2, local_hardware_id: 3 }]
machine_attributes: [ "x86_64", "Intel" ]
)pb",
&msg));
std::unique_ptr<const CpuTopology> cpu_topology = CpuTopology::FromProto(msg);
EXPECT_EQ(cpu_topology->devices().size(), 1);
EXPECT_EQ(cpu_topology->devices()[0].process_id, 2);
EXPECT_EQ(cpu_topology->devices()[0].local_device_id, 3);
EXPECT_EQ(cpu_topology->machine_attributes().size(), 2);
EXPECT_EQ(cpu_topology->machine_attributes()[0], "x86_64");
EXPECT_EQ(cpu_topology->machine_attributes()[1], "Intel");
}
TEST(CpuTopology, ToProto) {
CpuTopology cpu_topology({{2, 3}}, {"ab", "cd"});
CpuTopologyProto msg = cpu_topology.ToProto();
EXPECT_EQ(msg.cpu_devices_size(), 1);
EXPECT_EQ(msg.cpu_devices(0).process_index(), 2);
EXPECT_EQ(msg.cpu_devices(0).local_hardware_id(), 3);
EXPECT_EQ(msg.machine_attributes_size(), 2);
EXPECT_EQ(msg.machine_attributes(0), "ab");
EXPECT_EQ(msg.machine_attributes(1), "cd");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_topology.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_topology_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
69e4171f-276f-4c20-a4b6-d6c7373352d8 | cpp | tensorflow/tensorflow | gloo_collectives | third_party/xla/xla/pjrt/cpu/gloo_collectives.cc | third_party/xla/xla/pjrt/cpu/gloo_collectives_test.cc | #include "xla/pjrt/cpu/gloo_collectives.h"
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <exception>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "gloo/algorithm.h"
#include "gloo/allgather.h"
#include "gloo/allreduce.h"
#include "gloo/context.h"
#include "gloo/math.h"
#include "gloo/reduce_scatter.h"
#include "gloo/rendezvous/context.h"
#include "gloo/rendezvous/prefix_store.h"
#include "gloo/rendezvous/store.h"
#include "gloo/transport/device.h"
#include "gloo/transport/unbound_buffer.h"
#include "gloo/types.h"
#include "xla/primitive_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/global_device_id.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla::cpu {
GlooCollectivesCommunicator::GlooCollectivesCommunicator(
std::shared_ptr<gloo::Context> context)
: context_(std::move(context)) {}
GlooCollectivesCommunicator::~GlooCollectivesCommunicator() = default;
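// Binds the typed input/output spans for a Gloo all-reduce and selects the
// reduction function matching the requested ReductionKind.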
template <typename T>
static absl::Status SetAllReduceOptions(ReductionKind reduction_kind,
const void* input_buffer,
void* output_buffer,
size_t num_elements,
gloo::AllreduceOptions& options) {
options.setInput(reinterpret_cast<T*>(const_cast<void*>(input_buffer)),
num_elements);
options.setOutput(reinterpret_cast<T*>(const_cast<void*>(output_buffer)),
num_elements);
using ReductionFn = void (*)(void*, const void*, const void*, size_t);
switch (reduction_kind) {
case ReductionKind::SUM:
options.setReduceFunction(static_cast<ReductionFn>(&gloo::sum<T>));
break;
case ReductionKind::PRODUCT:
options.setReduceFunction(static_cast<ReductionFn>(&gloo::product<T>));
break;
case ReductionKind::MIN:
if constexpr (!is_complex_v<T>) {
options.setReduceFunction(static_cast<ReductionFn>(&gloo::min<T>));
} else {
return absl::InvalidArgumentError(
"MIN reduction not supported for complex types");
}
break;
case ReductionKind::MAX:
if constexpr (!is_complex_v<T>) {
options.setReduceFunction(static_cast<ReductionFn>(&gloo::max<T>));
} else {
return absl::InvalidArgumentError(
"MAX reduction not supported for complex types");
}
break;
}
return absl::OkStatus();
}
absl::Status GlooCollectivesCommunicator::AllReduce(
const RendezvousKey& key, ReductionKind reduction_kind,
PrimitiveType element_type, size_t num_elements, const void* input_buffer,
void* output_buffer, absl::Duration timeout) {
gloo::AllreduceOptions options(context_);
switch (element_type) {
case S8:
TF_RETURN_IF_ERROR(SetAllReduceOptions<int8_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case PRED:
case U8:
TF_RETURN_IF_ERROR(SetAllReduceOptions<uint8_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case S16:
TF_RETURN_IF_ERROR(SetAllReduceOptions<int16_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case U16:
TF_RETURN_IF_ERROR(SetAllReduceOptions<uint16_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case S32:
TF_RETURN_IF_ERROR(SetAllReduceOptions<int32_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case U32:
TF_RETURN_IF_ERROR(SetAllReduceOptions<uint32_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case S64:
TF_RETURN_IF_ERROR(SetAllReduceOptions<int64_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case U64:
TF_RETURN_IF_ERROR(SetAllReduceOptions<uint64_t>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case F16:
TF_RETURN_IF_ERROR(SetAllReduceOptions<gloo::float16>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case BF16:
TF_RETURN_IF_ERROR(SetAllReduceOptions<bfloat16>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case F32:
TF_RETURN_IF_ERROR(SetAllReduceOptions<float>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case F64:
TF_RETURN_IF_ERROR(SetAllReduceOptions<double>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case C64:
TF_RETURN_IF_ERROR(SetAllReduceOptions<std::complex<float>>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
case C128:
TF_RETURN_IF_ERROR(SetAllReduceOptions<std::complex<double>>(
reduction_kind, input_buffer, output_buffer, num_elements, options));
break;
default:
return absl::InvalidArgumentError("Unknown datatype in allreduce");
}
options.setAlgorithm(gloo::AllreduceOptions::Algorithm::RING);
options.setTimeout(absl::ToChronoMilliseconds(timeout));
try {
gloo::allreduce(options);
} catch (std::exception& e) {
return absl::UnknownError(
absl::StrCat("Gloo all-reduce failed: ", e.what()));
}
return absl::OkStatus();
}
static constexpr uint8_t kCollectivePermuteSlotPrefix = 0x40;
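// Sends the local buffer to every target rank and receives from the optional
// source rank; with no source, the output buffer is zero-filled.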
absl::Status GlooCollectivesCommunicator::CollectivePermute(
const RendezvousKey& key, size_t num_bytes, std::optional<int> source_rank,
absl::Span<int const> target_ranks, const void* input_buffer,
void* output_buffer, absl::Duration timeout) {
uint32_t tag = 0;
const auto slot = gloo::Slot::build(kCollectivePermuteSlotPrefix, tag);
try {
std::unique_ptr<gloo::transport::UnboundBuffer> in;
std::unique_ptr<gloo::transport::UnboundBuffer> out;
for (int target : target_ranks) {
if (target != context_->rank) {
VLOG(1) << "send from " << context_->rank << " to " << target;
if (!in) {
in = context_->createUnboundBuffer(const_cast<void*>(input_buffer),
num_bytes);
}
in->send(target, slot);
}
}
if (source_rank) {
if (*source_rank == context_->rank) {
std::memcpy(output_buffer, input_buffer, num_bytes);
} else {
VLOG(1) << "recv at " << context_->rank << " from " << *source_rank;
out = context_->createUnboundBuffer(output_buffer, num_bytes);
out->recv(*source_rank, slot);
}
} else {
std::memset(output_buffer, 0, num_bytes);
}
VLOG(1) << "wait for send at " << context_->rank;
auto deadline = absl::ToChronoTime(absl::Now() + timeout);
if (in) {
in->waitSend(deadline);
}
VLOG(1) << "wait for recv at " << context_->rank;
if (out) {
out->waitRecv(deadline);
}
VLOG(1) << "done waiting at " << context_->rank;
} catch (std::exception& e) {
return absl::UnknownError(
absl::StrCat("Gloo collective permute failed: ", e.what()));
}
return absl::OkStatus();
}
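// Pairwise all-to-all: rank r sends input chunk i to rank i, receives output
// chunk i from rank i, and copies its own chunk locally.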
absl::Status GlooCollectivesCommunicator::AllToAll(
const RendezvousKey& key, size_t chunk_bytes,
absl::Span<const void* const> input_buffers,
absl::Span<void* const> output_buffers, absl::Duration timeout) {
uint32_t tag = 0;
int my_rank = context_->rank;
int world_size = context_->size;
TF_RET_CHECK(world_size == input_buffers.size());
TF_RET_CHECK(world_size == output_buffers.size());
try {
const auto slot = gloo::Slot::build(gloo::kAlltoallSlotPrefix, tag);
std::vector<std::unique_ptr<gloo::transport::UnboundBuffer>> ins(
context_->size);
std::vector<std::unique_ptr<gloo::transport::UnboundBuffer>> outs(
context_->size);
for (size_t i = 0; i < world_size; ++i) {
if (i != my_rank) {
ins[i] = context_->createUnboundBuffer(
const_cast<void*>(input_buffers[i]), chunk_bytes);
outs[i] = context_->createUnboundBuffer(output_buffers[i], chunk_bytes);
}
}
for (int i = 1; i < world_size; i++) {
int send_rank = (my_rank + i) % world_size;
int recv_rank = (my_rank + world_size - i) % world_size;
ins[send_rank]->send(send_rank, slot);
outs[recv_rank]->recv(recv_rank, slot);
}
std::memcpy(output_buffers[my_rank], input_buffers[my_rank], chunk_bytes);
auto deadline = absl::ToChronoTime(absl::Now() + timeout);
for (int i = 0; i < world_size; i++) {
if (i != my_rank) {
ins[i]->waitSend(deadline);
outs[i]->waitRecv(deadline);
}
}
} catch (std::exception& e) {
return absl::UnknownError(
absl::StrCat("Gloo all-to-all failed: ", e.what()));
}
return absl::OkStatus();
}
absl::Status GlooCollectivesCommunicator::AllGather(const RendezvousKey& key,
size_t chunk_bytes,
const void* input_buffer,
void* output_buffer,
absl::Duration timeout) {
uint32_t tag = 0;
gloo::AllgatherOptions options(context_);
options.setTag(tag);
options.setTimeout(absl::ToChronoMilliseconds(timeout));
options.setInput(reinterpret_cast<char*>(const_cast<void*>(input_buffer)),
chunk_bytes);
options.setOutput(reinterpret_cast<char*>(output_buffer),
chunk_bytes * context_->size);
try {
gloo::allgather(options);
} catch (std::exception& e) {
return absl::UnknownError(
absl::StrCat("Gloo AllGather failed: ", e.what()));
}
return absl::OkStatus();
}
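// Runs Gloo's halving-doubling reduce-scatter in place over the buffer, using
// the reduction function for the requested kind (complex types only support
// SUM and PRODUCT).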
template <typename T>
absl::Status ReduceScatterHelper(std::shared_ptr<gloo::Context> context,
ReductionKind reduction_kind, void* buffer,
size_t chunk_elems) {
const gloo::ReductionFunction<T>* reduction_function = nullptr;
if constexpr (is_complex_v<T>) {
switch (reduction_kind) {
case ReductionKind::SUM:
reduction_function = gloo::ReductionFunction<T>::sum;
break;
case ReductionKind::PRODUCT:
reduction_function = gloo::ReductionFunction<T>::product;
break;
default:
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported reduction kind: ", static_cast<int>(reduction_kind)));
}
} else {
switch (reduction_kind) {
case ReductionKind::SUM:
reduction_function = gloo::ReductionFunction<T>::sum;
break;
case ReductionKind::PRODUCT:
reduction_function = gloo::ReductionFunction<T>::product;
break;
case ReductionKind::MAX:
reduction_function = gloo::ReductionFunction<T>::max;
break;
case ReductionKind::MIN:
reduction_function = gloo::ReductionFunction<T>::min;
break;
default:
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported reduction kind: ", static_cast<int>(reduction_kind)));
}
}
try {
std::vector<int> recv_elems(context->size, chunk_elems);
gloo::ReduceScatterHalvingDoubling<T> algorithm(
context, std::vector<T*>{reinterpret_cast<T*>(buffer)},
chunk_elems * context->size, recv_elems, reduction_function);
algorithm.run();
} catch (std::exception& e) {
return absl::UnknownError(
absl::StrCat("Gloo ReduceScatter failed: ", e.what()));
}
return absl::OkStatus();
}
absl::Status GlooCollectivesCommunicator::ReduceScatter(
const RendezvousKey& key, ReductionKind reduction_kind,
PrimitiveType element_type, size_t chunk_elems, const void* input_buffer,
void* output_buffer, absl::Duration timeout) {
size_t chunk_bytes = chunk_elems * primitive_util::ByteWidth(element_type);
std::unique_ptr<char[]> temp(new char[chunk_bytes * context_->size]);
std::memcpy(temp.get(), input_buffer, chunk_bytes * context_->size);
switch (element_type) {
case S8:
TF_RETURN_IF_ERROR(ReduceScatterHelper<int8_t>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case PRED:
case U8:
TF_RETURN_IF_ERROR(ReduceScatterHelper<uint8_t>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case S16:
TF_RETURN_IF_ERROR(ReduceScatterHelper<int16_t>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case U16:
TF_RETURN_IF_ERROR(ReduceScatterHelper<uint16_t>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
case S32:
TF_RETURN_IF_ERROR(ReduceScatterHelper<int32_t>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case U32:
TF_RETURN_IF_ERROR(ReduceScatterHelper<uint32_t>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
case S64:
TF_RETURN_IF_ERROR(ReduceScatterHelper<int64_t>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case U64:
TF_RETURN_IF_ERROR(ReduceScatterHelper<uint64_t>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
case BF16:
TF_RETURN_IF_ERROR(ReduceScatterHelper<bfloat16>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
case F16:
TF_RETURN_IF_ERROR(ReduceScatterHelper<gloo::float16>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
case F32:
TF_RETURN_IF_ERROR(ReduceScatterHelper<float>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case F64:
TF_RETURN_IF_ERROR(ReduceScatterHelper<double>(context_, reduction_kind,
temp.get(), chunk_elems));
break;
case C64:
TF_RETURN_IF_ERROR(ReduceScatterHelper<std::complex<float>>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
case C128:
TF_RETURN_IF_ERROR(ReduceScatterHelper<std::complex<double>>(
context_, reduction_kind, temp.get(), chunk_elems));
break;
default:
return absl::InvalidArgumentError("Unknown datatype in reducescatter");
}
std::memcpy(output_buffer, temp.get(), chunk_bytes);
return absl::OkStatus();
}
GlooCollectives::GlooCollectives(
std::unique_ptr<gloo::rendezvous::Store> store,
std::shared_ptr<gloo::transport::Device> device)
: store_(std::move(store)), device_(std::move(device)) {}
GlooCollectives::~GlooCollectives() = default;
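// Lazily creates and caches a Gloo context for the given device group and
// rank, performing a full-mesh rendezvous through a prefixed store.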
absl::StatusOr<std::shared_ptr<CollectivesCommunicator>>
GlooCollectives::GetCommunicator(
absl::Span<GlobalDeviceId const> global_devices, int rank) {
Context* context;
{
absl::MutexLock lock(&mu_);
auto& context_ref = contexts_[std::make_tuple(
std::vector<GlobalDeviceId>(global_devices.begin(),
global_devices.end()),
rank)];
if (!context_ref) {
context_ref = std::make_unique<Context>();
}
context = context_ref.get();
}
absl::MutexLock context_lock(&context->mu);
if (context->communicator) {
return context->communicator;
}
auto gloo_context =
std::make_shared<gloo::rendezvous::Context>(rank, global_devices.size());
auto prefix_store = gloo::rendezvous::PrefixStore(
absl::StrCat("gloo/",
absl::StrJoin(global_devices, ",",
[](std::string* out, GlobalDeviceId id) {
absl::StrAppend(out, id.value());
})),
*store_);
try {
gloo_context->connectFullMesh(prefix_store, device_);
} catch (std::exception& e) {
return absl::UnknownError(
absl::StrCat("Gloo context initialization failed: ", e.what()));
}
context->communicator =
std::make_shared<GlooCollectivesCommunicator>(std::move(gloo_context));
return context->communicator;
}
} | #include "xla/pjrt/cpu/gloo_collectives.h"
#include <unistd.h>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#if defined(__linux__)
#include "gloo/transport/tcp/attr.h"
#include "gloo/transport/tcp/device.h"
#elif defined(__APPLE__)
#include "gloo/transport/uv/device.h"
#endif
#include "xla/executable_run_options.h"
#include "xla/pjrt/cpu/gloo_kv_store.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/global_device_id.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
using ::testing::Each;
using ::testing::Eq;
constexpr int kNumParticipants = 2;
constexpr size_t kBufferSize = 256;
constexpr absl::Duration kTimeout = absl::Seconds(5);
absl::StatusOr<std::shared_ptr<CollectivesCommunicator>> GetCommunicator(
    size_t num_participants, absl::Span<GlobalDeviceId const> global_devices,
const std::shared_ptr<xla::KeyValueStoreInterface>& kv_store, int rank) {
auto collectives = std::make_shared<cpu::GlooCollectives>(
std::make_unique<cpu::GlooKeyValueStore>(kv_store),
#if defined(__linux__)
gloo::transport::tcp::CreateDevice(gloo::transport::tcp::attr()));
#elif defined(__APPLE__)
gloo::transport::uv::CreateDevice(gloo::transport::uv::attr()));
#endif
return collectives->GetCommunicator(global_devices, rank);
}
RendezvousKey MakeRendezvousKey(std::vector<GlobalDeviceId> global_devices) {
return RendezvousKey(RunId(0), global_devices, kNumParticipants,
RendezvousKey::CollectiveOpKind::kCrossModule,
0);
}
absl::StatusOr<std::vector<uint8_t>> AllReduce(
const std::shared_ptr<xla::KeyValueStoreInterface>& kv_store,
const std::vector<uint8_t>& input_buffer,
std::vector<GlobalDeviceId> global_devices, int rank) {
std::vector<uint8_t> output_buffer(kBufferSize);
RendezvousKey rendezvous_key = MakeRendezvousKey(global_devices);
TF_ASSIGN_OR_RETURN(
auto communicator,
GetCommunicator(kNumParticipants, global_devices, kv_store, rank));
TF_RETURN_IF_ERROR(communicator->AllReduce(
rendezvous_key, xla::ReductionKind::SUM, xla::PrimitiveType::U8,
kBufferSize, input_buffer.data(), output_buffer.data(), kTimeout));
return output_buffer;
}
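// Each participant contributes a buffer filled with (rank + 1), so a SUM
// all-reduce should leave kNumParticipants * (kNumParticipants + 1) / 2 in
// every byte of every output buffer.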
TEST(GlooCollectives, AllReduce) {
std::vector<GlobalDeviceId> global_devices;
global_devices.reserve(kNumParticipants);
for (int rank = 0; rank < kNumParticipants; ++rank) {
global_devices.push_back(GlobalDeviceId(rank));
}
auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
std::vector<absl::StatusOr<std::vector<uint8_t>>> output_buffers(
kNumParticipants);
{
tsl::thread::ThreadPool thread_pool(
tsl::Env::Default(), "AllReduceParticipants", kNumParticipants);
for (int rank = 0; rank < kNumParticipants; ++rank) {
thread_pool.Schedule(
[rank, &output_buffers, &kv_store, &global_devices]() {
std::vector<uint8_t> input_buffer(kBufferSize, rank + 1);
output_buffers[rank] =
AllReduce(kv_store, input_buffer, global_devices, rank);
});
}
}
for (int rank = 0; rank < kNumParticipants; ++rank) {
TF_ASSERT_OK(output_buffers[rank].status());
}
for (int rank = 0; rank < kNumParticipants; ++rank) {
EXPECT_THAT(output_buffers[rank].value(),
Each(Eq(kNumParticipants * (kNumParticipants + 1) / 2)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/gloo_collectives.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/gloo_collectives_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de686d71-386c-4413-83f6-6378d7e0f65c | cpp | tensorflow/tensorflow | cpu_client | third_party/xla/xla/pjrt/cpu/cpu_client.cc | third_party/xla/xla/pjrt/cpu/cpu_client_test.cc | #include "xla/pjrt/cpu/cpu_client.h"
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cfenv>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "mlir/IR/BuiltinOps.h"
#include "xla/array.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/backends/cpu/runtime/thunk_executor.h"
#include "xla/client/executable_build_options.h"
#include "xla/debug_options_flags.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/cpu/abstract_tfrt_cpu_buffer.h"
#include "xla/pjrt/cpu/cpu_topology.h"
#include "xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/semaphore.h"
#include "xla/pjrt/transpose.h"
#include "xla/pjrt/utils.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_compiler.h"
#include "xla/service/cpu/cpu_event.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/cpu_xfeed.h"
#include "xla/service/cpu/simple_orc_jit.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_module_util.h"
#include "xla/service/hlo_value.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/setround.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace {
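// Allocates an uninitialized destination buffer on `device`; its contents
// become readable once all `definition_events` are triggered.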
absl::StatusOr<std::unique_ptr<TfrtCpuBuffer>> AllocateDestinationBuffer(
const Shape& on_device_shape,
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events,
TfrtCpuDevice* device, TfrtCpuClient* client) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
AbstractTfrtCpuBuffer::AllocateTrackedDeviceBuffer(
on_device_shape, std::move(definition_events)));
return std::make_unique<TfrtCpuBuffer>(
on_device_shape, std::move(tracked_device_buffer), client, device,
*device->default_memory_space());
}
absl::StatusOr<std::unique_ptr<TfrtCpuBuffer>> AllocateDestinationBufferAndAvs(
const Shape& shape,
absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4>* avs,
TfrtCpuDevice* device, TfrtCpuClient* client) {
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events;
AbstractTfrtCpuBuffer::AllocateAvsAndEvents(shape, avs, &definition_events);
return AllocateDestinationBuffer(
shape, std::move(definition_events),
tensorflow::down_cast<TfrtCpuDevice*>(device), client);
}
const char kCpuPlatformName[] = "cpu";
void EnqueueWork(tsl::thread::ThreadPool* pool,
absl::AnyInvocable<void()> callee) {
pool->Schedule([ptr = new absl::AnyInvocable<void()>(std::move(callee))]() {
(*ptr)();
delete ptr;
});
}
void EnqueueWorkWhenReady(
tsl::thread::ThreadPool* pool,
absl::Span<const tsl::RCReference<tsl::AsyncValue>> values,
absl::AnyInvocable<void()> callee) {
RunWhenReady(values, [pool, callee = std::move(callee)]() mutable {
EnqueueWork(pool, std::move(callee));
});
}
class ThreadPoolAsyncWorkRunner : public AsyncWorkRunner {
public:
explicit ThreadPoolAsyncWorkRunner(tsl::thread::ThreadPool* pool)
: pool_(pool) {}
void Schedule(absl::AnyInvocable<void()> work) override {
EnqueueWork(pool_, std::move(work));
}
void ScheduleWhenReady(
absl::Span<const tsl::RCReference<tsl::AsyncValue>> values,
absl::AnyInvocable<void()> work) override {
EnqueueWorkWhenReady(pool_, values, std::move(work));
}
private:
tsl::thread::ThreadPool* pool_;
};
class TfrtCpuAsyncHostToDeviceTransferManager
: public AbstractAsyncHostToHostMemoryTransferManager {
public:
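  // Allocates one destination buffer (with its definition event) per shape.
  // Tuple shapes are not supported.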
static absl::StatusOr<
std::unique_ptr<TfrtCpuAsyncHostToDeviceTransferManager>>
Create(absl::Span<const Shape> shapes, TfrtCpuDevice* device,
TfrtCpuClient* client) {
absl::InlinedVector<std::unique_ptr<AbstractTfrtCpuBuffer>, 4> buffers;
buffers.reserve(shapes.size());
absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> avs;
avs.reserve(shapes.size());
for (const auto& shape : shapes) {
if (shape.IsTuple()) {
return Unimplemented(
"Tuples are not supported by "
"TfrtCpuAsyncHostToDeviceTransferManager");
}
absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> local_avs;
TF_ASSIGN_OR_RETURN(auto buffer, AllocateDestinationBufferAndAvs(
shape, &local_avs, device, client));
CHECK_EQ(local_avs.size(), 1);
avs.push_back(std::move(local_avs[0]));
buffers.push_back(std::move(buffer));
}
absl::InlinedVector<TrackedTfrtCpuDeviceBuffer*, 4> device_buffers;
absl::InlinedVector<size_t, 4> buffer_sizes;
absl::InlinedVector<int64_t, 4> buffer_transfers_in_flight;
absl::InlinedVector<bool, 4> last_transfer_finished;
TF_RETURN_IF_ERROR(
AbstractAsyncHostToHostMemoryTransferManager::
PopulateAsyncTransferManagerData(
buffers, device_buffers, buffer_sizes,
buffer_transfers_in_flight, last_transfer_finished));
return absl::WrapUnique(new TfrtCpuAsyncHostToDeviceTransferManager(
std::move(avs), std::move(buffers), std::move(device_buffers),
std::move(buffer_sizes), std::move(buffer_transfers_in_flight),
std::move(last_transfer_finished), client->async_work_runner(),
device));
}
PjRtDevice* device() const override { return device_; }
private:
TfrtCpuAsyncHostToDeviceTransferManager(
absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> avs,
absl::InlinedVector<std::unique_ptr<AbstractTfrtCpuBuffer>, 4> buffers,
absl::InlinedVector<TrackedTfrtCpuDeviceBuffer*, 4> device_buffers,
absl::InlinedVector<size_t, 4> buffer_sizes,
absl::InlinedVector<int64_t, 4> buffer_transfers_in_flight,
absl::InlinedVector<bool, 4> last_transfer_finished,
AsyncWorkRunner* async_work_runner, TfrtCpuDevice* device)
: AbstractAsyncHostToHostMemoryTransferManager(
std::move(avs), std::move(buffers), std::move(device_buffers),
std::move(buffer_sizes), std::move(buffer_transfers_in_flight),
std::move(last_transfer_finished), async_work_runner),
device_(device) {}
TfrtCpuDevice* device_;
};
}
TfrtCpuDeviceDescription::TfrtCpuDeviceDescription(int process_id,
int local_device_id)
: id_(PackCpuDeviceId(process_id, local_device_id)),
process_index_(process_id),
local_hardware_id_(local_device_id) {
debug_string_ = absl::StrCat("TFRT_CPU_", id_.value());
to_string_ = absl::StrCat("CpuDevice(id=", id_.value(), ")");
}
absl::string_view TfrtCpuDeviceDescription::device_kind() const {
return kCpuPlatformName;
}
absl::string_view TfrtCpuDeviceDescription::DebugString() const {
return debug_string_;
}
absl::string_view TfrtCpuDeviceDescription::ToString() const {
return to_string_;
}
TfrtCpuTopologyDescription TfrtCpuTopologyDescription::Create(
PjRtPlatformId platform_id, absl::string_view platform_name,
absl::string_view platform_version,
absl::Span<const std::unique_ptr<TfrtCpuDevice>> devices,
absl::Span<const std::string> machine_attributes) {
std::vector<CpuTopology::CpuDevice> cpu_devices;
cpu_devices.reserve(devices.size());
for (auto& device : devices) {
cpu_devices.push_back(CpuTopology::CpuDevice{
device->process_index(), device->local_hardware_id().value()});
}
return TfrtCpuTopologyDescription(platform_id, platform_name,
platform_version, cpu_devices,
machine_attributes);
}
absl::StatusOr<Layout> TfrtCpuTopologyDescription::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) const {
Shape shape = ShapeUtil::MakeShape(element_type, dims);
return LayoutUtil::GetWithDefaultLayout(shape).layout();
}
absl::StatusOr<std::string> TfrtCpuTopologyDescription::Serialize() const {
std::string result;
if (!tsl::SerializeToStringDeterministic(cpu_topology_.ToProto(), &result)) {
return absl::InternalError("Failed to serialize cpu_topology");
}
return result;
}
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
TfrtCpuTopologyDescription::DeviceDescriptions() const {
std::vector<std::unique_ptr<const PjRtDeviceDescription>> devices;
devices.reserve(cpu_topology_.number_of_devices());
for (const CpuTopology::CpuDevice& device : cpu_topology_.devices()) {
devices.push_back(std::make_unique<TfrtCpuDeviceDescription>(
device.process_id, device.local_device_id));
}
return devices;
}
TfrtCpuDevice::TfrtCpuDevice(int process_id, int local_device_id,
int max_inflight_computations)
: description_(process_id, local_device_id),
max_inflight_computations_semaphore_(
max_inflight_computations) {}
absl::Status TfrtCpuDevice::TransferToInfeed(const LiteralSlice& literal) {
return TransferLiteralToInfeedOnCpu(local_hardware_id().value(), literal);
}
absl::Status TfrtCpuDevice::TransferFromOutfeed(
MutableBorrowingLiteral literal) {
return TransferLiteralFromOutfeedOnCpu(local_hardware_id().value(), literal);
}
void TfrtCpuDevice::AttachMemorySpace(PjRtMemorySpace* memory_space) {
CHECK(memory_space != nullptr);
CHECK(client_ == memory_space->client()) << absl::StrFormat(
"Could not attach a TfrtCpuDevice to a PjRtMemorySpace owned by a "
"different client, the device's client: %s, the memory space's client: "
"%s.",
client_->platform_name(), memory_space->client()->platform_name());
memory_spaces_.push_back(memory_space);
memory_spaces_by_id_.emplace(memory_space->kind_id(), memory_space);
}
absl::Span<PjRtMemorySpace* const> TfrtCpuDevice::memory_spaces() const {
return memory_spaces_;
}
absl::StatusOr<PjRtMemorySpace*> TfrtCpuDevice::default_memory_space() const {
return memory_space_by_kind_id(UnpinnedHostMemorySpace::kKindId);
}
absl::StatusOr<PjRtMemorySpace*> TfrtCpuDevice::memory_space_by_kind(
absl::string_view memory_space_kind) const {
auto it =
absl::c_find_if(memory_spaces_, [memory_space_kind](PjRtMemorySpace* ms) {
return ms->kind() == memory_space_kind;
});
if (it != memory_spaces_.end()) {
return *it;
}
return absl::InternalError(
absl::StrCat("No memory space found (kind: ", memory_space_kind, ")"));
}
absl::StatusOr<PjRtMemorySpace*> TfrtCpuDevice::memory_space_by_kind_id(
int id) const {
auto it = memory_spaces_by_id_.find(id);
if (it == memory_spaces_by_id_.end()) {
return absl::InternalError(
absl::StrCat("No memory space found (kind_id: ", id, ")"));
}
return it->second;
}
static int CpuDeviceCount() {
return GetDebugOptionsFromFlags().xla_force_host_platform_device_count();
}
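// Builds a TfrtCpuClient with one TfrtCpuDevice per requested device
// (defaulting to the xla_force_host_platform_device_count flag).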
absl::StatusOr<std::unique_ptr<PjRtClient>> GetTfrtCpuClient(
const CpuClientOptions& options) {
int cpu_device_count = options.cpu_device_count.value_or(CpuDeviceCount());
size_t num_threads = std::max(DefaultThreadPoolSize(), cpu_device_count);
std::vector<std::unique_ptr<TfrtCpuDevice>> devices;
for (int i = 0; i < cpu_device_count; ++i) {
auto device = std::make_unique<TfrtCpuDevice>(
options.process_id, i,
options.max_inflight_computations_per_device);
devices.push_back(std::move(device));
}
return std::unique_ptr<PjRtClient>(std::make_unique<TfrtCpuClient>(
options.process_id, std::move(devices), std::move(options.collectives),
num_threads, options.asynchronous));
}
static const size_t kMaxIntraOpThreads = 256;
static tsl::ThreadOptions GetThreadOptions() {
tsl::ThreadOptions thread_options;
thread_options.stack_size = 8 * 1024 * 1024;
return thread_options;
}
TfrtCpuClient::TfrtCpuClient(
int process_index, std::vector<std::unique_ptr<TfrtCpuDevice>> devices,
std::shared_ptr<cpu::CollectivesInterface> collectives, size_t num_threads,
bool asynchronous)
: process_index_(process_index),
owned_devices_(std::move(devices)),
computation_placer_(std::make_unique<ComputationPlacer>()),
eigen_intraop_pool_(new tsl::thread::ThreadPool(
tsl::Env::Default(), GetThreadOptions(), "XLAEigen",
std::min(num_threads, kMaxIntraOpThreads))),
eigen_intraop_device_(
new Eigen::ThreadPoolDevice(eigen_intraop_pool_->AsEigenThreadPool(),
eigen_intraop_pool_->NumThreads())),
pjrt_client_thread_pool_(
new tsl::thread::ThreadPool(tsl::Env::Default(), GetThreadOptions(),
"XLATfrtCpuClient", num_threads)),
async_work_runner_(std::make_unique<ThreadPoolAsyncWorkRunner>(
pjrt_client_thread_pool_.get())),
last_collective_launch_event_(
tsl::MakeAvailableAsyncValueRef<CpuEvent>()),
transpose_cache_(1024),
collectives_(std::move(collectives)),
topology_(TfrtCpuTopologyDescription::Create(
platform_id(), platform_name(), platform_version(), owned_devices_,
cpu::DetectMachineAttributes())),
asynchronous_(asynchronous) {
for (const std::unique_ptr<TfrtCpuDevice>& device : owned_devices_) {
devices_.push_back(device.get());
CHECK(
id_to_device_.insert({device->global_device_id(), device.get()}).second)
<< "Duplicate device id: " << device->global_device_id();
device->SetClient(this);
if (device->IsAddressable()) {
int idx = device->local_hardware_id().value();
if (idx >= addressable_devices_.size()) {
addressable_devices_.resize(idx + 1);
}
CHECK(addressable_devices_[idx] == nullptr) << idx;
addressable_devices_[idx] = device.get();
}
}
for (int idx = 0; idx < addressable_devices_.size(); ++idx) {
auto* const device = addressable_devices_[idx];
CHECK(device != nullptr) << idx;
const int id = device->id();
auto memory_space = std::make_unique<UnpinnedHostMemorySpace>(id, device);
tensorflow::down_cast<TfrtCpuDevice*>(device)->AttachMemorySpace(
memory_space.get());
memory_spaces_.push_back(memory_space.get());
owned_memory_spaces_.push_back(std::move(memory_space));
}
VLOG(1) << "TfrtCpuClient created.";
}
TfrtCpuClient::~TfrtCpuClient() { VLOG(1) << "TfrtCpuClient destroyed."; }
absl::StatusOr<PjRtDevice*> TfrtCpuClient::LookupDevice(
xla::PjRtGlobalDeviceId global_device_id) const {
auto it = id_to_device_.find(global_device_id);
if (it != id_to_device_.end()) {
return it->second;
}
return InvalidArgument("No matching device found for device_id %d",
global_device_id.value());
}
absl::StatusOr<PjRtDevice*> TfrtCpuClient::LookupAddressableDevice(
PjRtLocalDeviceId local_device_id) const {
for (auto* device : addressable_devices_) {
if (local_device_id == device->local_device_id()) {
return device;
}
}
return InvalidArgument("No matching device found for local_device_id %d",
local_device_id.value());
}
absl::Span<PjRtMemorySpace* const> TfrtCpuClient::memory_spaces() const {
return memory_spaces_;
}
absl::StatusOr<DeviceAssignment> TfrtCpuClient::GetDefaultDeviceAssignment(
int num_replicas, int num_partitions) const {
if (num_partitions * num_replicas <= addressable_devices().size()) {
xla::DeviceAssignment assignment(num_replicas, num_partitions);
for (int i = 0; i < num_replicas; ++i) {
for (int j = 0; j < num_partitions; ++j) {
assignment(i, j) =
addressable_devices().at(i * num_partitions + j)->id();
}
}
return assignment;
}
return computation_placer_->AssignDevices(num_replicas, num_partitions);
}
absl::StatusOr<Layout> TfrtCpuClient::GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) {
Shape shape = ShapeUtil::MakeShape(element_type, dims);
return LayoutUtil::GetWithDefaultLayout(shape).layout();
}
absl::StatusOr<std::unique_ptr<HloCostAnalysis>>
TfrtCpuClient::GetHloCostAnalysis() const {
return std::make_unique<HloCostAnalysis>(cpu::CpuExecutable::ShapeSizeBytes);
}
static const InstructionValueSet& GetRootValueSet(
const BufferAssignment& assignment, const HloModule& module) {
return assignment.dataflow_analysis().GetInstructionValueSet(
module.entry_computation()->root_instruction());
}
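// Returns the buffer allocation index backing each top-level element of the
// entry computation's result (a single index for a non-tuple result).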
static absl::StatusOr<absl::InlinedVector<BufferAllocation::Index, 4>>
FindResultBufferAllocationIndex(const BufferAssignment& assignment,
const HloModule& module) {
absl::InlinedVector<BufferAllocation::Index, 4> buffer_indices;
const InstructionValueSet& root_value_set =
GetRootValueSet(assignment, module);
const Shape& result_shape = module.result_shape();
if (!result_shape.IsTuple()) {
const HloValueSet& sources = root_value_set.element({});
CHECK_EQ(1, sources.values().size());
const HloValue* value_source = sources.values()[0];
HloInstruction* src = value_source->instruction();
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
assignment.GetUniqueSlice(src, value_source->index()));
const BufferAllocation::Index buffer_index = slice.index();
buffer_indices.push_back(buffer_index);
return {std::move(buffer_indices)};
}
buffer_indices.reserve(result_shape.tuple_shapes_size());
for (int i = 0; i < result_shape.tuple_shapes_size(); ++i) {
const HloValueSet& sources = root_value_set.element({i});
CHECK_EQ(1, sources.values().size());
const HloValue* value_source = sources.values()[0];
HloInstruction* src = value_source->instruction();
TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
assignment.GetUniqueSlice(src, value_source->index()));
const BufferAllocation::Index buffer_index = slice.index();
buffer_indices.push_back(buffer_index);
}
return {std::move(buffer_indices)};
}
absl::StatusOr<std::string> TfrtCpuExecutable::SerializeExecutable() const {
cpu::CpuCompiler compiler;
TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result,
compiler.Export(cpu_executable_.get()));
TF_ASSIGN_OR_RETURN(std::string serialized, aot_result->SerializeAsString());
if (serialized.empty()) {
return Internal(
"TfrtCpuClient::SerializeExecutable proto serialization failed");
}
ExecutableAndOptionsProto proto;
*proto.mutable_serialized_executable() = std::move(serialized);
TF_ASSIGN_OR_RETURN(*proto.mutable_compile_options(),
compile_options_.ToProto());
return proto.SerializeAsString();
}
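// Reverses SerializeExecutable: parses the ExecutableAndOptionsProto, reloads
// the AOT compilation result, and rebuilds the executable together with its
// device assignment.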
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
TfrtCpuClient::DeserializeExecutable(absl::string_view serialized,
std::optional<CompileOptions> options) {
ExecutableAndOptionsProto proto;
if (serialized.size() > std::numeric_limits<int>::max()) {
return Internal(
"TfrtCpuClient::DeserializeExecutable proto too large (>2GB)");
}
if (!proto.ParseFromArray(serialized.data(), serialized.size())) {
return Internal(
"TfrtCpuClient::DeserializeExecutable proto deserialization failed");
}
CompileOptions compile_options;
if (options.has_value()) {
compile_options = *std::move(options);
} else {
TF_ASSIGN_OR_RETURN(compile_options,
CompileOptions::FromProto(proto.compile_options()));
}
auto input_options = compile_options;
cpu::CpuCompiler compiler;
std::string str = std::move(*proto.mutable_serialized_executable());
TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result,
compiler.LoadAotCompilationResult(str));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Executable> executable,
aot_result->LoadExecutable(&compiler, nullptr));
int num_replicas;
int num_partitions;
std::shared_ptr<DeviceAssignment> device_assignment;
TF_RETURN_IF_ERROR(ParseDeviceAssignmentCompileOptions(
compile_options.compile_portable_executable,
&compile_options.executable_build_options,
[this](int num_replicas, int num_partitions) {
return this->GetDefaultDeviceAssignment(num_replicas, num_partitions);
},
&num_replicas, &num_partitions, &device_assignment));
auto cpu_executable_ptr =
tensorflow::down_cast<cpu::CpuExecutable*>(executable.get());
TF_ASSIGN_OR_RETURN(
const BufferAllocation::Slice result_slice,
cpu_executable_ptr->buffer_assignment().GetUniqueTopLevelOutputSlice());
TF_ASSIGN_OR_RETURN(
auto result_buffer_indices,
FindResultBufferAllocationIndex(cpu_executable_ptr->buffer_assignment(),
executable->module()));
std::vector<PjRtLoadedExecutable::LogicalDeviceIds>
addressable_device_logical_ids;
std::vector<PjRtDevice*> addressable_devices;
ExecutableBuildOptions& build_options =
compile_options.executable_build_options;
if (device_assignment != nullptr) {
addressable_device_logical_ids.reserve(num_replicas * num_partitions);
addressable_devices.reserve(num_replicas * num_partitions);
for (int replica = 0; replica < num_replicas; ++replica) {
for (int partition = 0; partition < num_partitions; ++partition) {
PjRtGlobalDeviceId device_id((*device_assignment)(replica, partition));
if (UnpackCpuProcessIndex(device_id) != process_index()) {
VLOG(3) << "Non-local device: " << device_id;
continue;
}
TF_ASSIGN_OR_RETURN(PjRtDevice * device, LookupDevice(device_id));
        PjRtLoadedExecutable::LogicalDeviceIds logical_device_ids;
        logical_device_ids.replica = replica;
        logical_device_ids.partition = partition;
        addressable_device_logical_ids.push_back(std::move(logical_device_ids));
addressable_devices.push_back(device);
}
}
if (addressable_devices.empty()) {
return InvalidArgument(
"Device assignment (%s) does not have any local devices.",
device_assignment->ToString());
}
if (build_options.device_ordinal() < 0) {
build_options.set_device_ordinal(
addressable_devices.front()->local_hardware_id().value());
}
}
auto tfrt_cpu_executable = std::make_unique<TfrtCpuExecutable>(
num_replicas, num_partitions, std::move(device_assignment),
compile_options.parameter_is_tupled_arguments, std::move(input_options),
std::move(executable), result_slice.index(),
std::move(result_buffer_indices),
std::move(addressable_device_logical_ids), std::move(addressable_devices),
this);
TF_RETURN_IF_ERROR(tfrt_cpu_executable->SetUpDonation(
compile_options.parameter_is_tupled_arguments));
return std::unique_ptr<PjRtLoadedExecutable>(std::move(tfrt_cpu_executable));
}
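// Compiles an XlaComputation for the CPU backend: builds the module config,
// runs the HLO pass pipeline, then invokes the backend code generator.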
static absl::StatusOr<std::unique_ptr<xla::Executable>> JitCompile(
const XlaComputation& computation,
const absl::Span<const Shape* const> argument_layouts,
const ExecutableBuildOptions& build_options,
const ExecutionOptions& execution_options,
const xla::Compiler::CompileOptions& compile_options, int num_threads) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModuleConfig> hlo_module_config,
CreateModuleConfig(program_shape, argument_layouts, &execution_options,
execution_options.num_replicas(), num_threads,
nullptr));
const xla::HloModuleProto& hlo_module_proto = computation.proto();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> hlo_module,
xla::HloModule::CreateFromProto(hlo_module_proto, *hlo_module_config));
VLOG(3) << "Unoptimized HLO module: " << hlo_module->ToString();
static constexpr char kBeforeOptimizationsDumpName[] = "before_optimizations";
DumpHloModuleIfEnabled(*hlo_module, kBeforeOptimizationsDumpName);
cpu::CpuCompiler compiler;
TF_ASSIGN_OR_RETURN(hlo_module, compiler.RunHloPasses(std::move(hlo_module),
nullptr,
compile_options));
return compiler.RunBackend(std::move(hlo_module), nullptr,
compile_options);
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> TfrtCpuClient::Compile(
const XlaComputation& computation, CompileOptions options) {
tsl::profiler::TraceMe traceme("TfrtCpuClient::Compile (XlaComputation)");
auto input_options = options;
ExecutableBuildOptions& build_options = options.executable_build_options;
TF_RETURN_IF_ERROR(options.ApplyAllOptionOverrides());
int num_replicas;
int num_partitions;
std::shared_ptr<DeviceAssignment> device_assignment;
TF_RETURN_IF_ERROR(ParseDeviceAssignmentCompileOptions(
options.compile_portable_executable, &options.executable_build_options,
[this](int num_replicas, int num_partitions) {
return this->GetDefaultDeviceAssignment(num_replicas, num_partitions);
},
&num_replicas, &num_partitions, &device_assignment));
if (collectives_ == nullptr && device_assignment) {
for (int replica = 0; replica < device_assignment->replica_count();
++replica) {
for (int computation = 0;
computation < device_assignment->computation_count();
++computation) {
PjRtGlobalDeviceId id((*device_assignment)(replica, computation));
if (UnpackCpuProcessIndex(id) != process_index()) {
return InvalidArgument(
"Multiprocess computations aren't implemented on the CPU "
"backend.");
}
}
}
}
std::vector<const Shape*> argument_layout_pointers;
TF_RETURN_IF_ERROR(DetermineArgumentLayoutsFromCompileOptions(
computation, &LayoutUtil::GetWithDefaultLayout, options.argument_layouts,
&options.executable_build_options, &argument_layout_pointers));
std::vector<PjRtLoadedExecutable::LogicalDeviceIds>
addressable_device_logical_ids;
std::vector<PjRtDevice*> addressable_devices;
if (device_assignment != nullptr) {
addressable_device_logical_ids.reserve(num_replicas * num_partitions);
addressable_devices.reserve(num_replicas * num_partitions);
for (int replica = 0; replica < num_replicas; ++replica) {
for (int partition = 0; partition < num_partitions; ++partition) {
PjRtGlobalDeviceId device_id((*device_assignment)(replica, partition));
if (UnpackCpuProcessIndex(device_id) != process_index()) {
VLOG(3) << "Non-local device: " << device_id;
continue;
}
TF_ASSIGN_OR_RETURN(PjRtDevice * device, LookupDevice(device_id));
        PjRtLoadedExecutable::LogicalDeviceIds logical_device_ids;
        logical_device_ids.replica = replica;
        logical_device_ids.partition = partition;
        addressable_device_logical_ids.push_back(std::move(logical_device_ids));
addressable_devices.push_back(device);
}
}
if (addressable_devices.empty()) {
return InvalidArgument(
"Device assignment (%s) does not have any local devices.",
device_assignment->ToString());
}
if (build_options.device_ordinal() < 0) {
build_options.set_device_ordinal(
addressable_devices.front()->local_hardware_id().value());
}
}
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
computation.GetProgramShape());
ExecutionOptions execution_options =
CreateExecutionOptions(build_options, &program_shape);
xla::Compiler::CompileOptions compile_options{
build_options.device_allocator(), build_options.compile_thread_pool(),
build_options.layout_canonicalization_callback()};
if (!compile_options.thread_pool) {
compile_options.thread_pool = pjrt_client_thread_pool();
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<Executable> cpu_executable,
JitCompile(computation, argument_layout_pointers, build_options,
execution_options, compile_options,
eigen_intraop_device()->getPool()->NumThreads()));
auto cpu_executable_ptr =
tensorflow::down_cast<cpu::CpuExecutable*>(cpu_executable.get());
TF_ASSIGN_OR_RETURN(
const BufferAllocation::Slice result_slice,
cpu_executable_ptr->buffer_assignment().GetUniqueTopLevelOutputSlice());
TF_ASSIGN_OR_RETURN(
auto result_buffer_indices,
FindResultBufferAllocationIndex(cpu_executable_ptr->buffer_assignment(),
cpu_executable->module()));
auto executable = std::make_unique<TfrtCpuExecutable>(
num_replicas, num_partitions, std::move(device_assignment),
options.parameter_is_tupled_arguments, std::move(input_options),
std::move(cpu_executable), result_slice.index(),
std::move(result_buffer_indices),
std::move(addressable_device_logical_ids), std::move(addressable_devices),
this);
TF_RETURN_IF_ERROR(
executable->SetUpDonation(options.parameter_is_tupled_arguments));
return std::unique_ptr<PjRtLoadedExecutable>(std::move(executable));
}
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> TfrtCpuClient::Compile(
mlir::ModuleOp module, CompileOptions options) {
tsl::profiler::TraceMe traceme("TfrtCpuClient::Compile (mlir::ModuleOp)");
XlaComputation xla_computation;
const ExecutableBuildOptions& exec_build_options =
options.executable_build_options;
TF_RETURN_IF_ERROR(MlirToXlaComputation(
module, xla_computation,
options.parameter_is_tupled_arguments,
false, exec_build_options.use_shardy_partitioner()));
return Compile(xla_computation, options);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::CreateViewOfDeviceBuffer(
void* device_ptr, const Shape& shape, PjRtDevice* device,
std::function<void()> on_delete_callback,
std::optional<std::intptr_t> stream) {
if (stream) {
return Unimplemented(
"TfrtCpuClient::CreateViewOfDeviceBuffer does not support `stream` "
"argument.");
}
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers;
size_t byte_size = ShapeUtil::ByteSizeOf(shape);
auto non_owning_buffer =
tsl::MakeAvailableAsyncValueRef<MaybeOwningCpuMemory>(device_ptr,
byte_size);
buffers.push_back(std::move(non_owning_buffer));
auto tracked_device_buffer = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
false, false, std::move(buffers),
tsl::MakeAvailableAsyncValueRef<CpuEvent>(),
std::move(on_delete_callback));
return std::unique_ptr<PjRtBuffer>(std::make_unique<TfrtCpuBuffer>(
shape, std::move(tracked_device_buffer), this,
tensorflow::down_cast<TfrtCpuDevice*>(device),
*device->default_memory_space()));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::CreateErrorBuffer(
absl::Status error, const Shape& shape, PjRtDevice* device) {
if (device->client() != this) {
return absl::InvalidArgumentError("Device is not attached to this client");
}
return std::make_unique<TfrtCpuBuffer>(
shape,
std::make_unique<TrackedTfrtCpuDeviceBuffer>(
false, true,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>{},
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4>{
tsl::AsyncValueRef<CpuEvent>(
tsl::MakeErrorAsyncValueRef(std::move(error)))}),
this, tensorflow::down_cast<TfrtCpuDevice*>(device),
*device->default_memory_space());
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::CreateErrorBuffer(
absl::Status error, const Shape& shape, PjRtMemorySpace* memory) {
return CreateErrorBuffer(std::move(error), shape, memory->devices()[0]);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::CreateUninitializedBuffer(const Shape& shape,
PjRtDevice* device) {
tsl::profiler::TraceMe traceme("TfrtCpuClient::CreateUninitializedBuffer");
VLOG(1) << "TfrtCpuClient::CreateUninitializedBuffer: shape: "
<< shape.DebugString() << " device: " << device->DebugString();
return AllocateDestinationBuffer(
shape, {},
tensorflow::down_cast<TfrtCpuDevice*>(device), this);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
TfrtCpuClient::CreateBuffersForAsyncHostToDevice(absl::Span<const Shape> shapes,
PjRtDevice* device) {
auto* tfrt_device = tensorflow::down_cast<TfrtCpuDevice*>(device);
return TfrtCpuAsyncHostToDeviceTransferManager::Create(shapes, tfrt_device,
this);
}
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
TfrtCpuClient::CreateBuffersForAsyncHostToDevice(
absl::Span<const Shape> shapes, PjRtMemorySpace* memory_space) {
CHECK_EQ(memory_space->devices().size(), 1);
return CreateBuffersForAsyncHostToDevice(shapes, memory_space->devices()[0]);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtDevice* device) {
tsl::profiler::TraceMe traceme("TfrtCpuClient::BufferFromHostBuffer");
Shape shape = ShapeUtil::MakeShape(type, dims);
VLOG(2) << "TfrtCpuClient::BufferFromHostBuffer: shape: " << shape.ToString()
<< " device: " << device->DebugString();
if (!device->IsAddressable()) {
return InvalidArgument("Cannot copy array to non-addressable device %s",
device->DebugString());
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
AbstractTfrtCpuBuffer::BufferFromHostBufferHelper(
data, type, dims, byte_strides, host_buffer_semantics,
std::move(on_done_with_host_buffer), shape, async_work_runner(),
&transpose_mu_, &transpose_cache_));
return std::unique_ptr<PjRtBuffer>(std::make_unique<TfrtCpuBuffer>(
shape, std::move(tracked_device_buffer), this,
tensorflow::down_cast<TfrtCpuDevice*>(device),
*device->default_memory_space()));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer, PjRtDevice* device,
const Layout* device_layout) {
if (device_layout != nullptr) {
return absl::UnimplementedError(absl::StrCat(
"BufferFromHostBuffer with an optional device layout is not "
"implemented on platform: ",
platform_name()));
}
return BufferFromHostBuffer(data, type, dims, byte_strides,
host_buffer_semantics,
std::move(on_done_with_host_buffer), device);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
absl::AnyInvocable<void() &&> on_done_with_host_buffer,
PjRtMemorySpace* memory_space, const Layout* device_layout) {
CHECK_EQ(memory_space->devices().size(), 1);
return BufferFromHostBuffer(data, type, dims, byte_strides,
host_buffer_semantics,
std::move(on_done_with_host_buffer),
memory_space->devices()[0], device_layout);
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::BufferFromHostLiteral(const LiteralSlice& literal,
PjRtDevice* device) {
tsl::profiler::TraceMe traceme("TfrtCpuClient::BufferFromHostLiteral");
VLOG(1) << "TfrtCpuClient::BufferFromHostLiteral: shape: "
<< literal.shape().DebugString()
<< " device: " << device->DebugString();
const Shape& shape = literal.shape();
absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> avs;
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TfrtCpuBuffer> output_buffer,
AllocateDestinationBufferAndAvs(
shape, &avs, tensorflow::down_cast<TfrtCpuDevice*>(device), this));
output_buffer->CopyFromLiteral(literal, shape, &avs, async_work_runner());
return std::unique_ptr<PjRtBuffer>(std::move(output_buffer));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::BufferFromHostLiteral(const LiteralSlice& literal,
PjRtMemorySpace* memory_space) {
CHECK_EQ(memory_space->devices().size(), 1);
return BufferFromHostLiteral(literal, memory_space->devices()[0]);
}
TfrtCpuBuffer::TfrtCpuBuffer(
Shape on_device_shape,
std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
TfrtCpuClient* client, TfrtCpuDevice* device, PjRtMemorySpace* memory_space)
: AbstractTfrtCpuBuffer(std::move(on_device_shape),
std::move(tracked_device_buffer)),
client_(client),
device_(device),
memory_space_(memory_space) {}
static std::vector<tsl::RCReference<tsl::AsyncValue>> CopyAsyncValues(
absl::Span<const tsl::RCReference<tsl::AsyncValue>> events) {
std::vector<tsl::RCReference<tsl::AsyncValue>> avs;
avs.reserve(events.size());
for (const auto& ev : events) {
avs.push_back(ev.CopyRef());
}
return avs;
}
PjRtFuture<> TfrtCpuBuffer::ToLiteral(MutableLiteralBase* literal) {
return ToLiteralHelper(literal, client()->async_work_runner());
}
PjRtFuture<> TfrtCpuBuffer::LazyToLiteral(
absl::AnyInvocable<absl::StatusOr<MutableLiteralBase*>() &&> generator) {
auto buffer = std::move(generator)();
if (!buffer.ok()) {
return PjRtFuture<>(buffer.status());
}
return ToLiteralHelper(buffer.value(), client()->async_work_runner());
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuBuffer::CopyToDevice(
PjRtDevice* dst_device) {
tsl::profiler::TraceMe traceme("TfrtCpuBuffer::CopyToDevice");
if (dst_device == device_) {
return InvalidArgument(
"CopyToDevice cannot accept the same source and destination devices");
}
if (dst_device->client() != client_) {
return CopyToDeviceAcrossClients(dst_device);
}
if (!dst_device->IsAddressable()) {
return InvalidArgument("Cannot copy array to non-addressable device %s",
dst_device->DebugString());
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
CopyToDeviceHelper(client()->async_work_runner()));
return std::unique_ptr<PjRtBuffer>(std::make_unique<TfrtCpuBuffer>(
on_device_shape_, std::move(tracked_device_buffer), client(),
tensorflow::down_cast<TfrtCpuDevice*>(dst_device),
*dst_device->default_memory_space()));
}
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuBuffer::CopyToMemorySpace(
PjRtMemorySpace* dst_memory_space) {
CHECK_EQ(dst_memory_space->devices().size(), 1);
return CopyToDevice(dst_memory_space->devices()[0]);
}
TfrtCpuExecutable::TfrtCpuExecutable(
int num_replicas, int num_partitions,
std::shared_ptr<DeviceAssignment> device_assignment,
bool parameter_is_tupled_arguments, CompileOptions compile_options,
std::unique_ptr<Executable> cpu_executable,
BufferAllocation::Index result_buffer_index,
absl::InlinedVector<BufferAllocation::Index, 4> result_buffer_indices,
std::vector<LogicalDeviceIds> addressable_device_logical_ids,
std::vector<PjRtDevice*> addressable_devices, TfrtCpuClient* client)
: client_(client),
num_replicas_(num_replicas),
num_partitions_(num_partitions),
device_assignment_(std::move(device_assignment)),
parameter_is_tupled_arguments_(parameter_is_tupled_arguments),
compile_options_(std::move(compile_options)),
cpu_executable_(std::move(cpu_executable)),
result_buffer_index_(result_buffer_index),
result_buffer_indices_(std::move(result_buffer_indices)),
addressable_device_logical_ids_(
std::move(addressable_device_logical_ids)),
addressable_devices_(std::move(addressable_devices)) {
auto hlo_cost_analysis =
std::make_unique<HloCostAnalysis>(cpu::CpuExecutable::ShapeSizeBytes);
CHECK_OK(cpu_executable_->module().entry_computation()->Accept(
hlo_cost_analysis.get()));
cheap_computation_ = hlo_cost_analysis->flop_count() < 1000;
const auto& computation_layout =
cpu_executable_->module().entry_computation_layout();
if (computation_layout.parameter_count() == 0) {
return;
}
if (computation_layout.parameter_count() > 1 ||
!computation_layout.parameter_shape(0).IsTuple()) {
input_buffer_sizes_in_bytes_.reserve(computation_layout.parameter_count());
for (int i = 0; i < computation_layout.parameter_count(); ++i) {
input_buffer_sizes_in_bytes_.push_back(
ShapeUtil::ByteSizeOf(computation_layout.parameter_shape(i)));
}
} else {
input_buffer_sizes_in_bytes_.reserve(
computation_layout.parameter_shape(0).tuple_shapes_size());
for (int i = 0;
i < computation_layout.parameter_shape(0).tuple_shapes_size(); ++i) {
input_buffer_sizes_in_bytes_.push_back(ShapeUtil::ByteSizeOf(
computation_layout.parameter_shape(0).tuple_shapes(i)));
}
}
}
void TfrtCpuExecutable::Delete() {}
bool TfrtCpuExecutable::IsDeleted() { return false; }
absl::StatusOr<std::optional<std::string>> TfrtCpuExecutable::Fingerprint()
const {
return std::optional<std::string>();
}
absl::Status TfrtCpuExecutable::SetUpDonation(bool tuple_inputs) {
TF_ASSIGN_OR_RETURN(parameters_that_must_be_donated_,
ComputeParametersThatMustBeDonated(
*cpu_executable_->shared_module(), tuple_inputs));
return absl::OkStatus();
}
namespace {
struct BufferInfo {
tsl::AsyncValueRef<MaybeOwningCpuMemory> buffer;
bool owns_buffer;
size_t buffer_size;
};
struct BufferAlloc {
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers;
absl::InlinedVector<size_t, 4> allocation_sizes;
void Allocate() {
for (int i = 0; i < buffers.size(); ++i) {
auto memory = MaybeOwningCpuMemory::Allocate(allocation_sizes[i]);
if (!memory.ok()) {
buffers[i].SetError(memory.status());
return;
}
buffers[i].emplace(std::move(*memory));
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buffers[i]->data(),
allocation_sizes[i]);
}
}
};
struct BufferAllocAndCopy {
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> src_buffers;
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> dst_buffers;
absl::InlinedVector<size_t, 4> allocation_sizes;
void AllocateAndCopy() {
for (int i = 0; i < src_buffers.size(); ++i) {
auto memory = MaybeOwningCpuMemory::Allocate(allocation_sizes[i]);
if (!memory.ok()) {
dst_buffers[i].SetError(memory.status());
return;
}
dst_buffers[i].emplace(std::move(*memory));
CHECK(src_buffers[i].IsConcrete());
std::memcpy(dst_buffers[i]->data(), src_buffers[i]->data(),
allocation_sizes[i]);
}
}
};
}
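// Resolves the backing memory for a single allocation: parameter buffers are
// aliased when they can be donated or the allocation is read-only and copied
// otherwise, constants are wrapped without taking ownership, and remaining
// allocations are deferred to `buffer_alloc` for later allocation.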
static absl::StatusOr<BufferInfo> MemoryForAllocation(
const BufferAllocation& allocation,
absl::Span<const cpu::CpuExecutable::ConstantAllocation> constants,
absl::Span<std::pair<bool, TrackedTfrtCpuDeviceBuffer*> const> arguments,
BufferAlloc& buffer_alloc, BufferAllocAndCopy& buffer_alloc_and_copy) {
BufferInfo buffer_info;
if (allocation.is_entry_computation_parameter()) {
auto [can_donate, arg] = arguments[allocation.parameter_number()];
tsl::AsyncValueRef<MaybeOwningCpuMemory> out =
arg->Buffer(allocation.param_shape_index());
CHECK_EQ(allocation.size(), arg->BufferSize(allocation.param_shape_index()))
<< "Size mismatch on param " << allocation.parameter_number()
<< " at shape index " << allocation.param_shape_index().ToString();
if ((!can_donate || !arg->owns_buffers()) && !allocation.is_readonly()) {
auto copy = tsl::MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
buffer_alloc_and_copy.src_buffers.push_back(std::move(out));
buffer_alloc_and_copy.dst_buffers.push_back(copy);
buffer_alloc_and_copy.allocation_sizes.push_back(allocation.size());
buffer_info.buffer = std::move(copy);
buffer_info.owns_buffer = true;
buffer_info.buffer_size = allocation.size();
return buffer_info;
}
buffer_info.buffer = std::move(out);
buffer_info.owns_buffer = arg->owns_buffers();
buffer_info.buffer_size = arg->BufferSize(allocation.param_shape_index());
return buffer_info;
} else if (allocation.is_constant() &&
allocation.index() < constants.size()) {
se::DeviceMemoryBase constant =
constants[allocation.index()].AsDeviceMemoryBase();
buffer_info.buffer = tsl::MakeAvailableAsyncValueRef<MaybeOwningCpuMemory>(
constant.opaque(), constant.size());
buffer_info.owns_buffer = false;
buffer_info.buffer_size = constant.size();
return buffer_info;
} else if (allocation.is_constant() || allocation.is_thread_local()) {
buffer_info.buffer =
tsl::MakeAvailableAsyncValueRef<MaybeOwningCpuMemory>();
buffer_info.owns_buffer = true;
buffer_info.buffer_size = 0;
return buffer_info;
}
auto out = tsl::MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
buffer_alloc.buffers.push_back(out);
buffer_alloc.allocation_sizes.push_back(allocation.size());
buffer_info.buffer = std::move(out);
buffer_info.owns_buffer = true;
buffer_info.buffer_size = allocation.size();
return buffer_info;
}
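// Materializes a BufferInfo for every allocation in the buffer assignment.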
static absl::StatusOr<std::vector<BufferInfo>> CreateBufferTable(
const BufferAssignment& assignment,
absl::Span<const cpu::CpuExecutable::ConstantAllocation> constants,
absl::Span<std::pair<bool, TrackedTfrtCpuDeviceBuffer*> const> arguments,
BufferAlloc& buffer_alloc, BufferAllocAndCopy& buffer_alloc_and_copy) {
std::vector<BufferInfo> buffer_table(assignment.Allocations().size());
for (BufferAllocation::Index i = 0; i < buffer_table.size(); ++i) {
const BufferAllocation& allocation = assignment.GetAllocation(i);
TF_ASSIGN_OR_RETURN(
buffer_table[i],
MemoryForAllocation(allocation, constants, arguments, buffer_alloc,
buffer_alloc_and_copy));
}
return std::move(buffer_table);
}
static absl::InlinedVector<BufferInfo, 4> CreateResultBufferInfo(
absl::Span<const BufferAllocation::Index> buffer_indices,
absl::Span<const BufferInfo> buffer_table) {
absl::InlinedVector<BufferInfo, 4> output_buffer_info;
output_buffer_info.reserve(buffer_indices.size());
for (int i = 0; i < buffer_indices.size(); ++i) {
output_buffer_info.push_back(buffer_table[buffer_indices[i]]);
}
return output_buffer_info;
}
absl::Status TfrtCpuExecutable::CheckBufferCompatibilities(
absl::Span<std::pair<bool, TrackedTfrtCpuDeviceBuffer*> const>
input_buffers) const {
if (input_buffers.size() != input_buffer_sizes_in_bytes_.size()) {
return InvalidArgument(
"Execution supplied %lld buffers but compiled program expected %lld "
"buffers",
input_buffers.size(), input_buffer_sizes_in_bytes_.size());
}
for (int i = 0; i < input_buffers.size(); ++i) {
const auto& buffer = input_buffers[i].second;
if (input_buffer_sizes_in_bytes_[i] != buffer->BufferSizes()[0]) {
return InvalidArgument(
"Executable expected parameter %d of size %lld but got buffer with "
"incompatible size %lld",
i, input_buffer_sizes_in_bytes_[i], buffer->BufferSizes()[0]);
}
}
return absl::OkStatus();
}
absl::StatusOr<PjRtLoadedExecutable::Result> TfrtCpuExecutable::ExecuteHelper(
absl::Span<PjRtBuffer* const> argument_handles, int replica, int partition,
const RunId& run_id, const ExecuteOptions& options,
tsl::AsyncValueRef<CpuEvent> last_collective_launch_event, bool fill_future,
TfrtCpuDevice* device) {
tsl::profiler::TraceMe traceme("TfrtCpuExecutable::ExecuteHelper");
std::shared_ptr<DeviceAssignment> device_assignment;
if (device == nullptr) {
CHECK(device_assignment_ != nullptr);
const int64_t device_id = (*device_assignment_)(replica, partition);
PjRtGlobalDeviceId global_device_id(device_id);
TF_ASSIGN_OR_RETURN(PjRtDevice * pjrt_device,
client_->LookupDevice(global_device_id));
device = tensorflow::down_cast<TfrtCpuDevice*>(pjrt_device);
device_assignment = device_assignment_;
} else {
CHECK(device_assignment_ == nullptr);
CHECK_EQ(replica, 0);
CHECK_EQ(partition, 0);
CHECK(addressable_devices_.empty());
device_assignment = std::make_shared<DeviceAssignment>(1, 1);
(*device_assignment)(0, 0) = device->id();
}
CHECK_EQ(device->process_index(), client_->process_index());
if (options.arguments_are_tupled) {
if (!parameter_is_tupled_arguments_) {
return InvalidArgument(
"Arguments may only be supplied as a tuple when the executable was "
"compiled with a single tupled parameter");
}
if (argument_handles.size() != 1) {
return InvalidArgument(
"Option arguments_are_tupled was true but %d buffers were passed to "
"execution",
argument_handles.size());
}
}
auto execute_event = tsl::MakeConstructedAsyncValueRef<CpuEvent>();
MarkEventReadyOnExit ready_on_exit(execute_event);
absl::InlinedVector<TfrtCpuBuffer::DonationTransaction, 4>
donation_transactions;
absl::InlinedVector<std::pair<bool, TrackedTfrtCpuDeviceBuffer*>, 4>
tracked_buffers;
tracked_buffers.reserve(argument_handles.size());
std::vector<tsl::RCReference<tsl::AsyncValue>> input_deps;
input_deps.reserve(argument_handles.size());
auto donate_it = parameters_that_must_be_donated_.begin();
absl::flat_hash_map<const void*, std::pair<bool, int>> donation_clashes;
donation_clashes.reserve(argument_handles.size());
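  // Acquire each argument's tracked device buffer, taking a donation
  // transaction where the executable requires the parameter to be donated,
  // and record any not-yet-available events as input dependencies.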
for (int i = 0; i < argument_handles.size(); ++i) {
PjRtBuffer* handle = argument_handles[i];
auto* tfrt_buffer = tensorflow::down_cast<TfrtCpuBuffer*>(handle);
if (tfrt_buffer->device() != device) {
return InvalidArgument(
"Buffer passed to Execute() as argument %d to replica %d is on "
"device %s, but replica is assigned to device %s.",
i, replica, tfrt_buffer->device()->DebugString(),
device->DebugString());
}
TrackedTfrtCpuDeviceBuffer* tracked_buffer;
auto get_buffer = [&](int i) -> absl::Status {
bool must_donate = donate_it != parameters_that_must_be_donated_.end() &&
*donate_it == i;
TF_RETURN_IF_ERROR(TestBufferDonationClashes(
tfrt_buffer, donation_clashes, must_donate, i, replica, partition));
if (must_donate) {
++donate_it;
absl::StatusOr<TfrtCpuBuffer::DonationTransaction>
donation_transaction = tfrt_buffer->AcquireDonation();
if (donation_transaction.ok()) {
for (const auto& ev :
donation_transaction->device_buffer()->UsageEvents()) {
if (!ev.IsAvailable()) {
input_deps.push_back(ev.CopyRCRef());
}
}
tracked_buffer = donation_transaction->device_buffer();
tracked_buffers.emplace_back(true, tracked_buffer);
donation_transactions.push_back(std::move(*donation_transaction));
return absl::OkStatus();
}
}
tracked_buffer = tfrt_buffer->AcquireUsage(execute_event);
if (!tracked_buffer)
return InvalidArgument(
"Invalid buffer passed: buffer has been deleted or donated.");
tracked_buffers.emplace_back(false, tracked_buffer);
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(get_buffer(i));
const auto& definition_event = tracked_buffer->definition_event();
if (!definition_event.IsAvailable()) {
input_deps.push_back(definition_event.CopyRCRef());
}
}
TF_RETURN_IF_ERROR(CheckBufferCompatibilities(tracked_buffers));
std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tuplized_arg;
if (parameter_is_tupled_arguments_ && !options.arguments_are_tupled) {
bool owns_buffers = true;
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>
leaf_buffers;
absl::InlinedVector<size_t, 4> leaf_buffer_sizes;
leaf_buffers.reserve(tracked_buffers.size());
leaf_buffer_sizes.reserve(tracked_buffers.size());
for (const auto& tracked_buffer : tracked_buffers) {
owns_buffers = owns_buffers && tracked_buffer.second->owns_buffers();
auto span = tracked_buffer.second->Buffers();
leaf_buffers.insert(leaf_buffers.end(), span.begin(), span.end());
auto size_span = tracked_buffer.second->BufferSizes();
leaf_buffer_sizes.insert(leaf_buffer_sizes.end(), size_span.begin(),
size_span.end());
}
tracked_buffers.clear();
tuplized_arg = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
true, owns_buffers, std::move(leaf_buffers),
std::move(leaf_buffer_sizes),
tsl::MakeConstructedAsyncValueRef<CpuEvent>());
tracked_buffers.emplace_back(false, tuplized_arg.get());
}
auto* cpu_executable =
tensorflow::down_cast<cpu::CpuExecutable*>(cpu_executable_.get());
BufferAlloc buffer_alloc;
BufferAllocAndCopy buffer_alloc_and_copy;
TF_ASSIGN_OR_RETURN(
std::vector<BufferInfo> buffer_table,
CreateBufferTable(cpu_executable->buffer_assignment(),
cpu_executable->constants(), tracked_buffers,
buffer_alloc, buffer_alloc_and_copy));
auto result_buffers_info =
CreateResultBufferInfo(result_buffer_indices_, buffer_table);
auto compute_reservation = std::make_unique<Semaphore::ScopedReservation>(
device->max_inflight_computations_semaphore().ScopedAcquire(1));
ExecutableRunOptions run_options;
run_options.set_run_id(run_id);
run_options.set_device_ordinal(device->id());
run_options.set_device_assignment(device_assignment.get());
run_options.set_intra_op_thread_pool(client_->eigen_intraop_device());
auto cpu_run_options = std::make_shared<cpu::CpuExecutableRunOptions>();
cpu_run_options->set_collectives(client_->collectives_.get());
run_options.set_cpu_executable_run_options(cpu_run_options.get());
bool is_a_collective_launch = !!last_collective_launch_event;
if (is_a_collective_launch) {
input_deps.push_back(std::move(last_collective_launch_event));
} else {
auto last_enqueue_event = client_->GetLastEnqueueEvent();
if (!last_enqueue_event.IsAvailable()) {
input_deps.push_back(std::move(last_enqueue_event));
}
}
if (options.context != nullptr) {
run_options.set_ffi_execution_context(&options.context->ffi_context());
}
bool execute_inline = cheap_computation_ || !client_->asynchronous_;
if (options.execution_mode == ExecuteOptions::ExecutionMode::kAsynchronous) {
execute_inline = false;
} else if (options.execution_mode ==
ExecuteOptions::ExecutionMode::kSynchronous) {
execute_inline = true;
}
if (input_deps.empty() && execute_inline) {
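    // Fast path: all inputs are ready and inline execution was selected, so
    // run the computation synchronously on the calling thread.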
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
XlaCustomCallStatus compute_function_status;
tsl::AsyncValueRef<cpu::Thunk::ExecuteEvent> thunks_execute_event;
buffer_alloc.Allocate();
buffer_alloc_and_copy.AllocateAndCopy();
std::vector<void*> buffer_pointers;
buffer_pointers.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
CHECK(buffer_info.buffer.IsAvailable());
if (buffer_info.buffer.IsError()) {
return buffer_info.buffer.GetError();
}
buffer_pointers.push_back(buffer_info.buffer->data());
}
void* result_buffer = buffer_pointers[result_buffer_index_];
if (cpu_executable->has_compute_function()) {
cpu_executable->compute_function()(result_buffer, &run_options, nullptr,
buffer_pointers.data(),
&compute_function_status, nullptr);
} else if (cpu_executable->has_thunks()) {
absl::InlinedVector<MaybeOwningDeviceMemory, 8> buffer_device_mem;
buffer_device_mem.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
buffer_device_mem.emplace_back(se::DeviceMemoryBase(
buffer_info.buffer->data(), buffer_info.buffer->size()));
}
cpu::BufferAllocations allocations(buffer_device_mem);
TF_ASSIGN_OR_RETURN(
cpu::Thunk::CollectiveExecuteParams collective_params,
cpu::Thunk::CollectiveExecuteParams::Create(&run_options));
TF_ASSIGN_OR_RETURN(
cpu::Thunk::CustomCallExecuteParams custom_call_execute_params,
cpu::Thunk::CustomCallExecuteParams::Create(&run_options));
cpu::Thunk::TaskRunner task_runner =
[&run_options](cpu::Thunk::Task task) {
run_options.intra_op_thread_pool()->getPool()->Schedule(
std::move(task));
};
cpu::Thunk::ExecuteParams execute_params = {
&cpu_executable->function_registry(),
&allocations,
cpu::runtime::GetXfeedManager(run_options.device_ordinal()),
run_options.intra_op_thread_pool(),
&task_runner,
&collective_params,
&custom_call_execute_params};
thunks_execute_event = cpu_executable->thunks().Execute(execute_params);
tsl::profiler::TraceMe trace(
"ThunkExecutor::Execute (wait for completion)");
tsl::BlockUntilReady(thunks_execute_event);
} else {
return Internal("CpuExecutable has no compute function or thunks.");
}
for (auto& donation_transaction : donation_transactions) {
std::move(donation_transaction).Commit();
}
if (cpu_executable->has_compute_function()) {
if (auto error_message =
xla::CustomCallStatusGetMessage(&compute_function_status)) {
return Internal("Generated function failed: %s", *error_message);
}
} else if (thunks_execute_event.IsError()) {
return thunks_execute_event.GetError();
}
} else {
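    // Asynchronous path: publish this execution's completion event (so later
    // launches can be ordered after it) and dispatch the computation to the
    // client's thread pool once all input dependencies become ready.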
if (is_a_collective_launch) {
client_->SetLastCollectiveLaunchEvent(execute_event.CopyRef());
} else {
client_->SetLastEnqueueEvent(execute_event.CopyRef());
}
std::vector<tsl::RCReference<tsl::AsyncValue>> input_deps_avs_copy =
CopyAsyncValues(input_deps);
EnqueueWorkWhenReady(
client()->pjrt_client_thread_pool(), input_deps,
[cpu_executable, buffer_alloc = std::move(buffer_alloc),
buffer_alloc_and_copy = std::move(buffer_alloc_and_copy),
result_buffer_index = result_buffer_index_,
buffer_table = std::move(buffer_table),
run_options = std::move(run_options),
cpu_executable_copy = cpu_executable_,
device_assignment = std::move(device_assignment),
cpu_run_options = std::move(cpu_run_options),
compute_reservation = std::move(compute_reservation),
tuplized_arg = std::move(tuplized_arg),
donation_transactions = std::move(donation_transactions),
execute_event = std::move(ready_on_exit).Release(),
input_deps_avs = std::move(input_deps_avs_copy),
eigen_device = client()->eigen_intraop_device()]() mutable {
buffer_alloc.Allocate();
buffer_alloc_and_copy.AllocateAndCopy();
for (const auto& av : input_deps_avs) {
if (auto* error = av->GetErrorIfPresent()) {
              execute_event.SetError(absl::StrCat(
                  "Error dispatching computation: ", error->message()));
return;
}
}
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
std::vector<void*> buffer_pointers;
buffer_pointers.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
CHECK(buffer_info.buffer.IsAvailable());
if (buffer_info.buffer.IsError()) {
execute_event.SetError(
absl::StrCat("Error preparing computation: %s",
buffer_info.buffer.GetError().message()));
return;
}
buffer_pointers.push_back(buffer_info.buffer->data());
}
void* result_buffer = buffer_pointers[result_buffer_index];
absl::Status status;
if (cpu_executable->has_compute_function()) {
XlaCustomCallStatus compute_function_status;
cpu_executable->compute_function()(
result_buffer, &run_options, nullptr, buffer_pointers.data(),
&compute_function_status, nullptr);
if (auto error_message =
xla::CustomCallStatusGetMessage(&compute_function_status)) {
status =
Internal("Generated function failed: %s", *error_message);
}
} else if (cpu_executable->has_thunks()) {
absl::InlinedVector<MaybeOwningDeviceMemory, 8> buffer_device_mem;
buffer_device_mem.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
buffer_device_mem.emplace_back(se::DeviceMemoryBase(
buffer_info.buffer->data(), buffer_info.buffer->size()));
}
cpu::BufferAllocations allocations(buffer_device_mem);
absl::StatusOr<cpu::Thunk::CollectiveExecuteParams>
collective_params =
cpu::Thunk::CollectiveExecuteParams::Create(&run_options);
absl::StatusOr<cpu::Thunk::CustomCallExecuteParams>
custom_call_params =
cpu::Thunk::CustomCallExecuteParams::Create(&run_options);
cpu::Thunk::TaskRunner task_runner =
[&run_options](cpu::Thunk::Task task) {
run_options.intra_op_thread_pool()->getPool()->Schedule(
std::move(task));
};
if (collective_params.ok()) {
cpu::Thunk::ExecuteParams execute_params = {
&cpu_executable->function_registry(),
&allocations,
cpu::runtime::GetXfeedManager(run_options.device_ordinal()),
run_options.intra_op_thread_pool(),
&task_runner,
&*collective_params,
&*custom_call_params};
auto thunks_execute_event =
cpu_executable->thunks().Execute(execute_params);
tsl::profiler::TraceMe trace(
"ThunkExecutor::Execute (wait for completion)");
tsl::BlockUntilReady(thunks_execute_event);
status = thunks_execute_event.IsError()
? thunks_execute_event.GetError()
: absl::OkStatus();
} else {
status = collective_params.status();
}
} else {
status =
Internal("CpuExecutable has no compute function or thunks.");
}
for (auto& donation_transaction : donation_transactions) {
std::move(donation_transaction).Commit();
}
if (!status.ok()) {
execute_event.SetError(std::move(status));
return;
}
execute_event.SetStateConcrete();
});
}
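  // Wrap the raw result buffers into PjRtBuffers. With untuple_result, each
  // tuple element becomes its own leaf buffer; otherwise a single (possibly
  // tuple-shaped) output buffer is returned.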
const Shape& result_shape = cpu_executable_->result_shape();
std::vector<std::unique_ptr<PjRtBuffer>> res;
if (options.untuple_result && result_shape.IsTuple()) {
res.reserve(result_buffers_info.size());
for (int i = 0; i < result_buffers_info.size(); ++i) {
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events;
definition_events.push_back(execute_event.CopyRef());
auto leaf_tracked_device_buffer =
std::make_unique<TrackedTfrtCpuDeviceBuffer>(
false, result_buffers_info[i].owns_buffer,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>{
std::move(result_buffers_info[i].buffer)},
absl::InlinedVector<size_t, 4>{
result_buffers_info[i].buffer_size},
std::move(definition_events));
auto leaf_buffer = std::make_unique<TfrtCpuBuffer>(
result_shape.tuple_shapes(i), std::move(leaf_tracked_device_buffer),
client_, device, *device->default_memory_space());
res.push_back(std::move(leaf_buffer));
}
} else {
bool owns_buffers = true;
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>
sub_buffers;
absl::InlinedVector<size_t, 4> sub_buffer_sizes;
sub_buffers.reserve(result_buffers_info.size());
sub_buffer_sizes.reserve(result_buffers_info.size());
for (int i = 0; i < result_buffers_info.size(); ++i) {
owns_buffers = owns_buffers && result_buffers_info[i].owns_buffer;
sub_buffers.push_back(std::move(result_buffers_info[i].buffer));
sub_buffer_sizes.push_back(result_buffers_info[i].buffer_size);
}
auto tracked_device_buffer = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
result_shape.IsTuple(), owns_buffers,
std::move(sub_buffers), std::move(sub_buffer_sizes),
execute_event);
auto tfrt_output_buffer = std::make_unique<TfrtCpuBuffer>(
result_shape, std::move(tracked_device_buffer), client_, device,
*device->default_memory_space());
res.push_back(std::move(tfrt_output_buffer));
}
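  // Optionally expose the execution event as a PjRtFuture so callers can wait
  // for completion and observe any execution error.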
std::optional<PjRtFuture<>> future;
if (fill_future) {
PjRtFuture<>::Promise promise = PjRtFuture<>::CreatePromise();
execute_event.AndThen([promise, event = execute_event.CopyRef()]() mutable {
if (auto* error = event.GetErrorIfPresent()) {
promise.Set(Internal("Compute error: %s", error->message()));
} else {
promise.Set();
}
});
future = PjRtFuture<>(std::move(promise));
}
return Result({std::move(future), std::move(res)});
}
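// Dumps an HloSnapshot (HLO module plus argument and result literals) when
// snapshot dumping is enabled in the module's debug options.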
static void MaybeDumpHloSnapshot(
const HloModule& module, RunId run_id,
const std::vector<PjRtBuffer*>& arguments,
const std::vector<std::unique_ptr<PjRtBuffer>>& results) {
if (!DumpingEnabledForHloModule(module)) {
return;
}
if (!module.config().debug_options().xla_dump_hlo_snapshots()) {
return;
}
xla::HloSnapshot hlo_snapshot;
*hlo_snapshot.mutable_hlo()->mutable_hlo_module() = module.ToProto();
for (auto* argument : arguments) {
*hlo_snapshot.add_arguments() = (*argument->ToLiteralSync())->ToProto();
}
if (results.size() == 1) {
*hlo_snapshot.mutable_result() = (*results[0]->ToLiteralSync())->ToProto();
} else {
std::vector<Literal> result_literals;
result_literals.reserve(results.size());
for (auto& result : results) {
result_literals.push_back(std::move(**result->ToLiteralSync()));
}
*hlo_snapshot.mutable_result() =
LiteralUtil::MakeTupleOwned(std::move(result_literals)).ToProto();
}
DumpToFileInDir(module, "", absl::StrCat("snapshot.", run_id.ToInt(), ".pb"),
hlo_snapshot.SerializeAsString());
}
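// Runs one replica/partition per addressable device. The single-device case
// executes directly on the caller; the multi-device case fans the launches out
// to the client's thread pool and reports the first failure, if any.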
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
TfrtCpuExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
tsl::profiler::TraceMe traceme("TfrtCpuExecutable::Execute");
if (device_assignment_ == nullptr) {
return InvalidArgument("Execute expects a non-null device_assignment");
}
RunId run_id;
tsl::profiler::TraceMeProducer activity("TfrtCpuExecutable::Execute",
tsl::profiler::ContextType::kPjRt,
run_id.ToInt());
const int num_addressable_devices = addressable_devices_.size();
if (argument_handles.size() != num_addressable_devices) {
return InvalidArgument(
"Attempted to execute with %d argument lists when local device "
"count is %d (total replica count: %d, partition count: %d)",
argument_handles.size(), num_addressable_devices, num_replicas(),
num_partitions());
}
VLOG(1) << "Executing computation " << name()
<< "; num_replicas=" << num_replicas()
<< " num_partitions=" << num_partitions()
<< " num_addressable_devices=" << num_addressable_devices;
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> wrapped_results(
num_addressable_devices);
if (returned_futures.has_value()) {
returned_futures->resize(num_addressable_devices);
}
if (num_addressable_devices == 1) {
const int replica = addressable_device_logical_ids_[0].replica;
const int partition = addressable_device_logical_ids_[0].partition;
MaybeDumpHloSnapshot(cpu_executable_->module(), run_id, argument_handles[0],
{});
auto statusor = ExecuteHelper(
argument_handles[0], replica, partition, run_id, options,
tsl::AsyncValueRef<CpuEvent>(),
returned_futures.has_value());
if (!statusor.ok()) {
return std::move(statusor).status();
}
wrapped_results[0] = std::move(statusor->buffers);
if (returned_futures.has_value()) {
(*returned_futures)[0] = std::move(*statusor->future);
}
MaybeDumpHloSnapshot(cpu_executable_->module(), run_id, argument_handles[0],
wrapped_results[0]);
} else {
tsl::AsyncValueRef<CpuEvent> last_collective_launch_event =
client_->GetLastCollectiveLaunchEvent();
absl::Mutex mu;
int running = num_addressable_devices;
int failed = 0;
absl::Status first_failure_status;
for (int i = 0; i < num_addressable_devices; ++i) {
const int replica = addressable_device_logical_ids_[i].replica;
const int partition = addressable_device_logical_ids_[i].partition;
auto* thread_pool = client()->pjrt_client_thread_pool();
EnqueueWork(thread_pool, [&, replica, partition, i] {
auto statusor =
ExecuteHelper(argument_handles[i], replica, partition, run_id,
options, last_collective_launch_event.CopyRef(),
returned_futures.has_value());
if (statusor.ok()) {
wrapped_results[i] = std::move(statusor->buffers);
if (returned_futures.has_value()) {
(*returned_futures)[i] = std::move(*statusor->future);
}
}
absl::MutexLock lock(&mu);
--running;
if (!statusor.ok()) {
if (failed == 0) {
first_failure_status = AppendStatus(
std::move(statusor).status(),
absl::StrFormat(
"while running replica %d and partition %d of a "
"replicated computation (other "
"replicas may have failed as well).",
replica, partition));
}
++failed;
}
});
}
{
auto done_running = [&]() {
mu.AssertHeld();
return running == 0;
};
absl::MutexLock lock(&mu);
mu.Await(absl::Condition(&done_running));
}
if (!first_failure_status.ok()) return first_failure_status;
}
VLOG(1) << "Replicated execution complete.";
return wrapped_results;
}
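// Executes the computation on one specific addressable device, using that
// device's replica/partition from the executable's fixed device assignment.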
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfrtCpuExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
tsl::profiler::TraceMe traceme("TfrtCpuExecutable::ExecuteSharded");
if (device_assignment_ == nullptr) {
return InvalidArgument("ExecuteShard expects a non-null device_assignment");
}
for (int i = 0; i < addressable_devices_.size(); ++i) {
if (addressable_devices_[i] == device) {
VLOG(1) << "ExecuteShard executes computation " << name()
<< " on assigned replica/partition on device "
<< device->DebugString();
TF_ASSIGN_OR_RETURN(
auto result,
ExecuteHelper(
argument_handles, addressable_device_logical_ids_[i].replica,
addressable_device_logical_ids_[i].partition, RunId(), options,
tsl::AsyncValueRef<CpuEvent>(), fill_future));
returned_future = std::move(result.future);
return std::move(result.buffers);
}
}
return InvalidArgument(
"ExecuteShard attempted to execute on device id %d which is not "
"addressable by this client",
device->id());
}
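// Executes a portable executable (compiled without a device assignment) on the
// caller-specified device.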
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfrtCpuExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
bool fill_future) {
tsl::profiler::TraceMe traceme("TfrtCpuExecutable::ExecutePortable");
if (device_assignment_ != nullptr) {
return InvalidArgument("ExecutePortable gets a non-portable executable");
}
if (num_replicas() != 1 || num_partitions() != 1) {
return InvalidArgument(
"ExecutePortable expects a single-core executable but gets "
"one with %d replica %d partition",
num_replicas(), num_partitions());
}
if (device == nullptr) {
return InvalidArgument("ExecutePortable expects a device to be specified");
}
VLOG(1) << "ExecutePortable executes single-core portable executable "
<< name();
TF_ASSIGN_OR_RETURN(
auto result,
ExecuteHelper(
argument_handles,
0,
0, RunId(), options,
tsl::AsyncValueRef<CpuEvent>(),
fill_future, tensorflow::down_cast<TfrtCpuDevice*>(device)));
returned_future = std::move(result.future);
return std::move(result.buffers);
}
} | #include "xla/pjrt/cpu/cpu_client.h"
#include "xla/service/hlo.pb.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::IsFalse;
using ::tsl::testing::IsOkAndHolds;
static absl::Status TestError(ffi::AnyBuffer, ffi::Result<ffi::AnyBuffer>,
ffi::Result<ffi::AnyBuffer>) {
return absl::InternalError("test error.");
}
XLA_FFI_DEFINE_HANDLER(kTestError, TestError,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$TestError", "Host",
kTestError);
TEST(TfrtCpuClientTest, MemorySpace) {
TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(CpuClientOptions()));
ASSERT_GE(client->devices().size(), 1);
ASSERT_EQ(client->memory_spaces().size(),
client->addressable_devices().size());
for (auto* device : client->devices()) {
TF_ASSERT_OK_AND_ASSIGN(auto* memory_space, device->default_memory_space());
EXPECT_THAT(device->memory_spaces(), ElementsAre(memory_space));
EXPECT_EQ(memory_space->kind(), UnpinnedHostMemorySpace::kKind);
EXPECT_EQ(memory_space->kind_id(), UnpinnedHostMemorySpace::kKindId);
EXPECT_THAT(device->memory_space_by_kind(UnpinnedHostMemorySpace::kKind),
IsOkAndHolds(memory_space));
}
}
TEST(TfrtCpuClientTest, DonationWithExecutionError) {
static constexpr char kProgram[] =
R"(
HloModule DonationWithExecutionError,
input_output_alias={ {}: (0, {}, must-alias) }
ENTRY DonationWithExecutionError() -> f32[2, 2] {
%input = f32[2, 2] parameter(0)
%custom-call = (f32[2, 2], u8[0]) custom-call(%input),
custom_call_target="__xla_test$$TestError",
api_version=API_VERSION_TYPED_FFI,
output_to_operand_aliasing={{0}: (0, {})}
ROOT %result = f32[2, 2] get-tuple-element(%custom-call), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(CpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, {}));
std::vector<float> data(4, 0);
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer,
client->BufferFromHostBuffer(
data.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto result = pjrt_executable->Execute({{buffer.get()}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(), HasSubstr("test error."));
result = pjrt_executable->Execute({{buffer.get()}},
{});
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
HasSubstr("buffer has been deleted or donated."));
}
TEST(TfrtCpuClientTest, HloSnapshot) {
static constexpr char kProgram[] = R"(
HloModule add
ENTRY add {
x = f32[3,2] parameter(0)
y = f32[3,2] parameter(1)
ROOT add = f32[3,2] add(x, y)
})";
CpuClientOptions cpu_options;
cpu_options.cpu_device_count = 1;
TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(cpu_options));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
std::string dir = tsl::testing::TmpDir();
xla::CompileOptions options;
auto* debug_opts = options.executable_build_options.mutable_debug_options();
debug_opts->set_xla_dump_to(dir);
debug_opts->set_xla_dump_hlo_snapshots(true);
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, options));
std::vector<float> data1{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
std::vector<float> data2{10.0, 20.0, 30.0, 40.0, 50.0, 60.0};
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer1,
client->BufferFromHostBuffer(
data1.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer2,
client->BufferFromHostBuffer(
data2.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto result = pjrt_executable->Execute(
{{buffer1.get(), buffer2.get()}},
{});
ASSERT_TRUE(result.ok());
tsl::FileSystem* fs;
ASSERT_TRUE(tsl::Env::Default()->GetFileSystemForFile(dir, &fs).ok());
std::vector<std::string> paths;
ASSERT_TRUE(fs->GetMatchingPaths(dir + "false,
[]() {}));
TF_ASSERT_OK(transfer_manager->TransferRawDataToSubBuffer(
0, raw_data_view.data(), raw_data_size - 1, 1, true,
[]() {}));
TF_ASSERT_OK_AND_ASSIGN(auto literal, buffer->ToLiteralSync());
ASSERT_EQ(literal->element_count(), 3 * 2);
EXPECT_THAT(literal->data<uint32_t>(), Each(0x42424242));
}
struct MemsetValue {
explicit MemsetValue(float value) : value(value) {}
float value;
};
static absl::Status MemsetFromValue(
ffi::Result<ffi::BufferR1<PrimitiveType::F32>> result,
MemsetValue* memset_value) {
for (size_t i = 0; i < result->element_count(); ++i) {
result->typed_data()[i] = memset_value->value;
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kMemsetFromValue, MemsetFromValue,
ffi::Ffi::Bind()
.Ret<ffi::BufferR1<PrimitiveType::F32>>()
.Ctx<ffi::UserData<MemsetValue>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "MemsetFromValue", "HOST",
kMemsetFromValue);
TEST(TfrtCpuClientTest, ForwardUserDataToFfiHandler) {
static constexpr char const* kProgram = R"(
HloModule ffi_handler
ENTRY main {
ROOT %custom-call = f32[4] custom-call(),
custom_call_target="MemsetFromValue",
api_version=API_VERSION_TYPED_FFI
})";
TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(CpuClientOptions()));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto executable,
client->Compile(xla_computation, {}));
ExecuteContext context;
TF_ASSERT_OK(context.ffi_context().Emplace<MemsetValue>(42.0f));
ExecuteOptions opts;
opts.context = &context;
auto result = executable->Execute({{}}, opts);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
result->at(0).at(0)->ToLiteralSync());
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({42.0f, 42.0f, 42.0f, 42.0f}),
*result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e93ee444-09bb-4afd-933a-0cbe6a24366c | cpp | tensorflow/tensorflow | tracked_tfrt_cpu_device_buffer | third_party/xla/xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer.cc | third_party/xla/xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer_test.cc | #include "xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <utility>
#include "absl/base/casts.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/cpu/cpu_event.h"
#include "xla/shape_util.h"
#include "xla/tsl/concurrency/async_value_ref.h"
namespace xla {
namespace {
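// Returns an event that becomes ready once all `events` have become ready.
// If any input event is in an error state, the returned event is set to one
// of the input errors.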
tsl::AsyncValueRef<CpuEvent> AfterAll(
absl::Span<const tsl::AsyncValueRef<CpuEvent>> events) {
if (events.empty()) return tsl::MakeAvailableAsyncValueRef<CpuEvent>();
struct State {
State(int count, tsl::AsyncValueRef<CpuEvent> after_all)
: count(count), after_all(std::move(after_all)) {}
std::atomic<int> count;
tsl::AsyncValueRef<CpuEvent> after_all;
absl::Mutex mutex;
absl::Status error;
};
auto after_all = tsl::MakeConstructedAsyncValueRef<CpuEvent>();
auto* state = new State(events.size(), after_all);
for (auto& event : events) {
event.AndThen([state, event = event.AsPtr()]() {
if (event.IsError()) {
absl::MutexLock lock(&state->mutex);
state->error = event.GetError();
}
if (state->count.fetch_sub(1, std::memory_order_acq_rel) == 1) {
if (!state->error.ok()) {
state->after_all.SetError(state->error);
} else {
state->after_all.SetStateConcrete();
}
delete state;
}
});
}
return after_all;
}
}
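// The first two constructors fold a list of per-buffer definition events into
// a single event via AfterAll() and delegate to the single-event constructors
// below.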
TrackedTfrtCpuDeviceBuffer::TrackedTfrtCpuDeviceBuffer(
bool is_tuple, bool owns_buffers,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers,
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events,
absl::AnyInvocable<void() &&> on_delete_callback)
: TrackedTfrtCpuDeviceBuffer(is_tuple, owns_buffers, std::move(buffers),
AfterAll(definition_events),
std::move(on_delete_callback)) {}
TrackedTfrtCpuDeviceBuffer::TrackedTfrtCpuDeviceBuffer(
bool is_tuple, bool owns_buffers,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers,
absl::InlinedVector<size_t, 4> buffer_sizes,
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events,
absl::AnyInvocable<void() &&> on_delete_callback)
: TrackedTfrtCpuDeviceBuffer(
is_tuple, owns_buffers, std::move(buffers), std::move(buffer_sizes),
AfterAll(definition_events), std::move(on_delete_callback)) {}
TrackedTfrtCpuDeviceBuffer::TrackedTfrtCpuDeviceBuffer(
bool is_tuple, bool owns_buffers,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers,
tsl::AsyncValueRef<CpuEvent> definition_event,
absl::AnyInvocable<void() &&> on_delete_callback)
: is_tuple_(is_tuple),
owns_buffers_(owns_buffers),
buffers_(std::move(buffers)),
definition_event_(std::move(definition_event)),
on_delete_callback_(std::move(on_delete_callback)) {
DCHECK(definition_event_);
for (const auto& buffer : buffers_) {
CHECK(buffer.IsConcrete());
buffer_sizes_.push_back(buffer->size());
}
if (is_tuple) {
size_t index_table_byte_size = buffers_.size() * sizeof(void*);
tuple_index_table_ =
MaybeOwningCpuMemory::AllocateAvailableAvr(index_table_byte_size)
.value();
uintptr_t* index_table =
reinterpret_cast<uintptr_t*>(tuple_index_table_->data());
for (int i = 0; i < buffers_.size(); ++i) {
index_table[i] = absl::bit_cast<uintptr_t>(buffers_[i]->data());
}
}
}
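// Delayed-allocation variant: buffer sizes are known up front, but the leaf
// buffers may not be backed by memory yet, so for tuples the index table is
// filled in only once all leaf buffers become concrete.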
TrackedTfrtCpuDeviceBuffer::TrackedTfrtCpuDeviceBuffer(
bool is_tuple, bool owns_buffers,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers,
absl::InlinedVector<size_t, 4> buffer_sizes,
tsl::AsyncValueRef<CpuEvent> definition_event,
absl::AnyInvocable<void() &&> on_delete_callback)
: is_tuple_(is_tuple),
owns_buffers_(owns_buffers),
buffers_(std::move(buffers)),
buffer_sizes_(std::move(buffer_sizes)),
definition_event_(std::move(definition_event)),
on_delete_callback_(std::move(on_delete_callback)) {
DCHECK(definition_event_);
if (is_tuple) {
tuple_index_table_ =
tsl::MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
tsl::RunWhenReady(
absl::MakeConstSpan(buffers_),
[buffers = buffers_, tuple_index_table = tuple_index_table_] {
size_t index_table_byte_size = buffers.size() * sizeof(void*);
tuple_index_table.emplace(
MaybeOwningCpuMemory::Allocate(index_table_byte_size).value());
uintptr_t* index_table =
reinterpret_cast<uintptr_t*>(tuple_index_table->data());
for (int i = 0; i < buffers.size(); ++i) {
index_table[i] = absl::bit_cast<uintptr_t>(buffers[i]->data());
}
});
}
}
TrackedTfrtCpuDeviceBuffer::~TrackedTfrtCpuDeviceBuffer() {
ReleaseDeviceMemory();
if (on_delete_callback_) {
std::move(on_delete_callback_)();
}
}
tsl::AsyncValueRef<MaybeOwningCpuMemory> TrackedTfrtCpuDeviceBuffer::Buffer(
const ShapeIndex& shape_index) {
if (shape_index.empty()) {
if (is_tuple_) return tuple_index_table_;
return buffers_[0];
}
CHECK(is_tuple_);
CHECK_EQ(shape_index.size(), 1) << "nested tuple not supported";
return buffers_[shape_index[0]];
}
size_t TrackedTfrtCpuDeviceBuffer::BufferSize(const ShapeIndex& shape_index) {
if (shape_index.empty()) {
if (is_tuple_) return buffers_.size() * sizeof(void*);
return buffer_sizes_[0];
}
CHECK(is_tuple_);
CHECK_EQ(shape_index.size(), 1) << "nested tuple not supported";
return buffer_sizes_[shape_index[0]];
}
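// Records usage events for this buffer. When the list grows large, events that
// have already completed are dropped first to bound its size.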
void TrackedTfrtCpuDeviceBuffer::AddUsageEvents(
absl::Span<tsl::AsyncValueRef<CpuEvent>> events) {
if (usage_events_.size() >= 1024) {
int i = 0;
while (i < usage_events_.size()) {
auto& event = usage_events_[i];
if (event.IsAvailable()) {
using std::swap;
swap(event, usage_events_.back());
usage_events_.pop_back();
continue;
}
++i;
}
}
for (auto& ev : events) {
usage_events_.push_back(std::move(ev));
}
}
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4>
TrackedTfrtCpuDeviceBuffer::LockUseAndTransferUsageEvents() {
return std::move(usage_events_);
}
void TrackedTfrtCpuDeviceBuffer::ReleaseDeviceMemory() {
tuple_index_table_.reset();
buffers_.clear();
definition_event_.reset();
usage_events_.clear();
}
} | #include "xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer.h"
#include <cstring>
#include <string>
#include <gtest/gtest.h>
#include "xla/service/cpu/cpu_event.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using ::tsl::BlockUntilReady;
using ::tsl::MakeConstructedAsyncValueRef;
using ::tsl::MakeUnconstructedAsyncValueRef;
using ::tsl::thread::ThreadPool;
TEST(TrackedTfrtCpuDeviceBufferTest, Basic) {
std::string expected = "tracked_tfrt_cpu_device_buffer_test";
TF_ASSERT_OK_AND_ASSIGN(
auto buffer, MaybeOwningCpuMemory::AllocateAvailableAvr(expected.size()));
auto definition_event = MakeConstructedAsyncValueRef<CpuEvent>();
ThreadPool thread_pool(tsl::Env::Default(), "tracked_buffer_test",
4);
thread_pool.Schedule([&]() {
std::memcpy(buffer->data(), expected.data(), expected.size());
definition_event.SetStateConcrete();
});
TrackedTfrtCpuDeviceBuffer tracked_buffer(
false, true, {buffer}, definition_event,
nullptr);
BlockUntilReady(tracked_buffer.definition_event().GetAsyncValue());
auto result = tracked_buffer.Buffers()[0];
ASSERT_TRUE(result.IsAvailable());
EXPECT_EQ(
std::string(static_cast<const char*>(result->data()), result->size()),
expected);
}
TEST(TrackedTfrtCpuDeviceBufferTest, Tuple) {
std::string expected_0 = "tracked_tfrt_cpu_device_buffer_test";
std::string expected_1 = "tuple";
TF_ASSERT_OK_AND_ASSIGN(
auto buffer_0,
MaybeOwningCpuMemory::AllocateAvailableAvr(expected_0.size()));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer_1,
MaybeOwningCpuMemory::AllocateAvailableAvr(expected_1.size()));
auto definition_event_0 = MakeConstructedAsyncValueRef<CpuEvent>();
auto definition_event_1 = MakeConstructedAsyncValueRef<CpuEvent>();
ThreadPool thread_pool(tsl::Env::Default(), "tracked_buffer_test",
4);
thread_pool.Schedule([&]() {
std::memcpy(buffer_0->data(), expected_0.data(), expected_0.size());
definition_event_0.SetStateConcrete();
});
thread_pool.Schedule([&]() {
std::memcpy(buffer_1->data(), expected_1.data(), expected_1.size());
definition_event_1.SetStateConcrete();
});
TrackedTfrtCpuDeviceBuffer tracked_buffer(
true, true, {buffer_0, buffer_1},
{definition_event_0, definition_event_1},
nullptr);
BlockUntilReady(tracked_buffer.definition_event().GetAsyncValue());
auto result_0 = tracked_buffer.Buffers()[0];
auto result_1 = tracked_buffer.Buffers()[1];
ASSERT_TRUE(result_0.IsAvailable());
ASSERT_TRUE(result_1.IsAvailable());
EXPECT_EQ(
std::string(static_cast<const char*>(result_0->data()), result_0->size()),
expected_0);
EXPECT_EQ(
std::string(static_cast<const char*>(result_1->data()), result_1->size()),
expected_1);
}
TEST(TrackedTfrtCpuDeviceBufferTest, BasicError) {
TF_ASSERT_OK_AND_ASSIGN(auto buffer,
MaybeOwningCpuMemory::AllocateAvailableAvr(64));
auto definition_event = MakeConstructedAsyncValueRef<CpuEvent>();
ThreadPool thread_pool(tsl::Env::Default(), "tracked_buffer_test",
4);
thread_pool.Schedule([&]() {
definition_event.SetError("tracked_tfrt_cpu_device_buffer_test error.");
});
TrackedTfrtCpuDeviceBuffer tracked_buffer(
false, true, {buffer}, definition_event,
nullptr);
BlockUntilReady(tracked_buffer.definition_event().GetAsyncValue());
ASSERT_TRUE(tracked_buffer.definition_event().IsError());
EXPECT_EQ(tracked_buffer.definition_event().GetError().message(),
"tracked_tfrt_cpu_device_buffer_test error.");
}
TEST(TrackedTfrtCpuDeviceBufferTest, TupleError) {
std::string expected = "tracked_tfrt_cpu_device_buffer_test";
TF_ASSERT_OK_AND_ASSIGN(
auto buffer_0,
MaybeOwningCpuMemory::AllocateAvailableAvr(expected.size()));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer_1,
MaybeOwningCpuMemory::AllocateAvailableAvr(expected.size()));
auto definition_event_0 = MakeConstructedAsyncValueRef<CpuEvent>();
auto definition_event_1 = MakeConstructedAsyncValueRef<CpuEvent>();
ThreadPool thread_pool(tsl::Env::Default(), "tracked_buffer_test",
4);
thread_pool.Schedule([&]() {
std::memcpy(buffer_0->data(), expected.data(), expected.size());
definition_event_0.SetStateConcrete();
});
thread_pool.Schedule([&]() {
definition_event_1.SetError(
"tracked_tfrt_cpu_device_buffer_test tuple error.");
});
TrackedTfrtCpuDeviceBuffer tracked_buffer(
true, true, {buffer_0, buffer_1},
{definition_event_0, definition_event_1},
nullptr);
BlockUntilReady(tracked_buffer.definition_event().GetAsyncValue());
ASSERT_TRUE(tracked_buffer.definition_event().IsError());
EXPECT_EQ(tracked_buffer.definition_event().GetError().message(),
"tracked_tfrt_cpu_device_buffer_test tuple error.");
}
TEST(TrackedTfrtCpuDeviceBufferTest, DelayedAllocation) {
std::string expected = "tracked_tfrt_cpu_device_buffer_test";
auto buffer = MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
auto malloc_event = MakeConstructedAsyncValueRef<CpuEvent>();
malloc_event.AndThen([buffer_copy = buffer.CopyRef(),
buffer_size = expected.size()] {
buffer_copy.emplace(MaybeOwningCpuMemory::Allocate(buffer_size).value());
});
auto definition_event = MakeConstructedAsyncValueRef<CpuEvent>();
TrackedTfrtCpuDeviceBuffer tracked_buffer(false,
true, {buffer},
{expected.size()}, definition_event,
nullptr);
auto result = tracked_buffer.Buffers()[0];
ASSERT_FALSE(result.IsAvailable());
ASSERT_EQ(tracked_buffer.BufferSizes()[0], expected.size());
ThreadPool thread_pool(tsl::Env::Default(), "tracked_buffer_test",
4);
thread_pool.Schedule([&]() {
malloc_event.SetStateConcrete();
std::memcpy(buffer->data(), expected.data(), expected.size());
definition_event.SetStateConcrete();
});
BlockUntilReady(tracked_buffer.definition_event().GetAsyncValue());
EXPECT_EQ(
std::string(static_cast<const char*>(result->data()), result->size()),
expected);
}
TEST(TrackedTfrtCpuDeviceBufferTest, DelayedAllocationTuple) {
std::string expected_0 = "tracked_tfrt_cpu_device_buffer_test";
std::string expected_1 = "tuple";
auto buffer_0 = MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
auto malloc_event_0 = MakeConstructedAsyncValueRef<CpuEvent>();
malloc_event_0.AndThen(
[buffer_0_copy = buffer_0.CopyRef(), buffer_0_size = expected_0.size()] {
buffer_0_copy.emplace(
MaybeOwningCpuMemory::Allocate(buffer_0_size).value());
});
auto buffer_1 = MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
auto malloc_event_1 = MakeConstructedAsyncValueRef<CpuEvent>();
malloc_event_1.AndThen(
[buffer_1_copy = buffer_1.CopyRef(), buffer_1_size = expected_1.size()] {
buffer_1_copy.emplace(
MaybeOwningCpuMemory::Allocate(buffer_1_size).value());
});
auto definition_event_0 = MakeConstructedAsyncValueRef<CpuEvent>();
auto definition_event_1 = MakeConstructedAsyncValueRef<CpuEvent>();
TrackedTfrtCpuDeviceBuffer tracked_buffer(
true,
true, {buffer_0, buffer_1},
{expected_0.size(), expected_1.size()},
{definition_event_0, definition_event_1},
nullptr);
auto result_0 = tracked_buffer.Buffers()[0];
auto result_1 = tracked_buffer.Buffers()[1];
ASSERT_FALSE(result_0.IsAvailable());
ASSERT_FALSE(result_1.IsAvailable());
ASSERT_EQ(tracked_buffer.BufferSizes()[0], expected_0.size());
ASSERT_EQ(tracked_buffer.BufferSizes()[1], expected_1.size());
ThreadPool thread_pool(tsl::Env::Default(), "tracked_buffer_test",
4);
thread_pool.Schedule([&]() {
malloc_event_0.SetStateConcrete();
std::memcpy(buffer_0->data(), expected_0.data(), expected_0.size());
definition_event_0.SetStateConcrete();
});
thread_pool.Schedule([&]() {
malloc_event_1.SetStateConcrete();
std::memcpy(buffer_1->data(), expected_1.data(), expected_1.size());
definition_event_1.SetStateConcrete();
});
BlockUntilReady(tracked_buffer.definition_event().GetAsyncValue());
EXPECT_EQ(
std::string(static_cast<const char*>(result_0->data()), result_0->size()),
expected_0);
EXPECT_EQ(
std::string(static_cast<const char*>(result_1->data()), result_1->size()),
expected_1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18d40d61-c549-4e2b-ba54-b152b402180e | cpp | tensorflow/tensorflow | client | third_party/xla/xla/client/client.cc | third_party/xla/xla/tests/client_test.cc | #include "xla/client/client.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/execution_options_util.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
Client::Client(Service* stub) : stub_(stub) {}
Client::~Client() = default;
absl::StatusOr<Literal> Client::Transfer(const GlobalData& data,
const Shape* shape_with_layout) {
return stub_->TransferToClient(data, shape_with_layout);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::TransferToServer(
const LiteralSlice& literal, const DeviceHandle* device_handle) {
return stub_->TransferToServer(literal, device_handle);
}
absl::Status Client::TransferToInfeed(const LiteralSlice& literal,
int64_t replica_id,
const DeviceHandle* device_handle) {
return stub_->TransferToInfeed(literal, replica_id, device_handle);
}
absl::StatusOr<Literal> Client::TransferFromOutfeed(
const Shape* shape_with_layout, int64_t replica_id,
const DeviceHandle* device_handle) {
return stub_->TransferFromOutfeed(shape_with_layout, replica_id,
device_handle);
}
absl::Status Client::ResetDevice() { return stub_->ResetDevice(); }
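// Convenience wrapper that executes the computation and transfers the result
// back as a literal, honoring any output layout requested in the execution
// options.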
absl::StatusOr<Literal> Client::ExecuteAndTransfer(
const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<GlobalData> data,
Execute(computation, arguments, execution_options, execution_profile));
std::optional<Shape> shape_with_output_layout;
if (execution_options && execution_options->has_shape_with_output_layout()) {
shape_with_output_layout =
Shape(execution_options->shape_with_output_layout());
}
return Transfer(*data, shape_with_output_layout.has_value()
? &(*shape_with_output_layout)
: nullptr);
}
absl::StatusOr<Literal> Client::ComputeConstant(
const XlaComputation& computation, const Layout* output_layout) const {
return stub_->ComputeConstantGraph(computation, output_layout);
}
absl::StatusOr<XlaComputation> Client::LoadSnapshot(const HloSnapshot& module) {
TF_RET_CHECK(module.has_hlo() && module.hlo().has_hlo_module());
return XlaComputation(module.hlo().hlo_module());
}
absl::StatusOr<ExecutionHandle> Client::Compile(
const XlaComputation& computation, absl::Span<const Shape> argument_shapes,
const ExecutionOptions* execution_options) {
std::optional<ExecutionOptions> opts;
if (!execution_options) {
opts = CreateDefaultExecutionOptions();
}
return stub_->Compile(computation, argument_shapes,
execution_options ? *execution_options : *opts);
}
absl::StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
const ExecutionHandle& handle, absl::Span<GlobalData* const> arguments,
ExecutionProfile* execution_profile) {
return stub_->Execute(handle, arguments, execution_profile);
}
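// Executes an uncompiled computation via ExecuteParallel. If the execution
// options carry no device handles, a default device is requested first; of the
// per-device results, the first with a non-empty-tuple shape is returned.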
absl::StatusOr<std::unique_ptr<GlobalData>> Client::Execute(
const XlaComputation& computation, absl::Span<GlobalData* const> arguments,
const ExecutionOptions* execution_options,
ExecutionProfile* execution_profile) {
std::optional<ExecutionOptions> options_storage;
if (!execution_options || execution_options->device_handles().empty()) {
if (execution_options) {
options_storage.emplace(*execution_options);
} else {
options_storage.emplace(CreateDefaultExecutionOptions());
}
execution_options = &*options_storage;
TF_ASSIGN_OR_RETURN(auto device_handles,
GetDeviceHandles(1));
TF_RET_CHECK(!device_handles.empty());
*options_storage->add_device_handles() = std::move(device_handles[0]);
}
std::vector<XlaComputationInstance> computation_instances = {
XlaComputationInstance{
computation,
std::vector<GlobalData*>(arguments.begin(), arguments.end()),
*execution_options, execution_profile}};
VLOG(1) << "Making ExecuteParallel request: "
<< execution_options->DebugString();
TF_ASSIGN_OR_RETURN(auto results, ExecuteParallel(computation_instances));
VLOG(1) << "ExecuteParallel request done.";
for (int64_t i = 0, end = results.size(); i < end; i++) {
TF_ASSIGN_OR_RETURN(const Shape& shape, GetShape(*results[i]));
if (!ShapeUtil::IsEmptyTuple(shape)) {
VLOG(3) << "Fetching result from device " << i << ": "
<< ShapeUtil::HumanString(shape);
return std::move(results[i]);
}
}
TF_RET_CHECK(!results.empty());
VLOG(1) << "Defaulting to device 0 result";
return std::move(results[0]);
}
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>>
Client::ExecuteParallel(absl::Span<const XlaComputationInstance> computations) {
return stub_->ExecuteGraphParallel(computations);
}
absl::StatusOr<std::vector<DeviceHandle>> Client::GetDeviceHandles(
int64_t device_count) {
if (device_count < 1) {
return InvalidArgument("device_count must be greater than 0");
}
return stub_->GetDeviceHandles(device_count);
}
absl::Status Client::Unregister(const GlobalData& data) {
return stub_->Unregister(data.handle());
}
absl::StatusOr<std::vector<std::unique_ptr<GlobalData>>>
Client::DeconstructTuple(const GlobalData& data) {
return stub_->DeconstructTuple(data);
}
absl::StatusOr<std::unique_ptr<ProgramShape>> Client::GetComputationShape(
const XlaComputation& computation) {
TF_ASSIGN_OR_RETURN(const auto& result, computation.GetProgramShape());
return std::make_unique<ProgramShape>(result);
}
absl::StatusOr<Shape> Client::GetShape(const GlobalData& data) {
return stub_->GetShape(data);
}
absl::StatusOr<ChannelHandle> Client::CreateChannelHandleByType(
ChannelHandle::ChannelType type) {
return stub_->CreateChannelHandle(type);
}
absl::StatusOr<ChannelHandle> Client::CreateChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_DEVICE);
}
absl::StatusOr<ChannelHandle> Client::CreateHostToDeviceChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::HOST_TO_DEVICE);
}
absl::StatusOr<ChannelHandle> Client::CreateDeviceToHostChannelHandle() {
return CreateChannelHandleByType(ChannelHandle::DEVICE_TO_HOST);
}
} | #include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/client/global_data.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class ClientTest : public ClientLibraryTestBase {};
XLA_TEST_F(ClientTest, ExecuteWithLayout) {
XlaBuilder b(TestName());
std::vector<std::vector<int64_t>> layouts = {{0, 1}, {1, 0}};
for (const std::vector<int64_t>& execute_layout : layouts) {
for (const std::vector<int64_t>& transfer_layout : layouts) {
Add(ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}),
ConstantR2<int32_t>(&b, {{10, 20}, {30, 40}}));
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
execute_layout)
.ToProto();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> data,
client_->Execute(computation, {}, &execution_options));
Literal expected_literal = LiteralUtil::CreateR2WithLayout<int32_t>(
{{11, 22}, {33, 44}}, LayoutUtil::MakeLayout(transfer_layout));
TF_ASSERT_OK_AND_ASSIGN(
auto computed, client_->Transfer(*data, &expected_literal.shape()));
ASSERT_TRUE(LiteralTestUtil::EqualShapesAndLayouts(
expected_literal.shape(), computed.shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(expected_literal, computed));
}
}
}
XLA_TEST_F(ClientTest, ExecuteWithTupleLayout) {
XlaBuilder b(TestName());
Tuple(&b, {ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}),
ConstantR2<int32_t>(&b, {{10, 20}, {30, 40}})});
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
ExecutionOptions execution_options = execution_options_;
*execution_options.mutable_shape_with_output_layout() =
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{1, 0})})
.ToProto();
TF_ASSERT_OK_AND_ASSIGN(
auto result,
client_->ExecuteAndTransfer(computation, {}, &execution_options));
LiteralTestUtil::ExpectR2Equal<int32_t>({{1, 2}, {3, 4}},
LiteralSlice(result, {0}));
LiteralTestUtil::ExpectR2Equal<int32_t>({{10, 20}, {30, 40}},
LiteralSlice(result, {1}));
EXPECT_TRUE(result.shape().IsTuple());
EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.shape()));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::GetTupleElementShape(result.shape(), 0),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{0, 1})));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::GetTupleElementShape(result.shape(), 1),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 2},
{1, 0})));
}
XLA_TEST_F(ClientTest,
DISABLED_ON_INTERPRETER(DISABLED_ON_GPU(ExecuteParallel))) {
XlaComputation add_with_one_arg, mul_with_two_args, dot_with_one_arg;
Shape shape = ShapeUtil::MakeShape(S32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GlobalData> const_arg,
client_->TransferToServer(
LiteralUtil::CreateR2<int32_t>({{5, 6}, {7, 8}})));
XlaBuilder b(TestName() + ".add");
Add(Parameter(&b, 0, shape, "param_0"),
ConstantR2<int32_t>(&b, {{1, 2}, {3, 4}}));
TF_ASSERT_OK_AND_ASSIGN(add_with_one_arg, b.Build());
std::vector<XlaComputationInstance> computation_instances;
TF_ASSERT_OK_AND_ASSIGN(std::vector<xla::DeviceHandle> devices,
client_->GetDeviceHandles(1));
ASSERT_EQ(devices.size(), 1);
ExecutionOptions options = execution_options_;
*options.add_device_handles() = devices[0];
computation_instances.push_back(XlaComputationInstance(
add_with_one_arg, {const_arg.get()}, options, nullptr));
TF_ASSERT_OK_AND_ASSIGN(auto results,
client_->ExecuteParallel(computation_instances));
auto expected_result = LiteralUtil::CreateR2<int32_t>({{6, 8}, {10, 12}});
TF_ASSERT_OK_AND_ASSIGN(
auto result_literal,
client_->Transfer(*results[0], &expected_result.shape()));
EXPECT_TRUE(LiteralTestUtil::Equal(expected_result, result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/client/client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d2611c8d-f70c-4648-a29c-f15b9d7fb929 | cpp | tensorflow/tensorflow | topology_util | third_party/xla/xla/pjrt/distributed/topology_util.cc | third_party/xla/xla/pjrt/distributed/topology_util_test.cc | #include "xla/pjrt/distributed/topology_util.h"
#include <algorithm>
#include <fstream>
#include <map>
#include <set>
#include <string>
#include <string_view>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/utils.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
static constexpr char kBootIdPath[] = "/proc/sys/kernel/random/boot_id";
absl::StatusOr<std::string> GetBootIdString() {
std::string boot_id_str;
#ifdef __linux__
std::ifstream file(kBootIdPath);
if (!file) {
return NotFound("%s not found.", kBootIdPath);
}
std::string line;
while (std::getline(file, line)) {
absl::StripAsciiWhitespace(&line);
absl::StrAppend(&boot_id_str, line);
}
#endif
return boot_id_str;
}
static std::string GetLocalTopologyKey(std::string_view platform, int node_id) {
return absl::StrCat("local_topology/", platform, "/", node_id);
}
static std::string GetGlobalTopologyKey(std::string_view platform) {
return absl::StrCat("global_topology/", platform);
}
static absl::StatusOr<std::vector<LocalTopologyProto>> GetAllLocalTopologies(
std::string_view platform, int num_nodes, KeyValueStoreInterface* kv_store,
absl::Duration timeout) {
std::vector<absl::StatusOr<std::string>> local_topology_strs(num_nodes);
tsl::thread::ThreadPool thread_pool(
tsl::Env::Default(), "GetAllLocalTopologies", DefaultThreadPoolSize());
absl::BlockingCounter blocking_counter(num_nodes);
absl::Mutex mu;
for (int i = 0; i < num_nodes; i++) {
thread_pool.Schedule([&, i] {
absl::StatusOr<std::string> local_topology_str =
kv_store->Get(GetLocalTopologyKey(platform, i), timeout);
{
absl::MutexLock lock(&mu);
local_topology_strs[i] = local_topology_str;
}
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
std::vector<std::string> error_messages;
std::vector<LocalTopologyProto> local_topologies;
int max_num_failed_message = 10;
int failed_count = 0;
for (const absl::StatusOr<std::string>& str : local_topology_strs) {
if (str.ok()) {
LocalTopologyProto local;
local.ParseFromString(*str);
local_topologies.push_back(local);
} else {
error_messages.push_back(
absl::StrCat("Error ", ++failed_count, ": ", str.status().message()));
if (failed_count > max_num_failed_message) {
break;
}
}
}
if (error_messages.empty()) {
return local_topologies;
}
return absl::InternalError(
absl::StrCat("Getting local topologies failed: ",
absl::StrJoin(error_messages, "\n\n")));
}
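// Merges the per-node local topologies into one global topology. Nodes that
// report the same boot_id are assigned the same slice index, and devices are
// optionally given consecutive global device ids.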
GlobalTopologyProto BuildGlobalTopology(
absl::Span<LocalTopologyProto> local_topologies,
bool assign_global_device_ids) {
GlobalTopologyProto global_topology;
int next_global_device_id = 0;
int next_slice_index = 0;
absl::flat_hash_map<std::string, int> boot_id_to_slice_index;
for (LocalTopologyProto& local : local_topologies) {
std::string_view boot_id = local.boot_id();
auto [it, inserted] =
boot_id_to_slice_index.try_emplace(boot_id, next_slice_index);
if (inserted) {
++next_slice_index;
}
for (DeviceProto& device : *local.mutable_devices()) {
if (assign_global_device_ids) {
device.set_global_device_id(next_global_device_id++);
}
device.set_slice_index(it->second);
}
global_topology.add_nodes()->Swap(&local);
}
if (VLOG_IS_ON(10)) {
for (auto it = boot_id_to_slice_index.begin();
it != boot_id_to_slice_index.end(); ++it) {
LOG(INFO) << "BuildGlobalTopology boot_id_to_slice_index " << it->first
<< "->" << it->second;
}
}
return global_topology;
}
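// Publishes this node's local topology to the KV store. Node 0 gathers all
// local topologies, builds the global topology, and publishes it; the other
// nodes read the published result.
//
// A minimal usage sketch (illustrative only; error handling and kv_store
// setup are elided):
//
//   GlobalTopologyProto global;
//   TF_RETURN_IF_ERROR(ExchangeTopologies(
//       "cuda", node_id, num_nodes, absl::Seconds(10), absl::Seconds(10),
//       kv_store, local_topology, &global,
//       /*assign_global_device_ids=*/true));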
absl::Status ExchangeTopologies(std::string_view platform, int node_id,
int num_nodes,
absl::Duration get_local_topology_timeout,
absl::Duration get_global_topology_timeout,
KeyValueStoreInterface* kv_store,
const LocalTopologyProto& local_topology,
GlobalTopologyProto* global_topology,
bool assign_global_device_ids) {
VLOG(3) << "Local Topology for platform" << platform << ":\n"
<< local_topology.DebugString();
if (num_nodes == 1) {
LocalTopologyProto* topology = global_topology->add_nodes();
*topology = local_topology;
for (DeviceProto& device : *topology->mutable_devices()) {
device.set_global_device_id(device.local_device_ordinal());
}
return absl::OkStatus();
}
CHECK(kv_store != nullptr);
TF_RETURN_IF_ERROR(kv_store->Set(GetLocalTopologyKey(platform, node_id),
local_topology.SerializeAsString()));
std::string global_topology_key = GetGlobalTopologyKey(platform);
if (node_id == 0) {
TF_ASSIGN_OR_RETURN(std::vector<LocalTopologyProto> local_topologies,
GetAllLocalTopologies(platform, num_nodes, kv_store,
get_local_topology_timeout));
*global_topology =
BuildGlobalTopology(absl::Span<LocalTopologyProto>(local_topologies),
assign_global_device_ids);
TF_RETURN_IF_ERROR(kv_store->Set(global_topology_key,
global_topology->SerializeAsString()));
} else {
TF_ASSIGN_OR_RETURN(
std::string global_topology_str,
kv_store->Get(global_topology_key, get_global_topology_timeout));
global_topology->ParseFromString(global_topology_str);
}
VLOG(3) << "Global topology for platform " << platform << ":\n"
<< global_topology->DebugString();
return absl::OkStatus();
}
bool IsGpuTopologySymmetric(
const std::map<int, std::set<int>>& slice_id_to_node_ids,
const std::map<int, int>& node_id_to_device_count) {
CHECK(!slice_id_to_node_ids.empty());
CHECK(!node_id_to_device_count.empty());
int num_hosts_per_slice = slice_id_to_node_ids.begin()->second.size();
int num_devices_per_host = node_id_to_device_count.begin()->second;
for (const auto& [slice_id, node_ids] : slice_id_to_node_ids) {
if (node_ids.size() != num_hosts_per_slice) {
LOG(INFO) << "GpuTopology is asymmetric as it has different number "
"of hosts per slice.";
return false;
}
}
for (const auto& [node_id, device_count] : node_id_to_device_count) {
if (device_count != num_devices_per_host) {
LOG(INFO) << "GpuTopology is asymmetric as it has different number "
"of devices per host.";
return false;
}
}
return true;
}
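// Summarizes a global topology into a GpuTopologyProto. For asymmetric
// topologies (differing hosts per slice or devices per host) the slice, host,
// and device counts are set to -1.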
absl::StatusOr<GpuTopologyProto> BuildGpuTopology(
const GlobalTopologyProto& global_topology) {
GpuTopologyProto gpu_topology;
std::map<int, std::set<int>> slice_id_to_node_ids;
std::map<int, int> node_id_to_device_count;
std::vector<int> device_ids;
for (int i = 0; i < global_topology.nodes_size(); ++i) {
const LocalTopologyProto& local_topology = global_topology.nodes(i);
node_id_to_device_count[local_topology.node_id()] =
local_topology.devices_size();
for (const DeviceProto& device : local_topology.devices()) {
if (gpu_topology.platform_version().empty()) {
gpu_topology.set_platform_version(device.name());
}
slice_id_to_node_ids[device.slice_index()].insert(
local_topology.node_id());
device_ids.push_back(device.global_device_id());
}
}
if (IsGpuTopologySymmetric(slice_id_to_node_ids, node_id_to_device_count)) {
gpu_topology.set_num_slices(slice_id_to_node_ids.size());
gpu_topology.set_num_hosts_per_slice(
slice_id_to_node_ids.begin()->second.size());
gpu_topology.set_num_devices_per_host(
node_id_to_device_count.begin()->second);
} else {
gpu_topology.set_num_slices(-1);
gpu_topology.set_num_hosts_per_slice(-1);
gpu_topology.set_num_devices_per_host(-1);
}
std::sort(device_ids.begin(), device_ids.end());
gpu_topology.mutable_device_ids()->Add(device_ids.begin(), device_ids.end());
return gpu_topology;
}
} | #include "xla/pjrt/distributed/topology_util.h"
#include <string>
#include <string_view>
#include <vector>
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/protocol.pb.h"
#include "xla/test_helpers.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
TEST(TopologyTest, BuildGlobalTopology) {
std::vector<LocalTopologyProto> locals(2);
DeviceProto* d0 = locals[0].add_devices();
d0->set_local_device_ordinal(0);
DeviceProto* d1 = locals[0].add_devices();
d1->set_local_device_ordinal(0);
DeviceProto* d2 = locals[1].add_devices();
d2->set_local_device_ordinal(0);
DeviceProto* d3 = locals[1].add_devices();
d3->set_local_device_ordinal(1);
GlobalTopologyProto global =
BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
true);
EXPECT_EQ(global.nodes_size(), 2);
EXPECT_EQ(global.nodes()[0].devices_size(), 2);
EXPECT_EQ(global.nodes()[1].devices_size(), 2);
}
TEST(TopologyTest, ExchangeTopology) {
int num_nodes = 2;
std::vector<LocalTopologyProto> locals(num_nodes);
DeviceProto* d0 = locals[0].add_devices();
d0->set_local_device_ordinal(0);
DeviceProto* d1 = locals[0].add_devices();
d1->set_local_device_ordinal(0);
DeviceProto* d2 = locals[1].add_devices();
d2->set_local_device_ordinal(0);
DeviceProto* d3 = locals[1].add_devices();
d3->set_local_device_ordinal(1);
InMemoryKeyValueStore kv_store;
std::vector<GlobalTopologyProto> globals(num_nodes);
{
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "TestPool",
num_nodes);
for (int i = 0; i < num_nodes; i++) {
thread_pool.Schedule([&, i] {
TF_ASSERT_OK(ExchangeTopologies(
"cuda", i, num_nodes,
absl::Seconds(10),
absl::Seconds(10), &kv_store, locals[i], &globals[i],
true));
});
}
}
for (const GlobalTopologyProto& global : globals) {
EXPECT_EQ(global.nodes_size(), 2);
EXPECT_EQ(global.nodes()[0].devices_size(), 2);
EXPECT_EQ(global.nodes()[1].devices_size(), 2);
}
}
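// The BuildGpuTopology tests below cover a symmetric two-slice topology
// (one host per slice, two devices per host) and the asymmetric host/device
// cases, which report -1 for all three counts.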
TEST(TopologyTest, BuildGpuTopology) {
std::string slice_0_boot_id = "foo";
std::string slice_1_boot_id = "bar";
std::vector<LocalTopologyProto> locals(2);
locals[0].set_boot_id(slice_0_boot_id);
locals[1].set_boot_id(slice_1_boot_id);
locals[0].set_node_id(0);
locals[1].set_node_id(1);
DeviceProto* d0 = locals[0].add_devices();
d0->set_local_device_ordinal(0);
d0->set_core_count(20);
DeviceProto* d1 = locals[0].add_devices();
d1->set_local_device_ordinal(1);
d1->set_core_count(20);
DeviceProto* d2 = locals[1].add_devices();
d2->set_local_device_ordinal(0);
d2->set_core_count(20);
DeviceProto* d3 = locals[1].add_devices();
d3->set_local_device_ordinal(1);
d3->set_core_count(20);
GlobalTopologyProto global =
BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
true);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
EXPECT_EQ(gpu_topology.device_ids_size(), 4);
EXPECT_EQ(gpu_topology.num_slices(), 2);
EXPECT_EQ(gpu_topology.num_hosts_per_slice(), 1);
EXPECT_EQ(gpu_topology.num_devices_per_host(), 2);
}
TEST(TopologyTest, BuildGpuTopologyWithDifferentNumHostsPerSlice) {
std::string slice_0_boot_id = "foo";
std::string slice_1_boot_id = "bar";
std::vector<LocalTopologyProto> locals(3);
locals[0].set_boot_id(slice_0_boot_id);
locals[1].set_boot_id(slice_0_boot_id);
locals[2].set_boot_id(slice_1_boot_id);
locals[0].set_node_id(0);
locals[1].set_node_id(1);
locals[2].set_node_id(2);
DeviceProto* d0 = locals[0].add_devices();
d0->set_local_device_ordinal(0);
DeviceProto* d1 = locals[1].add_devices();
d1->set_local_device_ordinal(0);
DeviceProto* d2 = locals[2].add_devices();
d2->set_local_device_ordinal(0);
GlobalTopologyProto global =
BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
true);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
EXPECT_EQ(gpu_topology.device_ids_size(), 3);
EXPECT_EQ(gpu_topology.num_slices(), -1);
EXPECT_EQ(gpu_topology.num_hosts_per_slice(), -1);
EXPECT_EQ(gpu_topology.num_devices_per_host(), -1);
}
TEST(TopologyTest, BuildGpuTopologyWithDifferentNumDevicesPerHost) {
std::string slice_0_boot_id = "foo";
std::string slice_1_boot_id = "bar";
std::vector<LocalTopologyProto> locals(2);
locals[0].set_boot_id(slice_0_boot_id);
locals[1].set_boot_id(slice_1_boot_id);
locals[0].set_node_id(0);
locals[1].set_node_id(1);
DeviceProto* d0 = locals[0].add_devices();
d0->set_local_device_ordinal(0);
DeviceProto* d1 = locals[0].add_devices();
d1->set_local_device_ordinal(1);
DeviceProto* d2 = locals[1].add_devices();
d2->set_local_device_ordinal(0);
GlobalTopologyProto global =
BuildGlobalTopology(absl::Span<LocalTopologyProto>(locals),
true);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_topology, BuildGpuTopology(global));
EXPECT_EQ(gpu_topology.device_ids_size(), 3);
EXPECT_EQ(gpu_topology.num_slices(), -1);
EXPECT_EQ(gpu_topology.num_hosts_per_slice(), -1);
EXPECT_EQ(gpu_topology.num_devices_per_host(), -1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/distributed/topology_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/distributed/topology_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
983decde-fe7b-4ef5-bfc6-14c31e95fc7b | cpp | tensorflow/tensorflow | pjrt_c_api_gpu | third_party/xla/xla/pjrt/c/pjrt_c_api_gpu.cc | third_party/xla/xla/pjrt/c/pjrt_c_api_gpu_test.cc | #include "xla/pjrt/c/pjrt_c_api_gpu.h"
#include "absl/base/call_once.h"
#include "absl/log/initialize.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_internal.h"
#include "tsl/platform/platform.h"
const PJRT_Api* GetPjrtApi() {
#ifndef PLATFORM_GOOGLE
static absl::once_flag once;
absl::call_once(once, []() { absl::InitializeLog(); });
#endif
return pjrt::gpu_plugin::GetGpuPjrtApi();
} | #include "xla/pjrt/c/pjrt_c_api_gpu.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/client/client_library.h"
#include "xla/ffi/api/ffi.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/ffi_api.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_ffi_extension.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_extension.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_internal.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_test.h"
#include "xla/pjrt/c/pjrt_c_api_test_base.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace pjrt {
namespace {
#ifdef TENSORFLOW_USE_ROCM
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
"rocm"),
true);
#else
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
"cuda"),
true);
#endif
class PjrtCApiGpuTest : public PjrtCApiTestBase {
public:
PjrtCApiGpuTest() : PjrtCApiTestBase(GetPjrtApi()) {}
};
TEST_F(PjrtCApiGpuTest, CreateViewOfDeviceBuffer) {
auto [buffer, buffer_future] = create_buffer();
TF_CHECK_OK(buffer_future.Await());
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args device_buffer_ptr_args;
device_buffer_ptr_args.struct_size =
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
device_buffer_ptr_args.extension_start = nullptr;
device_buffer_ptr_args.buffer = buffer.get();
PJRT_Error* device_buffer_ptr_error =
api_->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(&device_buffer_ptr_args);
ASSERT_EQ(device_buffer_ptr_error, nullptr);
PJRT_Buffer_Device_Args device_args = PJRT_Buffer_Device_Args{
PJRT_Buffer_Device_Args_STRUCT_SIZE,
nullptr,
buffer.get(),
};
PJRT_Error* device_error = api_->PJRT_Buffer_Device(&device_args);
ASSERT_EQ(device_error, nullptr);
PJRT_Client_CreateViewOfDeviceBuffer_Args create_view_args;
create_view_args.struct_size =
PJRT_Client_CreateViewOfDeviceBuffer_Args_STRUCT_SIZE;
create_view_args.extension_start = nullptr;
create_view_args.client = client_;
create_view_args.device_buffer_ptr = device_buffer_ptr_args.device_memory_ptr;
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {4});
create_view_args.dims = shape.dimensions().data();
create_view_args.num_dims = shape.dimensions().size();
create_view_args.element_type =
pjrt::ConvertToPjRtBufferType(shape.element_type());
pjrt::BufferMemoryLayoutData c_layout_data;
TF_ASSERT_OK_AND_ASSIGN(
c_layout_data, pjrt::ConvertToBufferMemoryLayoutData(shape.layout()));
create_view_args.layout = &(c_layout_data.c_layout);
create_view_args.device = device_args.device;
std::function<void()> on_delete_callback = []() mutable {};
create_view_args.on_delete_callback_arg =
new std::function(on_delete_callback);
create_view_args.on_delete_callback = [](void* device_buffer_ptr,
void* user_arg) {
auto c_func = reinterpret_cast<std::function<void()>*>(user_arg);
(*c_func)();
delete c_func;
};
create_view_args.stream = reinterpret_cast<intptr_t>(nullptr);
PJRT_Error* error =
api_->PJRT_Client_CreateViewOfDeviceBuffer(&create_view_args);
ASSERT_EQ(error, nullptr);
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> view_buffer(
create_view_args.buffer, ::pjrt::MakeBufferDeleter(api_));
PJRT_Buffer_ToHostBuffer_Args to_host_args;
to_host_args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
to_host_args.extension_start = nullptr;
to_host_args.src = view_buffer.get();
xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {4});
auto literal = std::make_shared<xla::Literal>(host_shape);
to_host_args.host_layout = nullptr;
to_host_args.dst = literal->untyped_data();
to_host_args.dst_size = xla::ShapeUtil::ByteSizeOfElements(host_shape);
to_host_args.event = nullptr;
PJRT_Error* to_host_error = api_->PJRT_Buffer_ToHostBuffer(&to_host_args);
ASSERT_EQ(to_host_error, nullptr);
xla::PjRtFuture<> transfer_to_host =
::pjrt::ConvertCEventToCppFuture(to_host_args.event, api_);
TF_CHECK_OK(transfer_to_host.Await());
ASSERT_EQ(literal->data<float>().size(), 4);
std::vector<float> float_data(4);
std::iota(float_data.begin(), float_data.end(), 41.0f);
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
xla::LiteralUtil::CreateR1<float>(float_data), *literal));
}
TEST_F(PjrtCApiGpuTest, CreateAndDestroyExecuteContext) {
PJRT_ExecuteContext_Create_Args create_arg;
create_arg.struct_size = PJRT_ExecuteContext_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.context = nullptr;
EXPECT_EQ(api_->PJRT_ExecuteContext_Create(&create_arg), nullptr);
EXPECT_NE(create_arg.context, nullptr);
const PJRT_FFI_Extension* ffi_extension =
pjrt::FindExtension<PJRT_FFI_Extension>(
api_, PJRT_Extension_Type::PJRT_Extension_Type_FFI);
ASSERT_NE(ffi_extension, nullptr);
std::string string_data = "string_data";
PJRT_FFI_UserData_Add_Args add_args;
add_args.struct_size = PJRT_FFI_UserData_Add_Args_STRUCT_SIZE;
add_args.extension_start = nullptr;
add_args.user_data.type_id = 42;
add_args.user_data.data = &string_data;
add_args.user_data.deleter = nullptr;
add_args.context = create_arg.context;
EXPECT_EQ(ffi_extension->user_data_add(&add_args), nullptr);
TF_ASSERT_OK_AND_ASSIGN(
auto lookup_user_data,
create_arg.context->execute_context->ffi_context().Lookup(
xla::ffi::TypeIdRegistry::TypeId(42)));
EXPECT_EQ(lookup_user_data, &string_data);
PJRT_ExecuteContext_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.context = create_arg.context;
api_->PJRT_ExecuteContext_Destroy(&destroy_args);
}
absl::StatusOr<PJRT_Client_Create_Args> BuildCreateArg(
::pjrt::PJRT_KeyValueCallbackData* kv_callback_data,
std::vector<PJRT_NamedValue>& c_options) {
PJRT_Client_Create_Args args;
args.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.create_options = c_options.data();
args.num_options = c_options.size();
args.kv_get_callback = kv_callback_data->c_kv_get;
args.kv_get_user_arg = &kv_callback_data->kv_get_c_func;
args.kv_put_callback = kv_callback_data->c_kv_put;
args.kv_put_user_arg = &kv_callback_data->kv_put_c_func;
args.client = nullptr;
return args;
}
TEST(PjrtCApiGpuKVStoreTest, CreateClientWithKVCallback) {
auto api = GetPjrtApi();
auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
std::shared_ptr<::pjrt::PJRT_KeyValueCallbackData> kv_callback_data =
::pjrt::ConvertToCKeyValueCallbacks(kv_store);
xla::ClientLibrary::DestroyLocalInstances();
int num_nodes = 2;
std::vector<std::thread> threads;
for (int i = 0; i < num_nodes; i++) {
threads.emplace_back([api, i, num_nodes,
kv_callback_data = kv_callback_data,
kv_store = kv_store] {
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"num_nodes", static_cast<int64_t>(num_nodes)},
{"node_id", static_cast<int64_t>(i)},
{"visible_devices", std::vector<int64_t>({0})}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
TF_ASSERT_OK_AND_ASSIGN(
PJRT_Client_Create_Args create_arg,
BuildCreateArg(kv_callback_data.get(), c_options));
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_EQ(error, nullptr) << error->status.message();
PJRT_Client_Devices_Args device_args;
device_args.struct_size = PJRT_Client_Devices_Args_STRUCT_SIZE;
device_args.extension_start = nullptr;
device_args.client = create_arg.client;
PJRT_Error* device_error = api->PJRT_Client_Devices(&device_args);
EXPECT_EQ(device_error, nullptr);
EXPECT_EQ(device_args.num_devices, 2);
PJRT_Client_AddressableDevices_Args addressable_device_args;
addressable_device_args.struct_size =
PJRT_Client_AddressableDevices_Args_STRUCT_SIZE;
addressable_device_args.extension_start = nullptr;
addressable_device_args.client = create_arg.client;
PJRT_Error* addressable_device_error =
api->PJRT_Client_AddressableDevices(&addressable_device_args);
EXPECT_EQ(addressable_device_error, nullptr);
EXPECT_EQ(addressable_device_args.num_addressable_devices, 1);
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = create_arg.client;
PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
CHECK_EQ(destroy_error, nullptr);
});
}
for (auto& t : threads) {
t.join();
}
}
TEST(PjrtCApiGpuAllocatorTest, ValidOptionsParsing) {
auto api = GetPjrtApi();
std::vector<std::string> allocator_options = {"default", "platform", "bfc",
"cuda_async"};
for (const std::string& allocator_option : allocator_options) {
#ifdef TENSORFLOW_USE_ROCM
if (allocator_option == "cuda_async") {
VLOG(1) << "cuda_async allocator not available on ROCm!";
continue;
}
#endif
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"allocator", allocator_option},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
if (allocator_option == "bfc" || allocator_option == "cuda_async") {
options["memory_fraction"] = 0.5f;
}
if (allocator_option == "cuda_async") {
options["preallocate"] = true;
}
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_EQ(error, nullptr) << error->status.message();
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = create_arg.client;
PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
CHECK_EQ(destroy_error, nullptr);
}
}
TEST(PjrtCApiGpuAllocatorTest, InvalidAllocatorOptionsParsing) {
auto api = GetPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"allocator", static_cast<std::string>("invalid_allocator")},
{"memory_fraction", 0.5f},
{"preallocate", true},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_NE(error, nullptr);
EXPECT_THAT(error->status,
::tsl::testing::StatusIs(
absl::StatusCode::kUnimplemented,
"Allocator invalid_allocator not supported for PJRT GPU "
"plugin. Supported allocator options are: 'default', "
"'platform', 'bfc' and 'cuda_async'."));
PJRT_Error_Destroy_Args error_destroy_args;
error_destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
error_destroy_args.extension_start = nullptr;
error_destroy_args.error = error;
api->PJRT_Error_Destroy(&error_destroy_args);
}
TEST(PjrtCApiPlatformNameTest, AvailablePlatformName) {
auto api = GetPjrtApi();
std::string expected_platform_name_for_cuda = "cuda";
std::string expected_platform_name_for_rocm = "rocm";
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"platform_name", static_cast<std::string>("gpu")},
{"allocator", static_cast<std::string>("default")},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_EQ(error, nullptr) << error->status.message();
PJRT_Client_PlatformName_Args platform_name_args;
platform_name_args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
platform_name_args.extension_start = nullptr;
platform_name_args.client = create_arg.client;
PJRT_Error* platform_name_error =
api->PJRT_Client_PlatformName(&platform_name_args);
EXPECT_EQ(platform_name_error, nullptr);
#if TENSORFLOW_USE_ROCM
EXPECT_EQ(platform_name_args.platform_name, expected_platform_name_for_rocm);
#else
EXPECT_EQ(platform_name_args.platform_name, expected_platform_name_for_cuda);
#endif
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = create_arg.client;
PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
CHECK_EQ(destroy_error, nullptr);
}
TEST(PjrtCApiPlatformNameTest, UnavailablePlatformName) {
auto api = GetPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"platform_name", static_cast<std::string>("invalid_platform_name")},
{"allocator", static_cast<std::string>("default")},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_NE(error, nullptr);
EXPECT_THAT(error->status,
::tsl::testing::StatusIs(
absl::StatusCode::kNotFound,
testing::StartsWith("Could not find registered platform with "
"name: \"invalid_platform_name\". "
"Available platform names are:")));
PJRT_Error_Destroy_Args error_destroy_args;
error_destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
error_destroy_args.extension_start = nullptr;
error_destroy_args.error = error;
api->PJRT_Error_Destroy(&error_destroy_args);
}
TEST(PJRTGpuDeviceTopologyTest, CreateGpuTopology) {
auto pjrt_api = gpu_plugin::GetGpuPjrtApi();
PJRT_TopologyDescription_Create_Args args;
args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = nullptr;
args.num_options = 0;
args.create_options = nullptr;
PJRT_Error* error = pjrt_api->PJRT_TopologyDescription_Create(&args);
EXPECT_EQ(error, nullptr) << error->status.message();
auto pjrt_topology =
reinterpret_cast<const PJRT_TopologyDescription*>(args.topology);
ASSERT_NE(pjrt_topology, nullptr);
#ifdef TENSORFLOW_USE_ROCM
EXPECT_EQ(pjrt_topology->topology->platform_id(), xla::RocmId());
EXPECT_EQ(pjrt_topology->topology->platform_name(), xla::RocmName());
#else
EXPECT_EQ(pjrt_topology->topology->platform_id(), xla::CudaId());
EXPECT_EQ(pjrt_topology->topology->platform_name(), xla::CudaName());
#endif
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = const_cast<PJRT_TopologyDescription*>(pjrt_topology);
PJRT_Error* destroy_error =
pjrt_api->PJRT_TopologyDescription_Destroy(&destroy_args);
EXPECT_EQ(destroy_error, nullptr) << destroy_error->status.message();
}
constexpr char const* kTargetConfigString = R"(gpu_device_info {
threads_per_block_limit: 1024
threads_per_warp: 32
shared_memory_per_block: 49152
shared_memory_per_core: 98304
threads_per_core_limit: 2048
core_count: 80
fpus_per_core: 64
block_dim_limit_x: 2147483647
block_dim_limit_y: 65535
block_dim_limit_z: 65535
memory_bandwidth: 898048000000
l2_cache_size: 6291456
clock_rate_ghz: 1.53
device_memory_size: 34072559616
shared_memory_per_block_optin: 98304
cuda_compute_capability {
major: 7
}
registers_per_core_limit: 65536
registers_per_block_limit: 65536
}
platform_name: "CUDA"
dnn_version_info {
major: 9
minor: 3
}
device_description_str: "Tesla V100-SXM2-32GB"
)";
TEST(PJRTGpuDeviceTopologyTest, CreateExplicitGpuTopologyAndTargetConfig) {
auto pjrt_api = gpu_plugin::GetGpuPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"topology", static_cast<std::string>("16 x 2 x 4")},
{"target_config", static_cast<std::string>(kTargetConfigString)}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_TopologyDescription_Create_Args args;
args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = nullptr;
args.num_options = c_options.size();
args.create_options = c_options.data();
PJRT_Error* error = pjrt_api->PJRT_TopologyDescription_Create(&args);
EXPECT_EQ(error, nullptr) << error->status.message();
auto pjrt_topology =
reinterpret_cast<const PJRT_TopologyDescription*>(args.topology);
ASSERT_NE(pjrt_topology, nullptr);
EXPECT_EQ(pjrt_topology->topology->platform_id(), xla::CudaId());
EXPECT_EQ(pjrt_topology->topology->platform_name(), xla::CudaName());
EXPECT_EQ(pjrt_topology->topology->ProcessCount().value(), 16 * 2);
EXPECT_EQ(pjrt_topology->topology->DeviceDescriptions().size(), 16 * 2 * 4);
EXPECT_EQ(pjrt_topology->topology->DeviceDescriptions()[0]->device_kind(),
"Tesla V100-SXM2-32GB");
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = const_cast<PJRT_TopologyDescription*>(pjrt_topology);
PJRT_Error* destroy_error =
pjrt_api->PJRT_TopologyDescription_Destroy(&destroy_args);
EXPECT_EQ(destroy_error, nullptr) << destroy_error->status.message();
}
TEST(PJRTGpuDeviceTopologyTest, CreateExplicitGpuTopology) {
auto pjrt_api = gpu_plugin::GetGpuPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"topology", static_cast<std::string>("16 x 2 x 4")}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_TopologyDescription_Create_Args args;
args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = nullptr;
args.num_options = c_options.size();
args.create_options = c_options.data();
PJRT_Error* error = pjrt_api->PJRT_TopologyDescription_Create(&args);
EXPECT_EQ(error, nullptr) << error->status.message();
auto pjrt_topology =
reinterpret_cast<const PJRT_TopologyDescription*>(args.topology);
ASSERT_NE(pjrt_topology, nullptr);
EXPECT_EQ(pjrt_topology->topology->ProcessCount().value(), 16 * 2);
EXPECT_EQ(pjrt_topology->topology->DeviceDescriptions().size(), 16 * 2 * 4);
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = const_cast<PJRT_TopologyDescription*>(pjrt_topology);
PJRT_Error* destroy_error =
pjrt_api->PJRT_TopologyDescription_Destroy(&destroy_args);
EXPECT_EQ(destroy_error, nullptr) << destroy_error->status.message();
}
void TestCustomCallV2() {}
TEST(PjrtCApiGpuExtensionTest, CustomCallUntyped) {
PJRT_Gpu_Register_Custom_Call_Args args;
args.struct_size = PJRT_Gpu_Register_Custom_Call_Args_STRUCT_SIZE;
std::string function_name = "untyped_function_name";
args.function_name = function_name.c_str();
args.function_name_size = function_name.size();
args.api_version = 0;
args.handler_instantiate = nullptr;
args.handler_prepare = nullptr;
args.handler_initialize = nullptr;
args.handler_execute = reinterpret_cast<void*>(&TestCustomCallV2);
auto api = GetPjrtApi();
const PJRT_Extension_Base* next =
reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
while (next != nullptr &&
next->type !=
PJRT_Extension_Type::PJRT_Extension_Type_Gpu_Custom_Call) {
next = next->next;
}
ASSERT_NE(next, nullptr);
PJRT_Error* error =
reinterpret_cast<const PJRT_Gpu_Custom_Call*>(next)->custom_call(&args);
CHECK_EQ(error, nullptr);
void* custom_call = xla::CustomCallTargetRegistry::Global()->Lookup(
function_name, stream_executor::GpuPlatformName());
EXPECT_EQ(custom_call, reinterpret_cast<void*>(&TestCustomCallV2));
}
TEST(PjrtCApiGpuExtensionTest, CustomCallTyped) {
static constexpr auto* noop = +[] { return xla::ffi::Error::Success(); };
XLA_FFI_DEFINE_HANDLER(kNoop, noop, xla::ffi::Ffi::Bind());
PJRT_Gpu_Register_Custom_Call_Args args;
args.struct_size = PJRT_Gpu_Register_Custom_Call_Args_STRUCT_SIZE;
std::string function_name = "typed_function_name";
args.function_name = function_name.c_str();
args.function_name_size = function_name.size();
args.api_version = 1;
args.handler_instantiate = nullptr;
args.handler_prepare = nullptr;
args.handler_initialize = nullptr;
args.handler_execute = reinterpret_cast<void*>(kNoop);
auto api = GetPjrtApi();
const PJRT_Extension_Base* next =
reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
while (next != nullptr &&
next->type !=
PJRT_Extension_Type::PJRT_Extension_Type_Gpu_Custom_Call) {
next = next->next;
}
ASSERT_NE(next, nullptr);
PJRT_Error* error =
reinterpret_cast<const PJRT_Gpu_Custom_Call*>(next)->custom_call(&args);
CHECK_EQ(error, nullptr);
auto registration =
xla::ffi::FindHandler(function_name, stream_executor::GpuPlatformName())
.value();
EXPECT_EQ(reinterpret_cast<void*>(registration.bundle.execute), kNoop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_gpu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_gpu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b157befa-2457-49ea-98c8-27c339742c71 | cpp | tensorflow/tensorflow | pjrt_c_api_cpu | third_party/xla/xla/pjrt/c/pjrt_c_api_cpu.cc | third_party/xla/xla/pjrt/c/pjrt_c_api_cpu_test.cc | #include "xla/pjrt/c/pjrt_c_api_cpu.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_cpu_internal.h"
const PJRT_Api* GetPjrtApi() { return pjrt::cpu_plugin::GetCpuPjrtApi(); } | #include "xla/pjrt/c/pjrt_c_api_cpu.h"
#include "xla/pjrt/c/pjrt_c_api_test.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
namespace pjrt {
namespace {
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
"cpu"),
true);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_cpu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_cpu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a6d5e13-b058-42e0-b8ea-d7b62c632acd | cpp | tensorflow/tensorflow | pjrt_c_api_helpers | third_party/xla/xla/pjrt/c/pjrt_c_api_helpers.cc | third_party/xla/xla/pjrt/c/pjrt_c_api_helpers_test.cc | #include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_layouts_extension.h"
#include "xla/pjrt/c/pjrt_c_api_profiler_extension.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
namespace pjrt {
const absl::string_view kHloFormat = "hlo";
const absl::string_view kMlirFormat = "mlir";
const absl::string_view kHloWithConfigFormat = "hlo_with_config";
PJRT_ClientDeleter MakeClientDeleter(const PJRT_Api* api) {
return [api](PJRT_Client* client) -> void {
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = client;
PJRT_Error* error = api->PJRT_Client_Destroy(&destroy_args);
CHECK(error == nullptr);
};
}
PJRT_ErrorDeleter MakeErrorDeleter(const PJRT_Api* api) {
return [api](PJRT_Error* error) -> void {
PJRT_Error_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.error = error;
api->PJRT_Error_Destroy(&destroy_args);
};
}
PJRT_BufferDeleter MakeBufferDeleter(const PJRT_Api* api) {
return [api](PJRT_Buffer* buffer) -> void {
PJRT_Buffer_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Buffer_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.buffer = buffer;
pjrt::LogFatalIfPjrtError(api->PJRT_Buffer_Destroy(&destroy_args), api);
};
}
PJRT_ExecutableDeleter MakeExecutableDeleter(const PJRT_Api* api) {
return [api](PJRT_Executable* executable) -> void {
PJRT_Executable_Destroy_Args args;
args.struct_size = PJRT_Executable_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
pjrt::LogFatalIfPjrtError(api->PJRT_Executable_Destroy(&args), api);
};
}
PJRT_LoadedExecutableDeleter MakeLoadedExecutableDeleter(const PJRT_Api* api) {
return [api](PJRT_LoadedExecutable* executable) -> void {
PJRT_LoadedExecutable_Destroy_Args args;
args.struct_size = PJRT_LoadedExecutable_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
pjrt::LogFatalIfPjrtError(api->PJRT_LoadedExecutable_Destroy(&args), api);
};
}
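// Converts a PJRT_Error into an absl::Status; a null error maps to OkStatus.
// The error object is not destroyed here, so callers remain responsible for it.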
absl::Status PjrtErrorToStatus(const PJRT_Error* error, const PJRT_Api* api) {
absl::Status status;
if (error != nullptr) {
status = absl::Status(PjrtErrorToStatusCode(error, api),
GetPjrtErrorMessage(error, api));
}
return status;
}
PJRT_TopologyDescriptionDeleter MakeTopologyDescriptionDeleter(
const PJRT_Api* api) {
return [api](PJRT_TopologyDescription* topology) -> void {
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size =
PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = topology;
pjrt::LogFatalIfPjrtError(
api->PJRT_TopologyDescription_Destroy(&destroy_args), api);
};
}
PJRT_Layouts_MemoryLayoutDeleter MakeMemoryLayoutDeleter(const PJRT_Api* api) {
PJRT_Layouts_Extension* ext_api =
FindExtension<PJRT_Layouts_Extension>(api, PJRT_Extension_Type_Layouts);
CHECK_NE(ext_api, nullptr) << "MakeMemoryLayoutDeleter passed PJRT_Api that "
"doesn't support layouts extension";
return [api, ext_api](PJRT_Layouts_MemoryLayout* layout) -> void {
PJRT_Layouts_MemoryLayout_Destroy_Args args;
args.struct_size = PJRT_Layouts_MemoryLayout_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.layout = layout;
pjrt::LogFatalIfPjrtError(ext_api->PJRT_Layouts_MemoryLayout_Destroy(&args),
api);
};
}
PJRT_Error_Code GetErrorCode(const PJRT_Error* error, const PJRT_Api* api) {
PJRT_Error_GetCode_Args args;
args.struct_size = PJRT_Error_GetCode_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.error = error;
pjrt::LogFatalIfPjrtError(api->PJRT_Error_GetCode(&args), api);
return args.code;
}
absl::StatusCode PjrtErrorToStatusCode(const PJRT_Error* error,
const PJRT_Api* api) {
return PjrtErrorCodeToStatusCode(GetErrorCode(error, api));
}
absl::StatusCode PjrtErrorCodeToStatusCode(PJRT_Error_Code code) {
switch (code) {
case PJRT_Error_Code_CANCELLED:
case PJRT_Error_Code_UNKNOWN:
case PJRT_Error_Code_INVALID_ARGUMENT:
case PJRT_Error_Code_DEADLINE_EXCEEDED:
case PJRT_Error_Code_NOT_FOUND:
case PJRT_Error_Code_ALREADY_EXISTS:
case PJRT_Error_Code_PERMISSION_DENIED:
case PJRT_Error_Code_RESOURCE_EXHAUSTED:
case PJRT_Error_Code_FAILED_PRECONDITION:
case PJRT_Error_Code_ABORTED:
case PJRT_Error_Code_OUT_OF_RANGE:
case PJRT_Error_Code_UNIMPLEMENTED:
case PJRT_Error_Code_INTERNAL:
case PJRT_Error_Code_UNAVAILABLE:
case PJRT_Error_Code_DATA_LOSS:
case PJRT_Error_Code_UNAUTHENTICATED:
return static_cast<absl::StatusCode>(code);
}
}
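// Maps an absl::StatusCode onto the identically-valued PJRT_Error_Code.
// OK has no PJRT error equivalent (callers pass nullptr instead), so OK and
// the proto sentinel codes are fatal.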
PJRT_Error_Code StatusCodeToPjrtErrorCode(absl::StatusCode code) {
switch (static_cast<tsl::error::Code>(code)) {
case tsl::error::CANCELLED:
case tsl::error::UNKNOWN:
case tsl::error::INVALID_ARGUMENT:
case tsl::error::DEADLINE_EXCEEDED:
case tsl::error::NOT_FOUND:
case tsl::error::ALREADY_EXISTS:
case tsl::error::PERMISSION_DENIED:
case tsl::error::UNAUTHENTICATED:
case tsl::error::RESOURCE_EXHAUSTED:
case tsl::error::FAILED_PRECONDITION:
case tsl::error::ABORTED:
case tsl::error::OUT_OF_RANGE:
case tsl::error::UNIMPLEMENTED:
case tsl::error::INTERNAL:
case tsl::error::UNAVAILABLE:
case tsl::error::DATA_LOSS:
return static_cast<PJRT_Error_Code>(code);
case tsl::error::OK:
CHECK(false) << "Status::OK() cannot be converted to PJRT_Error code, "
"use nullptr instead";
case tensorflow::error::
DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_:
CHECK(false) << "got DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_"
"USE_DEFAULT_IN_SWITCH_INSTEAD_";
case tensorflow::error::Code_INT_MIN_SENTINEL_DO_NOT_USE_:
CHECK(false) << "got Code_INT_MIN_SENTINEL_DO_NOT_USE_";
case tensorflow::error::Code_INT_MAX_SENTINEL_DO_NOT_USE_:
CHECK(false) << "got Code_INT_MAX_SENTINEL_DO_NOT_USE_";
}
}
absl::string_view GetPjrtErrorMessage(const PJRT_Error* error,
const PJRT_Api* api) {
PJRT_Error_Message_Args message_args;
message_args.struct_size = PJRT_Error_Message_Args_STRUCT_SIZE;
message_args.extension_start = nullptr;
message_args.error = error;
api->PJRT_Error_Message(&message_args);
return absl::string_view(message_args.message, message_args.message_size);
}
void LogFatalIfPjrtError(PJRT_Error* error, const PJRT_Api* api) {
std::unique_ptr<PJRT_Error, pjrt::PJRT_ErrorDeleter> _error(
error, MakeErrorDeleter(api));
absl::Status _status = PjrtErrorToStatus(_error.get(), api);
if (!_status.ok()) {
LOG(FATAL) << "Unexpected error status " << _status.message();
}
}
PJRT_EventDeleter MakeEventDeleter(const PJRT_Api* api) {
CHECK(api != nullptr);
return [api](PJRT_Event* managed) {
PJRT_Event_Destroy_Args args;
args.struct_size = PJRT_Event_Destroy_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.event = managed;
LogFatalIfPjrtError(api->PJRT_Event_Destroy(&args), api);
};
}
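// Maps xla::PrimitiveType to PJRT_Buffer_Type; element types without a C API
// equivalent trigger a CHECK failure rather than returning an error.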
PJRT_Buffer_Type ConvertToPjRtBufferType(xla::PrimitiveType type) {
switch (type) {
case xla::PrimitiveType::PRIMITIVE_TYPE_INVALID:
return PJRT_Buffer_Type::PJRT_Buffer_Type_INVALID;
case xla::PrimitiveType::PRED:
return PJRT_Buffer_Type::PJRT_Buffer_Type_PRED;
case xla::PrimitiveType::TOKEN:
return PJRT_Buffer_Type::PJRT_Buffer_Type_TOKEN;
case xla::PrimitiveType::S2:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S2;
case xla::PrimitiveType::S4:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S4;
case xla::PrimitiveType::S8:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S8;
case xla::PrimitiveType::S16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S16;
case xla::PrimitiveType::S32:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S32;
case xla::PrimitiveType::S64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_S64;
case xla::PrimitiveType::U2:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U2;
case xla::PrimitiveType::U4:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U4;
case xla::PrimitiveType::U8:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U8;
case xla::PrimitiveType::U16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U16;
case xla::PrimitiveType::U32:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U32;
case xla::PrimitiveType::U64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_U64;
case xla::PrimitiveType::F16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F16;
case xla::PrimitiveType::F32:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F32;
case xla::PrimitiveType::BF16:
return PJRT_Buffer_Type::PJRT_Buffer_Type_BF16;
case xla::PrimitiveType::F64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F64;
case xla::PrimitiveType::F8E5M2:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2;
case xla::PrimitiveType::F8E4M3:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3;
case xla::PrimitiveType::F8E4M3FN:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FN;
case xla::PrimitiveType::F8E4M3B11FNUZ:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3B11FNUZ;
case xla::PrimitiveType::F8E5M2FNUZ:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2FNUZ;
case xla::PrimitiveType::F8E4M3FNUZ:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FNUZ;
case xla::PrimitiveType::F8E3M4:
return PJRT_Buffer_Type::PJRT_Buffer_Type_F8E3M4;
case xla::PrimitiveType::C64:
return PJRT_Buffer_Type::PJRT_Buffer_Type_C64;
case xla::PrimitiveType::C128:
return PJRT_Buffer_Type::PJRT_Buffer_Type_C128;
default:
CHECK(false)
<< "Element type of the shape is not supported in C API layer: "
<< xla::primitive_util::LowercasePrimitiveTypeName(type);
}
}
xla::PrimitiveType ConvertFromPjRtBufferType(PJRT_Buffer_Type type) {
switch (type) {
case PJRT_Buffer_Type::PJRT_Buffer_Type_PRED:
return xla::PrimitiveType::PRED;
case PJRT_Buffer_Type::PJRT_Buffer_Type_TOKEN:
return xla::PrimitiveType::TOKEN;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S2:
return xla::PrimitiveType::S2;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S4:
return xla::PrimitiveType::S4;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S8:
return xla::PrimitiveType::S8;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S16:
return xla::PrimitiveType::S16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S32:
return xla::PrimitiveType::S32;
case PJRT_Buffer_Type::PJRT_Buffer_Type_S64:
return xla::PrimitiveType::S64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U2:
return xla::PrimitiveType::U2;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U4:
return xla::PrimitiveType::U4;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U8:
return xla::PrimitiveType::U8;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U16:
return xla::PrimitiveType::U16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U32:
return xla::PrimitiveType::U32;
case PJRT_Buffer_Type::PJRT_Buffer_Type_U64:
return xla::PrimitiveType::U64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F16:
return xla::PrimitiveType::F16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F32:
return xla::PrimitiveType::F32;
case PJRT_Buffer_Type::PJRT_Buffer_Type_BF16:
return xla::PrimitiveType::BF16;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F64:
return xla::PrimitiveType::F64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_C64:
return xla::PrimitiveType::C64;
case PJRT_Buffer_Type::PJRT_Buffer_Type_C128:
return xla::PrimitiveType::C128;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2:
return xla::PrimitiveType::F8E5M2;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3:
return xla::PrimitiveType::F8E4M3;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FN:
return xla::PrimitiveType::F8E4M3FN;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3B11FNUZ:
return xla::PrimitiveType::F8E4M3B11FNUZ;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E5M2FNUZ:
return xla::PrimitiveType::F8E5M2FNUZ;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E4M3FNUZ:
return xla::PrimitiveType::F8E4M3FNUZ;
case PJRT_Buffer_Type::PJRT_Buffer_Type_F8E3M4:
return xla::PrimitiveType::F8E3M4;
case PJRT_Buffer_Type::PJRT_Buffer_Type_INVALID:
CHECK(false) << "Buffer type is not supported in C API layer.";
}
}
const char* HostBufferSemanticsToString(
xla::PjRtClient::HostBufferSemantics h) {
switch (h) {
case xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall:
return "xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall";
case xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy:
return "xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy";
case xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy:
return "xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy";
case xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes:
return "xla::PjRtClient::HostBufferSemantics::"
"kImmutableUntilTransferCompletes";
}
}
PJRT_HostBufferSemantics ConvertToPjRtHostBufferSemantics(
xla::PjRtClient::HostBufferSemantics buffer_semantics) {
switch (buffer_semantics) {
case xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableOnlyDuringCall;
case xla::PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes;
case xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableZeroCopy;
case xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy:
return PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kMutableZeroCopy;
default:
CHECK(false)
<< "Input host buffer semantics is not supported in C API layer: "
<< HostBufferSemanticsToString(buffer_semantics);
}
}
xla::PjRtClient::HostBufferSemantics ConvertFromPjRtHostBufferSemantics(
PJRT_HostBufferSemantics buffer_semantics) {
switch (buffer_semantics) {
case PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableOnlyDuringCall:
return xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall;
case PJRT_HostBufferSemantics::
PJRT_HostBufferSemantics_kImmutableUntilTransferCompletes:
return xla::PjRtClient::HostBufferSemantics::
kImmutableUntilTransferCompletes;
case PJRT_HostBufferSemantics::PJRT_HostBufferSemantics_kImmutableZeroCopy:
return xla::PjRtClient::HostBufferSemantics::kImmutableZeroCopy;
case PJRT_HostBufferSemantics::PJRT_HostBufferSemantics_kMutableZeroCopy:
return xla::PjRtClient::HostBufferSemantics::kMutableZeroCopy;
}
}
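// Wraps a PJRT_Event in a PjRtFuture<>: the event's OnReady callback fulfills
// the promise (converting any PJRT_Error into a status) and then destroys both
// the error and the event through the C API.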
xla::PjRtFuture<> ConvertCEventToCppFuture(PJRT_Event* c_event,
const PJRT_Api* c_api) {
using xla::PjRtFuture;
PJRT_Event_OnReady_Args event_onready_args;
event_onready_args.struct_size = PJRT_Event_OnReady_Args_STRUCT_SIZE;
event_onready_args.extension_start = nullptr;
event_onready_args.event = c_event;
PjRtFuture<>::Promise promise = PjRtFuture<>::CreatePromise();
event_onready_args.user_arg = new std::function<void(PJRT_Error*)>(
[promise, c_event, c_api](PJRT_Error* error) mutable {
if (error != nullptr) {
promise.Set(::pjrt::PjrtErrorToStatus(error, c_api));
::pjrt::MakeErrorDeleter(c_api)(error);
} else {
promise.Set();
}
::pjrt::MakeEventDeleter(c_api)(c_event);
});
event_onready_args.callback = [](PJRT_Error* error, void* arg) {
std::function<void(PJRT_Error*)>* set_future =
reinterpret_cast<std::function<void(PJRT_Error*)>*>(arg);
(*set_future)(error);
delete set_future;
};
PJRT_Error* error = c_api->PJRT_Event_OnReady(&event_onready_args);
if (error != nullptr) {
return PjRtFuture<>(::pjrt::PjrtErrorToStatus(error, c_api));
}
return PjRtFuture<>(std::move(promise));
}
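// Converts a (name, PjRtValueType) pair into a PJRT_NamedValue. The C struct
// only borrows pointers (the name and any string or int64-list storage), so
// the C++ inputs must outlive any use of the returned value.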
static absl::StatusOr<PJRT_NamedValue> ConvertToPjRtNamedValue(
const std::string& name, const xla::PjRtValueType& value) {
PJRT_NamedValue c_value;
c_value.struct_size = PJRT_NamedValue_STRUCT_SIZE;
c_value.extension_start = nullptr;
c_value.name = name.c_str();
c_value.name_size = name.size();
if (std::holds_alternative<std::string>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kString;
const std::string& option_string_value = std::get<std::string>(value);
c_value.string_value = option_string_value.c_str();
c_value.value_size = option_string_value.size();
} else if (std::holds_alternative<int64_t>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kInt64;
c_value.int64_value = std::get<int64_t>(value);
c_value.value_size = 1;
} else if (std::holds_alternative<std::vector<int64_t>>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List;
const std::vector<int64_t>& option_int_list_value =
std::get<std::vector<int64_t>>(value);
c_value.int64_array_value = option_int_list_value.data();
c_value.value_size = option_int_list_value.size();
} else if (std::holds_alternative<float>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kFloat;
c_value.float_value = std::get<float>(value);
c_value.value_size = 1;
} else if (std::holds_alternative<bool>(value)) {
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kBool;
c_value.bool_value = std::get<bool>(value);
c_value.value_size = 1;
} else {
return tsl::errors::InvalidArgument("Unexpected PjRtValueType: '",
value.index(), " with name: ", name);
}
return c_value;
}
absl::StatusOr<std::vector<PJRT_NamedValue>> ConvertToPjRtNamedValueList(
const absl::flat_hash_map<std::string, xla::PjRtValueType>& cpp_value_map) {
std::vector<PJRT_NamedValue> c_value_list;
c_value_list.reserve(cpp_value_map.size());
for (const auto& [name, value] : cpp_value_map) {
TF_ASSIGN_OR_RETURN(PJRT_NamedValue c_value,
ConvertToPjRtNamedValue(name, value));
c_value_list.push_back(c_value);
}
return c_value_list;
}
absl::flat_hash_map<std::string, xla::PjRtValueType>
ConvertFromPjRtNamedValueList(const PJRT_NamedValue* c_value_list,
size_t list_size) {
absl::flat_hash_map<std::string, xla::PjRtValueType> cpp_value_map;
for (int i = 0; i < list_size; ++i) {
const PJRT_NamedValue& c_value = c_value_list[i];
absl::string_view name = absl::string_view(c_value.name, c_value.name_size);
switch (c_value.type) {
case PJRT_NamedValue_Type::PJRT_NamedValue_kString: {
std::string string_value(c_value.string_value, c_value.value_size);
cpp_value_map[name] = xla::PjRtValueType(string_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64: {
cpp_value_map[name] = xla::PjRtValueType(c_value.int64_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List: {
const int64_t* array_ptr(c_value.int64_array_value);
std::vector<int64_t> int64_array(array_ptr,
array_ptr + c_value.value_size);
cpp_value_map[name] = xla::PjRtValueType(int64_array);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kFloat: {
cpp_value_map[name] = xla::PjRtValueType(c_value.float_value);
break;
}
case PJRT_NamedValue_Type::PJRT_NamedValue_kBool: {
cpp_value_map[name] = xla::PjRtValueType(c_value.bool_value);
break;
}
default: {
LOG(FATAL) << "Unexpected PJRT_NamedValue type: " << c_value.type
<< " with name: " << name;
break;
}
}
}
return cpp_value_map;
}
static absl::StatusOr<PJRT_NamedValue_Type> GetPjrtNamedValueType(
xla::PjRtValueType cpp_value) {
if (std::holds_alternative<std::string>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kString;
}
if (std::holds_alternative<int64_t>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kInt64;
}
if (std::holds_alternative<std::vector<int64_t>>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kInt64List;
}
if (std::holds_alternative<float>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kFloat;
}
if (std::holds_alternative<bool>(cpp_value)) {
return PJRT_NamedValue_Type::PJRT_NamedValue_kBool;
}
return tsl::errors::InvalidArgument("Unexpected PjRtValueType with index",
cpp_value.index());
}
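// Validates the options passed to PJRT_Client_Create against the expected
// name-to-type map, rejecting unknown option names and mismatched value types.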
absl::Status ValidateCreateOptions(
const absl::flat_hash_map<std::string, xla::PjRtValueType>& value_map,
const absl::flat_hash_map<std::string, PJRT_NamedValue_Type>&
expected_name_and_types) {
for (const auto& [name, value] : value_map) {
auto it = expected_name_and_types.find(name);
if (it == expected_name_and_types.end()) {
return tsl::errors::InvalidArgument(
"Unexpected option name passed to PJRT_Client_Create: ", name);
}
TF_ASSIGN_OR_RETURN(PJRT_NamedValue_Type type,
GetPjrtNamedValueType(value));
if (type != it->second) {
return tsl::errors::InvalidArgument(
"Option passed to PJRT_Client_Create with name ", name,
" has type index ", value.index(), " but expected type index is ",
it->second);
}
}
return absl::OkStatus();
}
const std::vector<PJRT_NamedValue>& GetXlaPluginCAttributes() {
constexpr absl::string_view kXlaVersion = "xla_version";
PJRT_NamedValue c_value;
c_value.struct_size = PJRT_NamedValue_STRUCT_SIZE;
c_value.extension_start = nullptr;
c_value.name = kXlaVersion.data();
c_value.name_size = kXlaVersion.size();
c_value.type = PJRT_NamedValue_Type::PJRT_NamedValue_kInt64;
c_value.int64_value = 2;
c_value.value_size = 1;
static const std::vector<PJRT_NamedValue>* c_values =
new std::vector<PJRT_NamedValue>({c_value});
return *c_values;
}
static std::string StructSizeErrorMsg(absl::string_view struct_name,
size_t expected_size,
size_t actual_size) {
std::string error_msg = absl::StrCat(
"Unexpected ", struct_name, " size: expected ", expected_size, ", got ",
actual_size, ". Check installed software versions.");
#if defined(PJRT_API_MAJOR)
absl::StrAppend(&error_msg, " The framework PJRT API version is ",
PJRT_API_MAJOR, ".", PJRT_API_MINOR, ".");
#endif
return error_msg;
}
absl::Status ActualStructSizeIsGreaterOrEqual(absl::string_view struct_name,
size_t expected_size,
size_t actual_size) {
if (actual_size < expected_size) {
return tsl::errors::InvalidArgument(
StructSizeErrorMsg(struct_name, expected_size, actual_size));
}
if (actual_size > expected_size) {
VLOG(2) << StructSizeErrorMsg(struct_name, expected_size, actual_size);
}
return absl::OkStatus();
}
absl::string_view GetPlatformVersion(PJRT_Client* client, const PJRT_Api* api) {
PJRT_Client_PlatformVersion_Args args;
args.struct_size = PJRT_Client_PlatformVersion_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = client;
LogFatalIfPjrtError(api->PJRT_Client_PlatformVersion(&args), api);
absl::string_view platform_version(args.platform_version,
args.platform_version_size);
return platform_version;
}
absl::string_view GetPlatformName(PJRT_Client* client, const PJRT_Api* api) {
PJRT_Client_PlatformName_Args args;
args.client = client;
args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
pjrt::LogFatalIfPjrtError(api->PJRT_Client_PlatformName(&args), api);
absl::string_view platform_name(args.platform_name, args.platform_name_size);
return platform_name;
}
absl::StatusOr<PJRT_TopologyDescription*> GetTopologyDescription(
PJRT_Client* client, const PJRT_Api* api) {
PJRT_Client_TopologyDescription_Args args;
args.struct_size = PJRT_Client_TopologyDescription_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.client = client;
RETURN_STATUS_IF_PJRT_ERROR(api->PJRT_Client_TopologyDescription(&args), api);
return args.topology;
}
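// Transfers ownership of an xla::PjRtChunk's data to a PJRT_Chunk: the chunk's
// deleter is copied onto the heap and freed by the C-side deleter when it runs,
// and the C++ chunk is released so it no longer frees the buffer itself.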
PJRT_Chunk ConvertFromCppChunk(xla::PjRtChunk chunk) {
PJRT_Chunk c_chunk;
c_chunk.data = chunk.data();
c_chunk.size = static_cast<size_t>(chunk.size());
c_chunk.deleter_arg = new std::function(chunk.deleter());
c_chunk.deleter = [](void* data, void* deleter_arg) {
auto* deleter = reinterpret_cast<std::function<void(void*)>*>(deleter_arg);
(*deleter)(data);
delete deleter;
};
chunk.release();
return c_chunk;
}
xla::PjRtChunk ConvertToCppChunk(const PJRT_Chunk& chunk) {
return xla::PjRtChunk(
chunk.data, chunk.size,
[deleter_arg = chunk.deleter_arg, deleter = chunk.deleter](void* data) {
deleter(data, deleter_arg);
});
}
PJRT_DeviceDescription* GetDeviceDescription(const PJRT_Api* api,
PJRT_Device* device) {
PJRT_Device_GetDescription_Args args;
args.struct_size = PJRT_Device_GetDescription_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device;
pjrt::LogFatalIfPjrtError(api->PJRT_Device_GetDescription(&args), api);
return args.device_description;
}
absl::Span<PJRT_Memory* const> GetAddressableMemories(const PJRT_Api* api,
PJRT_Device* device) {
PJRT_Device_AddressableMemories_Args args;
args.struct_size = PJRT_Device_AddressableMemories_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.device = device;
pjrt::LogFatalIfPjrtError(api->PJRT_Device_AddressableMemories(&args), api);
return absl::MakeSpan(args.memories, args.num_memories);
}
int GetId(const PJRT_Api* api, PJRT_DeviceDescription* device_desc) {
PJRT_DeviceDescription_Id_Args args = PJRT_DeviceDescription_Id_Args{
PJRT_DeviceDescription_Id_Args_STRUCT_SIZE, nullptr, device_desc};
pjrt::LogFatalIfPjrtError(api->PJRT_DeviceDescription_Id(&args), api);
return args.id;
}
static void PjRtValueDeleterCallback(char* value) { delete[] value; }
static PJRT_KeyValueGetCFunc ToKVGetCFunc(
xla::KeyValueStoreInterface* kv_store) {
return [kv_store](PJRT_KeyValueGetCallback_Args* args) -> PJRT_Error* {
absl::StatusOr<std::string> output =
kv_store->Get(std::string_view(args->key, args->key_size),
absl::Milliseconds(args->timeout_in_ms));
if (!output.ok()) {
absl::string_view message = output.status().message();
return (*args->callback_error)(
StatusCodeToPjrtErrorCode(output.status().code()), message.data(),
message.size());
}
args->value = new char[output->size()];
std::copy(output->begin(), output->end(), args->value);
args->value_size = output->size();
args->value_deleter_callback = &PjRtValueDeleterCallback;
return nullptr;
};
}
static PJRT_KeyValuePutCFunc ToKVPutCFunc(
xla::KeyValueStoreInterface* kv_store) {
return [kv_store](PJRT_KeyValuePutCallback_Args* args) -> PJRT_Error* {
absl::Status status =
kv_store->Set(std::string_view(args->key, args->key_size),
std::string_view(args->value, args->value_size));
if (!status.ok()) {
absl::string_view message = status.message();
return (*args->callback_error)(StatusCodeToPjrtErrorCode(status.code()),
message.data(), message.size());
}
return nullptr;
};
}
static PJRT_KeyValueGetCallback ToCKVGetCallback(
PJRT_KeyValueGetCFunc* kv_get_c_func) {
return [](PJRT_KeyValueGetCallback_Args* args) -> PJRT_Error* {
PJRT_KeyValueGetCFunc* kv_get_c_func =
reinterpret_cast<PJRT_KeyValueGetCFunc*>(args->user_arg);
if (kv_get_c_func == nullptr) {
absl::Status status = xla::InvalidArgument(
"got nullptr for PJRT_KeyValueGet_Args.user_arg");
return (*args->callback_error)(StatusCodeToPjrtErrorCode(status.code()),
status.message().data(),
status.message().size());
}
return (*kv_get_c_func)(args);
};
}
static PJRT_KeyValuePutCallback ToCKVPutCallback(
PJRT_KeyValuePutCFunc* kv_put_c_func) {
return [](PJRT_KeyValuePutCallback_Args* args) -> PJRT_Error* {
PJRT_KeyValuePutCFunc* kv_put_c_func =
reinterpret_cast<PJRT_KeyValuePutCFunc*>(args->user_arg);
if (kv_put_c_func == nullptr) {
absl::Status status = xla::InvalidArgument(
"got nullptr for PJRT_KeyValuePut_Args.user_arg");
return (*args->callback_error)(StatusCodeToPjrtErrorCode(status.code()),
status.message().data(),
status.message().size());
}
return (*kv_put_c_func)(args);
};
}
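// Packages a C++ KeyValueStoreInterface as C-callable get/put callbacks. The
// returned PJRT_KeyValueCallbackData owns the std::function wrappers and a
// reference to the store, so it must outlive any client created with it.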
std::unique_ptr<PJRT_KeyValueCallbackData> ConvertToCKeyValueCallbacks(
std::shared_ptr<xla::KeyValueStoreInterface> kv_store) {
auto kv_callback_data = std::make_unique<PJRT_KeyValueCallbackData>();
kv_callback_data->kv_get_c_func = ToKVGetCFunc(kv_store.get());
kv_callback_data->kv_put_c_func = ToKVPutCFunc(kv_store.get());
kv_callback_data->c_kv_get =
ToCKVGetCallback(&kv_callback_data->kv_get_c_func);
kv_callback_data->c_kv_put =
ToCKVPutCallback(&kv_callback_data->kv_put_c_func);
kv_callback_data->kv_store = std::move(kv_store);
return kv_callback_data;
}
PJRT_SendCallbackInfo CppSendCallbackToCSendCallback(
xla::SendCallback cpp_send_callback,
PJRT_SendCallbackFunction* send_callback_function) {
return PJRT_SendCallbackInfo{
cpp_send_callback.channel_id,
send_callback_function,
[](PJRT_Chunk* chunk, PJRT_CallbackError* callback_error,
size_t total_size_in_bytes, bool done, void* user_arg) -> PJRT_Error* {
PJRT_SendCallbackFunction* send_callback =
reinterpret_cast<PJRT_SendCallbackFunction*>(user_arg);
return (*send_callback)(chunk, callback_error, total_size_in_bytes,
done);
}};
}
PJRT_RecvCallbackInfo CppRecvCallbackToCRecvCallback(
xla::RecvCallback cpp_recv_callback,
PJRT_RecvCallbackFunction* recv_callback_function) {
return PJRT_RecvCallbackInfo{
cpp_recv_callback.channel_id,
recv_callback_function,
[](PJRT_CopyToDeviceStream* stream, void* user_arg) {
auto* recv_callback =
reinterpret_cast<std::function<void(PJRT_CopyToDeviceStream*)>*>(
user_arg);
(*recv_callback)(stream);
}};
}
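// Converts an xla::Layout into a tiled C memory layout. The returned
// BufferMemoryLayoutData owns the minor_to_major and tile-dimension storage
// that the C struct points into.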
absl::StatusOr<BufferMemoryLayoutData> ConvertToBufferMemoryLayoutData(
const xla::Layout& cpp_layout) {
BufferMemoryLayoutData layout_data;
layout_data.c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled;
PJRT_Buffer_MemoryLayout_Tiled c_tiled;
layout_data.minor_to_major.assign(cpp_layout.minor_to_major().begin(),
cpp_layout.minor_to_major().end());
c_tiled.minor_to_major = layout_data.minor_to_major.data();
c_tiled.minor_to_major_size = layout_data.minor_to_major.size();
c_tiled.num_tiles = cpp_layout.tiles().size();
if (c_tiled.num_tiles >= 0) {
layout_data.tile_dim_sizes.reserve(c_tiled.num_tiles);
for (int i = 0; i < c_tiled.num_tiles; ++i) {
absl::Span<const int64_t> tile_dim = cpp_layout.tiles()[i].dimensions();
layout_data.tile_dims.insert(layout_data.tile_dims.end(),
tile_dim.begin(), tile_dim.end());
layout_data.tile_dim_sizes.push_back(tile_dim.size());
}
c_tiled.tile_dims = layout_data.tile_dims.data();
c_tiled.tile_dim_sizes = layout_data.tile_dim_sizes.data();
}
layout_data.c_layout.tiled = c_tiled;
return layout_data;
}
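// Converts a span of byte strides into a strided C memory layout; the C
// struct aliases the caller-owned stride data.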
absl::StatusOr<BufferMemoryLayoutData> ConvertToBufferMemoryLayoutData(
absl::Span<int64_t const> byte_strides) {
BufferMemoryLayoutData layout_data;
layout_data.c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Strides;
layout_data.c_layout.strides.byte_strides = byte_strides.data();
layout_data.c_layout.strides.num_byte_strides = byte_strides.size();
return layout_data;
}
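// Rebuilds an xla::Layout (minor_to_major plus tiles) from a tiled C memory
// layout description.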
absl::StatusOr<xla::Layout> ConvertToLayout(
const PJRT_Buffer_MemoryLayout_Tiled& c_tiled) {
absl::Span<const int64_t> minor_to_major(c_tiled.minor_to_major,
c_tiled.minor_to_major_size);
absl::InlinedVector<xla::Tile, 1> tiles;
tiles.reserve(c_tiled.num_tiles);
const int64_t* current_tile = c_tiled.tile_dims;
for (int i = 0; i < c_tiled.num_tiles; ++i) {
tiles.push_back(xla::Tile(
absl::Span<const int64_t>(current_tile, c_tiled.tile_dim_sizes[i])));
current_tile += c_tiled.tile_dim_sizes[i];
}
xla::Layout layout = xla::Layout(minor_to_major);
layout.mutable_tiles()->assign(tiles.begin(), tiles.end());
return layout;
}
PJRT_Buffer_Type GetElementType(const PJRT_Api* api, PJRT_Buffer* buffer) {
PJRT_Buffer_ElementType_Args args;
args.struct_size = PJRT_Buffer_ElementType_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer;
LogFatalIfPjrtError(api->PJRT_Buffer_ElementType(&args), api);
return args.type;
}
absl::Span<const int64_t> GetDimensions(const PJRT_Api* api,
PJRT_Buffer* buffer) {
PJRT_Buffer_Dimensions_Args args;
args.struct_size = PJRT_Buffer_Dimensions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer;
LogFatalIfPjrtError(api->PJRT_Buffer_Dimensions(&args), api);
return {args.dims, args.num_dims};
}
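// Retrieves a buffer's memory layout through the PJRT_Layouts extension;
// CHECK-fails if the plugin does not expose that extension.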
std::unique_ptr<PJRT_Layouts_MemoryLayout, PJRT_Layouts_MemoryLayoutDeleter>
GetMemoryLayout(const PJRT_Api* api, PJRT_Buffer* buffer) {
PJRT_Layouts_PJRT_Buffer_MemoryLayout_Args args;
args.struct_size = PJRT_Layouts_PJRT_Buffer_MemoryLayout_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.buffer = buffer;
PJRT_Layouts_Extension* ext_api =
FindExtension<PJRT_Layouts_Extension>(api, PJRT_Extension_Type_Layouts);
CHECK_NE(ext_api, nullptr) << "GetMemoryLayout called with PJRT_Api that "
"doesn't support layouts extension";
LogFatalIfPjrtError(ext_api->PJRT_Layouts_PJRT_Buffer_MemoryLayout(&args),
api);
return std::unique_ptr<PJRT_Layouts_MemoryLayout,
PJRT_Layouts_MemoryLayoutDeleter>(
args.layout, MakeMemoryLayoutDeleter(api));
}
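// Builds an xla::Shape from the C element type and dimensions, attaching a
// layout when a tiled layout is supplied; strided layouts are rejected.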
absl::StatusOr<xla::Shape> BuildXlaShapeFromC(
PJRT_Buffer_Type element_type, const int64_t* dims, size_t num_dims,
PJRT_Buffer_MemoryLayout* layout) {
xla::Shape shape =
xla::ShapeUtil::MakeShape(ConvertFromPjRtBufferType(element_type),
absl::Span<const int64_t>(dims, num_dims));
xla::Layout cpp_layout;
if (layout != nullptr) {
switch (layout->type) {
case PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled: {
TF_ASSIGN_OR_RETURN(cpp_layout, ConvertToLayout(layout->tiled));
break;
}
case PJRT_Buffer_MemoryLayout_Type::
PJRT_Buffer_MemoryLayout_Type_Strides: {
TF_RETURN_IF_ERROR(absl::InvalidArgumentError(
"PJRT_Buffer_MemoryLayout_Type_Strides is not supported to be "
"converted to a xla::Shape"));
break;
}
default: {
TF_RETURN_IF_ERROR(absl::InvalidArgumentError(absl::StrCat(
"Unexpected PJRT_Buffer_MemoryLayout_Type type: ", layout->type)));
}
}
*shape.mutable_layout() = cpp_layout;
}
return shape;
}
absl::string_view PlatformName(const PJRT_Api* api,
const PJRT_TopologyDescription* topo_desc) {
PJRT_TopologyDescription_PlatformName_Args args;
args.struct_size = PJRT_TopologyDescription_PlatformName_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = const_cast<PJRT_TopologyDescription*>(topo_desc);
LogFatalIfPjrtError(api->PJRT_TopologyDescription_PlatformName(&args), api);
return {args.platform_name, args.platform_name_size};
}
absl::Span<PJRT_DeviceDescription* const> DeviceDescriptions(
const PJRT_Api* api, const PJRT_TopologyDescription* topo_desc) {
PJRT_TopologyDescription_GetDeviceDescriptions_Args args;
args.struct_size =
PJRT_TopologyDescription_GetDeviceDescriptions_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = const_cast<PJRT_TopologyDescription*>(topo_desc);
LogFatalIfPjrtError(
api->PJRT_TopologyDescription_GetDeviceDescriptions(&args), api);
return {args.descriptions, args.num_descriptions};
}
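// Copies the executable's compiled memory statistics (device and host) from
// the C API into xla::CompiledMemoryStats.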
absl::StatusOr<xla::CompiledMemoryStats> GetCompiledMemoryStats(
const PJRT_Api* api, PJRT_Executable* executable) {
PJRT_Executable_GetCompiledMemoryStats_Args args;
args.struct_size = PJRT_Executable_GetCompiledMemoryStats_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.executable = executable;
RETURN_STATUS_IF_PJRT_ERROR(
api->PJRT_Executable_GetCompiledMemoryStats(&args), api);
xla::CompiledMemoryStats results;
results.generated_code_size_in_bytes = args.generated_code_size_in_bytes;
results.argument_size_in_bytes = args.argument_size_in_bytes;
results.output_size_in_bytes = args.output_size_in_bytes;
results.alias_size_in_bytes = args.alias_size_in_bytes;
results.temp_size_in_bytes = args.temp_size_in_bytes;
results.host_generated_code_size_in_bytes =
args.host_generated_code_size_in_bytes;
results.host_argument_size_in_bytes = args.host_argument_size_in_bytes;
results.host_output_size_in_bytes = args.host_output_size_in_bytes;
results.host_alias_size_in_bytes = args.host_alias_size_in_bytes;
results.host_temp_size_in_bytes = args.host_temp_size_in_bytes;
return results;
}
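// Creates a PJRT_Profiler_Extension carrying a TraceMe producer context id
// for the given trace name, so callee activity can be correlated with the
// caller's trace.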
PJRT_Profiler_Extension CreatePjrtProfilerExtension(
absl::string_view traceme_name) {
tsl::profiler::TraceMeProducer producer(
traceme_name, tsl::profiler::ContextType::kPjrtLibraryCall);
int64_t traceme_context_id = producer.GetContextId();
PJRT_Profiler_Extension profiler_extension{
PJRT_Profiler_Extension_STRUCT_SIZE,
PJRT_Extension_Type::PJRT_Extension_Type_Profiler,
nullptr,
nullptr,
traceme_context_id,
};
return profiler_extension;
}
} | #include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "xla/layout.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace pjrt {
namespace {
using ::testing::HasSubstr;
TEST(PjRtCApiHelperTest, ConvertValidPjRtValueType) {
std::vector<int64_t> int64_list = {static_cast<int64_t>(1),
static_cast<int64_t>(2)};
absl::flat_hash_map<std::string, xla::PjRtValueType> original_cpp_map = {
{"string", "v1"},
{"int64", static_cast<int64_t>(1)},
{"int64_list", int64_list},
{"float", static_cast<float>(1.0)}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_map,
ConvertToPjRtNamedValueList(original_cpp_map));
auto converted_back_cpp_map =
ConvertFromPjRtNamedValueList(c_map.data(), c_map.size());
EXPECT_THAT(converted_back_cpp_map,
testing::UnorderedElementsAreArray(original_cpp_map));
}
TEST(PjRtCApiHelperTest, ValidOptionNameAndPjRtValueTypeIndex) {
const auto expected = absl::flat_hash_map<std::string, PJRT_NamedValue_Type>({
{"string", PJRT_NamedValue_Type::PJRT_NamedValue_kString},
{"int64", PJRT_NamedValue_Type::PJRT_NamedValue_kInt64},
});
absl::flat_hash_map<std::string, xla::PjRtValueType> valid_map = {
{"string", static_cast<std::string>("v1")},
{"int64", static_cast<int64_t>(1)}};
TF_EXPECT_OK(ValidateCreateOptions(valid_map, expected));
}
TEST(PjRtCApiHelperTest, InvalidOptionName) {
const auto expected = absl::flat_hash_map<std::string, PJRT_NamedValue_Type>({
{"string", PJRT_NamedValue_Type::PJRT_NamedValue_kString},
{"int64", PJRT_NamedValue_Type::PJRT_NamedValue_kInt64},
});
absl::flat_hash_map<std::string, xla::PjRtValueType> invalid_map = {
{"invalid", "v1"}};
auto status = ValidateCreateOptions(invalid_map, expected);
EXPECT_NE(status, absl::OkStatus());
EXPECT_THAT(status.message(),
HasSubstr("Unexpected option name passed to PJRT_Client_Create"));
}
TEST(PjRtCApiHelperTest, InvalidOptionTypeIndex) {
const auto expected = absl::flat_hash_map<std::string, PJRT_NamedValue_Type>({
{"string", PJRT_NamedValue_Type::PJRT_NamedValue_kString},
{"int64", PJRT_NamedValue_Type::PJRT_NamedValue_kInt64},
});
absl::flat_hash_map<std::string, xla::PjRtValueType> invalid_map = {
{"string", static_cast<int64_t>(1)}};
auto status = ValidateCreateOptions(invalid_map, expected);
EXPECT_NE(status, absl::OkStatus());
EXPECT_THAT(status.message(),
HasSubstr("Option passed to PJRT_Client_Create with name string "
"has type index 2 but expected type index is 0"));
}
TEST(PjRtCApiHelperTest, Callback) {
auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
auto kv_callback_data = ConvertToCKeyValueCallbacks(kv_store);
auto converted_kv_store = ToCppKeyValueStore(
kv_callback_data->c_kv_get, &kv_callback_data->kv_get_c_func,
kv_callback_data->c_kv_put, &kv_callback_data->kv_put_c_func);
auto s = converted_kv_store->Set("key", "value");
TF_EXPECT_OK(s);
auto v = converted_kv_store->Get("key", absl::Seconds(1));
TF_EXPECT_OK(v.status());
EXPECT_EQ(*v, "value");
}
TEST(PjRtCApiHelperTest, ConvertToCLayoutFromStrides) {
std::vector<int64_t> strides = {4, 8};
absl::StatusOr<BufferMemoryLayoutData> layout_data =
ConvertToBufferMemoryLayoutData(strides);
EXPECT_TRUE(layout_data.ok());
EXPECT_EQ(
layout_data->c_layout.type,
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Strides);
EXPECT_EQ(layout_data->c_layout.strides.num_byte_strides, 2);
EXPECT_EQ(layout_data->c_layout.strides.byte_strides[0], strides[0]);
EXPECT_EQ(layout_data->c_layout.strides.byte_strides[1], strides[1]);
}
TEST(PjRtCApiHelperTest, ConvertToCLayoutFromLayoutNoTiles) {
std::vector<int64_t> minor_to_major = {1, 0};
xla::Layout layout(minor_to_major);
TF_ASSERT_OK_AND_ASSIGN(BufferMemoryLayoutData layout_data,
ConvertToBufferMemoryLayoutData(layout));
EXPECT_EQ(layout_data.c_layout.type,
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled);
EXPECT_EQ(layout_data.c_layout.tiled.num_tiles, 0);
PJRT_Buffer_MemoryLayout_Tiled tiled = layout_data.c_layout.tiled;
EXPECT_EQ(tiled.minor_to_major_size, 2);
EXPECT_EQ(tiled.minor_to_major[0], minor_to_major[0]);
EXPECT_EQ(tiled.minor_to_major[1], minor_to_major[1]);
}
TEST(PjRtCApiHelperTest, ConvertToCLayoutFromLayoutWithTiles) {
std::vector<int64_t> minor_to_major = {1, 0};
xla::Layout layout(minor_to_major);
std::vector<int64_t> tile_dims_1 = {2, 4};
std::vector<int64_t> tile_dims_2 = {1};
layout.mutable_tiles()->push_back(xla::Tile(tile_dims_1));
layout.mutable_tiles()->push_back(xla::Tile(tile_dims_2));
TF_ASSERT_OK_AND_ASSIGN(BufferMemoryLayoutData layout_data,
ConvertToBufferMemoryLayoutData(layout));
EXPECT_EQ(layout_data.c_layout.type,
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled);
PJRT_Buffer_MemoryLayout_Tiled tiled = layout_data.c_layout.tiled;
EXPECT_EQ(tiled.minor_to_major_size, 2);
EXPECT_EQ(tiled.minor_to_major[0], minor_to_major[0]);
EXPECT_EQ(tiled.minor_to_major[1], minor_to_major[1]);
EXPECT_EQ(tiled.num_tiles, 2);
EXPECT_EQ(tiled.tile_dim_sizes[0], tile_dims_1.size());
EXPECT_EQ(tiled.tile_dim_sizes[1], tile_dims_2.size());
EXPECT_EQ(tiled.tile_dims[0], tile_dims_1[0]);
EXPECT_EQ(tiled.tile_dims[1], tile_dims_1[1]);
EXPECT_EQ(tiled.tile_dims[2], tile_dims_2[0]);
}
TEST(PjRtCApiHelperTest, ConvertFromCLayoutToLayout) {
PJRT_Buffer_MemoryLayout c_layout;
c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled;
std::vector<int64_t> minor_to_major = {1, 0};
c_layout.tiled.minor_to_major_size = 2;
c_layout.tiled.minor_to_major = minor_to_major.data();
c_layout.tiled.num_tiles = 2;
std::vector<size_t> tile_dim_sizes = {2, 1};
c_layout.tiled.tile_dim_sizes = tile_dim_sizes.data();
std::vector<int64_t> tile_dims = {2, 4, 1};
c_layout.tiled.tile_dims = tile_dims.data();
TF_ASSERT_OK_AND_ASSIGN(xla::Layout layout, ConvertToLayout(c_layout.tiled));
EXPECT_EQ(layout.ToString(), "{1,0:T(2,4)(1)}");
}
TEST(PjRtCApiHelperTest, ConvertFromCLayoutToLayoutNoTile) {
PJRT_Buffer_MemoryLayout c_layout;
c_layout.type =
PJRT_Buffer_MemoryLayout_Type::PJRT_Buffer_MemoryLayout_Type_Tiled;
c_layout.tiled.num_tiles = 0;
std::vector<int64_t> minor_to_major = {1, 0};
c_layout.tiled.minor_to_major_size = 2;
c_layout.tiled.minor_to_major = minor_to_major.data();
TF_ASSERT_OK_AND_ASSIGN(xla::Layout layout, ConvertToLayout(c_layout.tiled));
EXPECT_EQ(layout.ToString(), "{1,0}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_helpers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_helpers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
17d19366-2d7f-45aa-9c96-4b5ac5ca6c8d | cpp | tensorflow/tensorflow | hlo_control_flow_flattening | third_party/xla/xla/tools/hlo_control_flow_flattening.cc | third_party/xla/xla/tools/hlo_control_flow_flattening_test.cc | #include "xla/tools/hlo_control_flow_flattening.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
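// Recursively builds a zero-initialized constant (or tuple of constants)
// matching `shape` and adds it to `computation`.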
HloInstruction* CreateConstant(const Shape& shape,
HloComputation* computation) {
if (shape.IsTuple()) {
std::vector<HloInstruction*> tuple_arguments(shape.tuple_shapes_size());
for (int index = 0; index < shape.tuple_shapes_size(); ++index) {
tuple_arguments[index] =
CreateConstant(shape.tuple_shapes(index), computation);
}
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_arguments));
} else {
return computation->AddInstruction(
HloInstruction::CreateConstant(Literal::CreateFromShape(shape)));
}
}
void PrintSubexpression(HloInstruction* inst, int depth) {
if (depth == 0) {
return;
}
for (auto* operand : inst->operands()) {
PrintSubexpression(operand, depth - 1);
}
VLOG(2) << inst->ToString();
}
bool IsConstantScalarInt(const HloInstruction* inst) {
return inst->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsEffectiveScalar(inst->shape()) &&
inst->shape().IsInteger();
}
bool IsNotContainedInLoop(const HloInstruction& while_hlo,
const CallGraph& call_graph) {
const HloComputation* computation = while_hlo.parent();
while (!computation->IsEntryComputation()) {
auto& node = call_graph.GetNode(computation);
CHECK_EQ(node.caller_callsites().size(), 1)
<< "The module is not flattened!";
auto& callsite = node.caller_callsites()[0];
if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
return false;
}
computation = callsite.instruction()->parent();
}
return true;
}
}  // namespace
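// Extracts a trip-count estimate from the while condition when it compares
// against a constant scalar integer; a constant bound is capped at
// max_loop_count, otherwise default_loop_count is returned.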
int GetLoopBound(const HloInstruction& while_hlo, const int default_loop_count,
const int max_loop_count) {
HloInstruction* condition = while_hlo.while_condition()->root_instruction();
if (condition->opcode() == HloOpcode::kCompare) {
int64_t value = 0;
Comparison::Direction cmp = condition->comparison_direction();
if ((cmp == Comparison::Direction::kLt ||
cmp == Comparison::Direction::kLe ||
cmp == Comparison::Direction::kNe) &&
IsConstantScalarInt(condition->operand(1))) {
value = *condition->operand(1)->literal().GetFirstInteger();
} else if ((cmp == Comparison::Direction::kGt ||
cmp == Comparison::Direction::kGe ||
cmp == Comparison::Direction::kNe) &&
IsConstantScalarInt(condition->operand(0))) {
value = *condition->operand(0)->literal().GetFirstInteger();
}
if (value > 0) {
return std::min(value, static_cast<int64_t>(max_loop_count));
}
}
return default_loop_count;
}
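// Same as GetLoopBound, but additionally limits loops that are not nested
// inside another while loop to max_outer_loop_count.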
int GetLoopBoundWithOuterLoopMax(const HloInstruction& while_hlo,
const CallGraph& call_graph,
const int default_loop_count,
const int max_outer_loop_count,
const int max_loop_count) {
int loop_bound = GetLoopBound(while_hlo, default_loop_count, max_loop_count);
if (loop_bound > max_outer_loop_count) {
if (IsNotContainedInLoop(while_hlo, call_graph)) {
return max_outer_loop_count;
}
}
return loop_bound;
}
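// Makes a while loop run a fixed number of iterations by appending an
// induction variable to the loop state, incrementing it in the body, and
// replacing the original condition with `induction_variable < loop_bound`.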
absl::Status HloControlFlowFlattening::FlattenWhileLoop(
HloInstruction* while_hlo, const CallGraph& call_graph) const {
CHECK_EQ(while_hlo->opcode(), HloOpcode::kWhile);
HloComputation* computation = while_hlo->parent();
HloInstruction* initialization = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(0)));
HloInstruction* old_tuple = while_hlo->mutable_operand(0);
HloInstruction* new_tuple =
TupleUtil::AppendSuffix(old_tuple, {initialization});
int new_tuple_size = new_tuple->shape().tuple_shapes().size();
TF_RETURN_IF_ERROR(while_hlo->ReplaceOperandWithDifferentShape(0, new_tuple));
auto change_op_shape = [&](HloInstruction* instruction) {
Shape* shape = instruction->mutable_shape();
CHECK(shape->IsTuple());
CHECK_EQ(shape->tuple_shapes().size(), new_tuple_size - 1);
Shape* subshape = shape->add_tuple_shapes();
return ShapeUtil::PopulateShape(S32, {}, subshape);
};
auto replace_non_gte_users =
[](HloInstruction* new_tuple) -> absl::StatusOr<HloInstruction*> {
CHECK(new_tuple->shape().IsTuple());
HloInstruction* prefix = nullptr;
std::vector<HloInstruction*> users(new_tuple->users());
for (HloInstruction* user : users) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
if (prefix == nullptr) {
prefix = TupleUtil::ExtractPrefix(
new_tuple, new_tuple->shape().tuple_shapes_size() - 1);
}
TF_RETURN_IF_ERROR(new_tuple->ReplaceUseWithDifferentShape(user, prefix));
}
return prefix;
};
{
HloComputation* condition = while_hlo->while_condition();
TF_RETURN_IF_ERROR(change_op_shape(condition->parameter_instruction(0)));
TF_RETURN_IF_ERROR(
replace_non_gte_users(condition->parameter_instruction(0)).status());
if (VLOG_IS_ON(2)) {
VLOG(2) << "Loop condition in " << while_hlo->parent()->name();
PrintSubexpression(condition->root_instruction(), 3);
}
const int loop_bound = GetLoopBoundWithOuterLoopMax(
*while_hlo, call_graph, while_execution_count_, max_outer_loop_count_,
max_loop_count_);
VLOG(1) << "loop_bound = " << loop_bound;
HloInstruction* limit = condition->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(loop_bound)));
Shape shape = initialization->shape();
HloInstruction* induction_variable =
condition->AddInstruction(HloInstruction::CreateGetTupleElement(
shape, condition->parameter_instruction(0), new_tuple_size - 1));
HloInstruction* compare =
condition->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), induction_variable, limit,
ComparisonDirection::kLt));
TF_RETURN_IF_ERROR(
condition->ReplaceInstruction(condition->root_instruction(), compare));
}
{
HloComputation* body = while_hlo->while_body();
TF_RETURN_IF_ERROR(change_op_shape(body->parameter_instruction(0)));
TF_RETURN_IF_ERROR(
replace_non_gte_users(body->parameter_instruction(0)).status());
HloInstruction* old_root = body->root_instruction();
Shape shape = initialization->shape();
HloInstruction* induction_variable =
body->AddInstruction(HloInstruction::CreateGetTupleElement(
shape, body->parameter_instruction(0), new_tuple_size - 1));
HloInstruction* increment = body->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int>(1)));
induction_variable = body->AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, induction_variable, increment));
HloInstruction* new_root =
TupleUtil::AppendSuffix(old_root, {induction_variable});
body->set_root_instruction(new_root, true);
}
std::vector<HloInstruction*> while_users(while_hlo->users().begin(),
while_hlo->users().end());
TF_RETURN_IF_ERROR(change_op_shape(while_hlo));
TF_ASSIGN_OR_RETURN(HloInstruction * prefix,
replace_non_gte_users(while_hlo));
if (while_hlo->parent()->root_instruction() == while_hlo) {
if (prefix == nullptr) {
prefix = TupleUtil::ExtractPrefix(while_hlo, new_tuple_size - 1);
}
while_hlo->parent()->set_root_instruction(prefix,
true);
}
return absl::OkStatus();
}
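// Replaces an infeed with a no-op custom call of the infeed's data shape,
// tupled with the original token operand.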
absl::Status HloControlFlowFlattening::RemoveInfeed(
HloInstruction* infeed_hlo) const {
CHECK_EQ(infeed_hlo->opcode(), HloOpcode::kInfeed);
HloComputation* computation = infeed_hlo->parent();
CHECK_EQ(infeed_hlo->shape().tuple_shapes_size(), 2);
const Shape& infeed_shape = ShapeUtil::GetSubshape(infeed_hlo->shape(), {0});
HloInstruction* custom_call = computation->AddInstruction(
HloInstruction::CreateCustomCall(infeed_shape, {}, kNopCustomCallTarget));
auto new_tuple = HloInstruction::CreateTuple(
{custom_call, infeed_hlo->mutable_operand(0)});
TF_RETURN_IF_ERROR(
computation->ReplaceWithNewInstruction(infeed_hlo, std::move(new_tuple)));
custom_call->SetAndSanitizeName(infeed_hlo->name());
return absl::OkStatus();
}
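// Replaces a recv/recv-done pair with no-op custom calls, keeping the
// original instruction names and updating the schedule when one exists.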
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
HloControlFlowFlattening::RemoveRecvAndRecvDone(
HloInstruction* recv_done,
absl::flat_hash_set<HloInstruction*>* additional_removed) const {
CHECK_EQ(recv_done->opcode(), HloOpcode::kRecvDone);
CHECK_EQ(recv_done->operand_count(), 1);
HloInstruction* recv = recv_done->mutable_operand(0);
CHECK_EQ(recv->opcode(), HloOpcode::kRecv);
HloComputation* computation = recv_done->parent();
CHECK_EQ(recv_done->shape().tuple_shapes_size(), 2);
HloModule* module = computation->parent();
HloInstruction* custom_call_recv =
computation->AddInstruction(HloInstruction::CreateCustomCall(
recv->shape(), recv->operands(), kNopCustomCallTarget));
std::string original_recv_name(recv->name());
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, recv, custom_call_recv);
}
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(recv, custom_call_recv));
custom_call_recv->SetAndSanitizeName(original_recv_name);
std::string original_recv_done_name(recv_done->name());
HloInstruction* custom_call_recv_done = computation->AddInstruction(
HloInstruction::CreateCustomCall(
recv_done->shape(), recv_done->operands(), kNopCustomCallTarget),
recv_done->name());
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, recv_done,
custom_call_recv_done);
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(recv_done, custom_call_recv_done));
custom_call_recv_done->SetAndSanitizeName(original_recv_done_name);
return std::make_pair(custom_call_recv, custom_call_recv_done);
}
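// Replaces an outfeed with a side-effecting, manually sharded custom call
// that returns a token.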
absl::Status HloControlFlowFlattening::RemoveOutfeed(
HloInstruction* outfeed_hlo) const {
CHECK_EQ(outfeed_hlo->opcode(), HloOpcode::kOutfeed);
HloComputation* computation = outfeed_hlo->parent();
HloInstruction* custom_call =
computation->AddInstruction(HloInstruction::CreateCustomCall(
outfeed_hlo->shape(), outfeed_hlo->operands(),
kNopReturnTokenCustomCallTarget));
Cast<HloCustomCallInstruction>(custom_call)
->set_custom_call_has_side_effect(true);
custom_call->set_sharding(HloSharding::Manual());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(outfeed_hlo, custom_call));
custom_call->SetAndSanitizeName(outfeed_hlo->name());
return absl::OkStatus();
}
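// Replaces a send/send-done pair with no-op custom calls (the done-side call
// returns a token and keeps the side-effect bit), updating the schedule when
// one exists.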
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
HloControlFlowFlattening::RemoveSendAndSendDone(
HloInstruction* send_done,
absl::flat_hash_set<HloInstruction*>* additional_removed) const {
CHECK_EQ(send_done->opcode(), HloOpcode::kSendDone);
CHECK_EQ(send_done->operand_count(), 1);
HloInstruction* send = send_done->mutable_operand(0);
CHECK_EQ(send->opcode(), HloOpcode::kSend);
HloComputation* computation = send_done->parent();
HloModule* module = computation->parent();
HloInstruction* custom_call_send =
computation->AddInstruction(HloInstruction::CreateCustomCall(
send->shape(), send->operands(), kNopCustomCallTarget));
std::string original_send_name(send->name());
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, send, custom_call_send);
}
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(send, custom_call_send));
custom_call_send->SetAndSanitizeName(original_send_name);
HloInstruction* custom_call_send_done =
computation->AddInstruction(HloInstruction::CreateCustomCall(
send_done->shape(), send_done->operands(),
kNopReturnTokenCustomCallTarget));
std::string original_send_done_name(send_done->name());
Cast<HloCustomCallInstruction>(custom_call_send_done)
->set_custom_call_has_side_effect(true);
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, send_done,
custom_call_send_done);
}
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(send_done, custom_call_send_done));
custom_call_send_done->SetAndSanitizeName(original_send_done_name);
return std::make_pair(custom_call_send, custom_call_send_done);
}
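// Replaces a collective instruction with a no-op custom call of the same
// shape, copying its backend config and name and patching the schedule.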
absl::StatusOr<HloInstruction*> HloControlFlowFlattening::RemoveCollective(
HloInstruction* hlo) const {
HloComputation* computation = hlo->parent();
HloInstruction* custom_call =
computation->AddInstruction(HloInstruction::CreateCustomCall(
hlo->shape(), hlo->operands(), kNopCustomCallTarget));
custom_call->CopyBackendConfigFrom(hlo);
HloModule* module = computation->parent();
if (module->has_schedule() &&
module->schedule().is_computation_scheduled(computation)) {
module->schedule().replace_instruction(computation, hlo, custom_call);
}
std::string original_op_name(hlo->name());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, custom_call));
custom_call->SetAndSanitizeName(original_op_name);
return custom_call;
}
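// Replaces a partition-id/replica-id style instruction with a zero constant
// of the same shape, reusing the original name.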
absl::Status HloControlFlowFlattening::RemoveId(HloInstruction* hlo) const {
HloComputation* computation = hlo->parent();
HloInstruction* zero = CreateConstant(hlo->shape(), computation);
std::string original_op_name(hlo->name());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, zero));
zero->SetAndSanitizeName(original_op_name);
return absl::OkStatus();
}
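// Walks all non-async computations and, according to the configured options,
// flattens while loops and replaces infeeds/outfeeds, host transfers,
// sends/recvs, collectives, and partition/replica ids with no-op
// equivalents; finishes with DCE and a schedule update.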
absl::StatusOr<bool> HloControlFlowFlattening::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto call_graph = CallGraph::Build(module);
bool changed = false;
absl::flat_hash_set<HloInstruction*> removed;
for (HloComputation* computation : module->computations(execution_threads)) {
if (computation->IsAsyncComputation()) {
continue;
}
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
if (removed.contains(instruction)) {
continue;
}
if (flatten_while_loop_ && instruction->opcode() == HloOpcode::kWhile) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(FlattenWhileLoop(instruction, *call_graph));
changed = true;
} else if (remove_infeed_outfeed_ &&
instruction->opcode() == HloOpcode::kInfeed) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveInfeed(instruction));
changed = true;
} else if (remove_infeed_outfeed_ &&
instruction->opcode() == HloOpcode::kOutfeed) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveOutfeed(instruction));
changed = true;
} else if (instruction->opcode() == HloOpcode::kSendDone) {
auto send_done_instruction =
DynCast<HloSendDoneInstruction>(instruction);
CHECK(send_done_instruction);
if (remove_comm_ || (remove_host_transfer_ &&
send_done_instruction->is_host_transfer())) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(
RemoveSendAndSendDone(instruction, &removed).status());
changed = true;
}
} else if (instruction->opcode() == HloOpcode::kRecvDone) {
auto recv_done_instruction =
DynCast<HloRecvDoneInstruction>(instruction);
CHECK(recv_done_instruction);
if (remove_comm_ || (remove_host_transfer_ &&
recv_done_instruction->is_host_transfer())) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(
RemoveRecvAndRecvDone(instruction, &removed).status());
changed = true;
}
} else if (remove_comm_ && IsCollective(instruction) &&
!instruction->parent()->IsFusionComputation() &&
(instruction->opcode() != HloOpcode::kAsyncStart &&
instruction->opcode() != HloOpcode::kAsyncUpdate)) {
if (instruction->opcode() == HloOpcode::kAsyncDone) {
while (instruction->opcode() == HloOpcode::kAsyncDone ||
instruction->opcode() == HloOpcode::kAsyncUpdate ||
instruction->opcode() == HloOpcode::kAsyncStart) {
HloInstruction* operand = instruction->mutable_operand(0);
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveCollective(instruction).status());
instruction = operand;
}
} else {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveCollective(instruction).status());
}
changed = true;
} else if ((remove_comm_ || remove_id_) &&
(instruction->opcode() == HloOpcode::kPartitionId ||
instruction->opcode() == HloOpcode::kReplicaId ||
(instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() == "SliceId"))) {
VLOG(1) << "Remove " << instruction->name();
TF_RETURN_IF_ERROR(RemoveId(instruction));
changed = true;
}
}
}
HloDCE hlo_dce;
TF_ASSIGN_OR_RETURN(bool dce_changed, hlo_dce.Run(module, execution_threads));
changed |= dce_changed;
if (changed && module->has_schedule()) {
TF_RETURN_IF_ERROR(module->schedule().Update());
}
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
} | #include "xla/tools/hlo_control_flow_flattening.h"
#include <memory>
#include <utility>
#include "absl/strings/str_replace.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/despecializer.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/spmd/spmd_partitioner.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class HloControlFlowFlatteningTest : public HloTestBase {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
std::unique_ptr<VerifiedHloModule> hlo_module, int64_t num_devices = 2) {
spmd::SpmdPartitionerOptions options;
auto collective_ops_creator =
spmd::GetDefaultCollectiveOpsCreator(num_devices, 1);
collective_ops_creator.create_cross_partition_all_gather = nullptr;
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(num_devices);
HloPassPipeline pass("spmd-partitioning");
pass.AddPass<HloVerifier>(false,
false);
pass.AddPass<spmd::SpmdPartitioner>(num_devices, 1,
options, collective_ops_creator);
pass.AddPass<HloVerifier>(false,
false);
TF_RETURN_IF_ERROR(pass.Run(hlo_module.get()).status());
return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(hlo_module));
}
};
constexpr int kDefaultMaxLoopCount = 1000;
TEST_F(HloControlFlowFlatteningTest, WhileRoot) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto root = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1)));
EXPECT_THAT(while_op,
op::While(op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Constant())));
auto condition = while_op->while_condition();
EXPECT_THAT(
condition->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0), 2), op::Constant()));
auto body = while_op->while_body();
EXPECT_THAT(body->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Add(op::GetTupleElement(op::Parameter(0), 2),
op::Constant())));
}
TEST_F(HloControlFlowFlatteningTest, WhileConditionCallComputation) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition.called {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] custom-call(), custom_call_target="AllocateBuffer", custom_call_has_side_effect=true
less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
ROOT tuple.2 = (pred[]) tuple(less-than)
}
While.condition {
loop_var.3 = (s32[], s32[3]{0}) parameter(0)
call = (pred[]) call(loop_var.3), to_apply=While.condition.called
ROOT get-tuple-element.4 = pred[] get-tuple-element(call), index=0
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
XLA_VLOG_LINES(3, "Loaded HLO module: " + module->ToString());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto root = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1)));
EXPECT_THAT(while_op,
op::While(op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Constant())));
auto condition = while_op->while_condition();
EXPECT_THAT(
condition->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0), 2), op::Constant()));
auto body = while_op->while_body();
EXPECT_THAT(body->root_instruction(),
op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Add(op::GetTupleElement(op::Parameter(0), 2),
op::Constant())));
}
TEST_F(HloControlFlowFlatteningTest, WhileRootScheduled) {
absl::string_view hlo_string = R"(
HloModule While, is_scheduled=true
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto root = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(root, op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1)));
EXPECT_THAT(while_op,
op::While(op::Tuple(op::GetTupleElement(), op::GetTupleElement(),
op::Constant())));
auto condition = while_op->while_condition();
EXPECT_THAT(
condition->root_instruction(),
op::Compare(op::GetTupleElement(op::Parameter(0), 2), op::Constant()));
}
TEST_F(HloControlFlowFlatteningTest, WhileUser) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
FusedComputation {
param = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(param), index=0
get-tuple-element.5 = s32[3]{0} get-tuple-element(param), index=1
broadcast = s32[3]{0} broadcast(get-tuple-element.4), dimensions={}
ROOT add = s32[3]{0} add(broadcast, get-tuple-element.5)
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
ROOT fusion = s32[3]{0} fusion(while), kind=kLoop, calls=FusedComputation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto fusion = module->entry_computation()->root_instruction();
auto while_op = module->entry_computation()->GetInstructionWithName("while");
EXPECT_THAT(fusion, op::Fusion(op::Tuple(op::GetTupleElement(while_op, 0),
op::GetTupleElement(while_op, 1))));
}
TEST_F(HloControlFlowFlatteningTest, Infeed) {
absl::string_view hlo_string = R"(
HloModule Infeed
ENTRY Infeed {
after-all = token[] after-all()
ROOT infeed.23 = ((bf16[3]{0}, s32[12,5]{0,1}), token[]) infeed(after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto custom_call =
module->entry_computation()->GetInstructionWithName("infeed.23");
EXPECT_THAT(custom_call, op::CustomCall());
auto tuple = module->entry_computation()->root_instruction();
EXPECT_THAT(tuple, op::Tuple(custom_call, op::AfterAll()));
}
TEST_F(HloControlFlowFlatteningTest, InfeedPreserveLayout) {
absl::string_view hlo_string = R"(
HloModule Infeed
ENTRY Infeed {
after-all = token[] after-all()
ROOT infeed = ((bf16[3]{0}, s32[12,5]{0,1:T(8,128)}), token[]) infeed(after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
Shape root_shape = module->entry_computation()->root_instruction()->shape();
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto tuple = module->entry_computation()->root_instruction();
EXPECT_THAT(tuple, op::Tuple(op::CustomCall(), op::AfterAll()));
EXPECT_EQ(tuple->shape(), root_shape);
}
TEST_F(HloControlFlowFlatteningTest, OutfeedCustomCallIsPartitionable) {
absl::string_view hlo_string = R"(
HloModule Outfeed
ENTRY Outfeed {
param = (bf16[3]{0}, s32[12,5]{0,1}) parameter(0)
after-all = token[] after-all()
ROOT outfeed.23 = token[] outfeed(param, after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
3, 3,
3, true});
EXPECT_TRUE(flattening.Run(module.get()).value());
auto custom_call = module->entry_computation()->root_instruction();
EXPECT_EQ(custom_call->name(), "outfeed.23");
EXPECT_TRUE(custom_call->has_sharding());
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
PartitionComputation(std::move(module)));
}
TEST_F(HloControlFlowFlatteningTest, Outfeed) {
absl::string_view hlo_string = R"(
HloModule Outfeed
ENTRY Outfeed {
param = (bf16[3]{0}, s32[12,5]{0,1}) parameter(0)
after-all = token[] after-all()
ROOT outfeed.23 = token[] outfeed(param, after-all)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
auto custom_call = module->entry_computation()->root_instruction();
EXPECT_EQ(custom_call->name(), "outfeed.23");
EXPECT_THAT(custom_call, op::CustomCall(op::Parameter(0), op::AfterAll()));
}
TEST_F(HloControlFlowFlatteningTest, AllReduce) {
absl::string_view hlo_string = R"(
HloModule AllReduce
sum {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY AllReduce {
param0 = f32[3]{0} parameter(0)
param1 = f32[12,5]{0,1} parameter(1)
ROOT all-reduce = (bf16[3]{0}, bf16[12,5]{0,1}) all-reduce(param0, param1), to_apply=sum, replica_groups={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0), op::Parameter(1)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"all-reduce");
}
TEST_F(HloControlFlowFlatteningTest, AllReduceStartAndDone) {
absl::string_view hlo_string = R"(
HloModule CRS
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
crs = f32[8]{0} all-reduce-start(input), replica_groups={}, to_apply=add
ROOT done = f32[8]{0} all-reduce-done(crs)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"crs");
}
TEST_F(HloControlFlowFlatteningTest, AllGather) {
absl::string_view hlo_string = R"(
HloModule AllGather
ENTRY AllGather {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,128]{0,1} all-gather(input), replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "ag");
}
TEST_F(HloControlFlowFlatteningTest, AllToAll) {
absl::string_view hlo_string = R"(
HloModule AllToAll
ENTRY AllToAll {
input = f32[128,32]{0,1} parameter(0)
ROOT a2a = (f32[128,32]{0,1}) all-to-all(input), replica_groups={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "a2a");
}
TEST_F(HloControlFlowFlatteningTest, CollectivePermute) {
absl::string_view hlo_string = R"(
HloModule CollectivePermute
ENTRY CollectivePermute {
input = f32[128,32]{0,1} parameter(0)
ROOT collective-permute = f32[128,32]{0,1} collective-permute(input), source_target_pairs={{0,1},{1,2},{2,3}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"collective-permute");
}
TEST_F(HloControlFlowFlatteningTest, ReplicaIdSucceedsWithChange) {
absl::string_view hlo_string = R"(
HloModule ReplicaId
ENTRY ReplicaId {
ROOT replica-id.18600 = u32[]{:T(128)} replica-id()
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(), op::Constant());
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"replica-id.18600");
}
TEST_F(HloControlFlowFlatteningTest, RemoveReplicaIdButKeepAllReduce) {
absl::string_view kHloText = R"(
HloModule RemoveReplicaIdButKeepCollective
%sum (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] a, f32[] b)
}
ENTRY ReplicaId {
replica-id.1 = u32[]{:T(128)} replica-id()
ROOT all-reduce.1 = u32[]{:T(128)} all-reduce(replica-id.1), to_apply=sum, replica_groups={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
1, 1,
1, false,
false, false,
false, true});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(), op::AllReduce());
EXPECT_THAT(module->entry_computation()->root_instruction()->operand(0),
op::Constant());
}
TEST_F(HloControlFlowFlatteningTest, CollectivePermuteInPlaceUpdate) {
absl::string_view hlo_string = R"(
HloModule CollectivePermuteInPlaceUpdate
ENTRY CollectivePermuteInPlaceUpdate {
input = f32[128,32]{0,1} parameter(0)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.1, constant.2)
ROOT collective-permute = f32[128,128]{0,1} collective-permute(input, output, tuple.1, tuple.2), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{128,32}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0), op::Broadcast(op::Constant()),
op::Tuple(op::Constant(), op::Constant()),
op::Tuple(op::Constant(), op::Constant())));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"collective-permute");
}
TEST_F(HloControlFlowFlatteningTest, CollectivePermuteStartAndDone) {
absl::string_view hlo_string = R"(
HloModule CollectivePermuteStartAndDone
ENTRY CollectivePermuteStartAndDone {
input = f32[128,32]{0,1} parameter(0)
collective-permute-start.1 = (f32[128,32]{0,1}, f32[128,32]{0,1}, u32[], u32[]) collective-permute-start(input), source_target_pairs={{0,1},{1,2},{2,3}}
ROOT collective-permute-done.1 = f32[128,32]{0,1} collective-permute-done(collective-permute-start.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"collective-permute-done.1");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"collective-permute-start.1");
}
TEST_F(HloControlFlowFlatteningTest, Recv) {
absl::string_view hlo_string = R"(
HloModule Recv
ENTRY %Recv () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"recv-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"recv");
}
TEST_F(HloControlFlowFlatteningTest, RecvHostTransfer) {
absl::string_view hlo_string = R"(
HloModule Recv
ENTRY %Recv () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, is_host_transfer=true
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
3, 3,
3, true,
true, false,
true});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"recv-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"recv");
}
TEST_F(HloControlFlowFlatteningTest, Send) {
absl::string_view hlo_string = R"(
HloModule Send
ENTRY %Send () -> token[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%recv}
ROOT %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"send-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"send");
}
TEST_F(HloControlFlowFlatteningTest, SendHostTransfer) {
absl::string_view hlo_string = R"(
HloModule Send
ENTRY %Send () -> token[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
%constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, is_host_transfer=true, control-predecessors={%recv}
ROOT %send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ControlDepRemover control_remover;
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
3, 3,
3, true,
true, false,
true});
TF_ASSERT_OK(control_remover.Run(module.get()).status());
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall()));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(),
"send-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"send");
}
TEST_F(HloControlFlowFlatteningTest, AllGatherStartAndDone) {
absl::string_view hlo_string = R"(
HloModule AllGatherStartAndDone
ENTRY AllGatherStartAndDone {
%input = f32[8,256,256] parameter(0)
%ag-start = (f32[8,256,256], f32[16,256,256]) all-gather-start(
f32[8,256,256] %input), replica_groups={{0,1}}, dimensions={0},
metadata={op_type="AllGather" op_name="ag0"}
ROOT %ag-done = f32[16,256,256] all-gather-done(
(f32[8,256,256], f32[16,256,256]) %ag-start),
metadata={op_type="AllGather" op_name="ag0"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloControlFlowFlattening flattening(
HloControlFlowFlattening::Options{3});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "ag-done");
EXPECT_EQ(module->entry_computation()->root_instruction()->operand(0)->name(),
"ag-start");
}
TEST_F(HloControlFlowFlatteningTest, CollectiveFusion) {
absl::string_view hlo_template = R"(
HloModule collective-fusion, is_scheduled=true
%sum (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] a, f32[] b)
}
%all-gather {
%constant.3 = f32[] constant(0)
%broadcast = f32[full_size,8,128]{2,1,0} broadcast(%constant.3), dimensions={}
%input.0 = f32[4,8,128]{2,1,0} parameter(0)
%input.1 = f32[4,8,128]{2,1,0} parameter(1)
%replica-id.1 = u32[] replica-id()
%constant.4 = u32[] constant(4)
%multiply.1 = u32[] multiply(%replica-id.1, %constant.4)
%constant.5 = u32[] constant(0)
%constant.6 = u32[] constant(0)
%dynamic-update-slice = f32[full_size,8,128]{2,1,0} dynamic-update-slice(%broadcast, %input.0, %multiply.1, %constant.5, %constant.6)
%dynamic-update-slice.1 = f32[full_size,8,128]{2,1,0} dynamic-update-slice(%broadcast, %input.1, %multiply.1, %constant.5, %constant.6)
%all-reduce = (f32[full_size,8,128]{2,1,0}, f32[full_size,8,128]{2,1,0}) all-reduce(%dynamic-update-slice, %dynamic-update-slice.1), replica_groups={}, backend_config="{barrier_config:{barrier_type:3,id:0}}", to_apply=%sum
%gte0 = f32[full_size,8,128]{2,1,0} get-tuple-element(%all-reduce), index=0
%slice = f32[unpadded_size,8,128]{2,1,0} slice(%gte0), slice={[0:unpadded_size], [0:8], [0:128]}
%bitcast = f32[unpadded_size,1,8,128]{3,2,1,0} bitcast(%slice)
%gte1 = f32[full_size,8,128]{2,1,0} get-tuple-element(%all-reduce), index=1
ROOT %tuple = (f32[unpadded_size,1,8,128]{3,2,1,0}, f32[full_size,8,128]{2,1,0}) tuple(%bitcast, %gte1)
}
ENTRY main {
%add.1 = f32[4,8,128]{2,1,0} parameter(0)
%add.2 = f32[4,8,128]{2,1,0} parameter(1)
ROOT %fusion = (f32[unpadded_size,1,8,128]{3,2,1,0}, f32[full_size,8,128]{2,1,0}) fusion(%add.1, %add.2), kind=kCustom, calls=%all-gather
}
)";
auto hlo_string = absl::StrReplaceAll(
hlo_template, {{"full_size", absl::StrCat(12288)},
{"unpadded_size", absl::StrCat(12285)}});
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_TRUE(IsCollective(module->entry_computation()->root_instruction()));
HloControlFlowFlattening flattening({});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::Parameter(0), op::Parameter(1)));
EXPECT_EQ(module->entry_computation()->root_instruction()->name(), "fusion");
}
TEST_F(HloControlFlowFlatteningTest, AsyncAllToAll) {
absl::string_view hlo = R"(
ENTRY main {
param = f32[4,8,128]{2,1,0} parameter(0)
all-to-all-start = ((f32[4,8,128]{2,1,0}), f32[4,8,128]{2,1,0}, u32[], u32[]) all-to-all-start(param), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={1}
ROOT all-to-all-done = f32[4,8,128]{2,1,0} all-to-all-done(all-to-all-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
EXPECT_TRUE(IsCollective(module->entry_computation()->root_instruction()));
HloControlFlowFlattening flattening({});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::CustomCall(op::CustomCall(op::Parameter(0))));
}
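// Test helper: checks that the loop bound in the while condition (the constant
// operand of the root compare) equals `expected_bound`.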
void CheckWhileBound(HloInstruction* while_op, int expected_bound) {
auto* cond = while_op->while_condition();
ASSERT_NE(cond, nullptr);
auto* hlo_bound = cond->root_instruction()->operand(1);
EXPECT_TRUE(hlo_bound->IsConstant());
if (hlo_bound->IsConstant()) {
EXPECT_TRUE(hlo_bound->literal().IsAll(expected_bound));
}
}
TEST_F(HloControlFlowFlatteningTest, MaxOuterLoopCount) {
absl::string_view hlo_string = R"(
HloModule NestedWhileComp
InnerBody {
constant.8 = pred[] constant(false)
parameter.5 = (s32[], s32[]) parameter(0)
get-tuple-element.6 = s32[] get-tuple-element(parameter.5), index=0
constant.9 = s32[] constant(1)
add.10 = s32[] add(get-tuple-element.6, constant.9)
get-tuple-element.7 = s32[] get-tuple-element(parameter.5), index=1
constant.11 = s32[] constant(1)
add.12 = s32[] add(get-tuple-element.7, constant.11)
ROOT tuple.13 = (s32[], s32[]) tuple(add.10, add.12)
}
InnerCond {
parameter.15 = (s32[], s32[]) parameter(0)
get-tuple-element.17 = s32[] get-tuple-element(parameter.15), index=1
constant.18 = pred[] constant(false)
get-tuple-element.16 = s32[] get-tuple-element(parameter.15), index=0
inner_bound = s32[] constant(100)
ROOT compare.20 = pred[] compare(get-tuple-element.16, inner_bound), direction=LT
}
OuterBody {
constant.24 = pred[] constant(false)
constant.25 = s32[] constant(0)
parameter.22 = (s32[]) parameter(0)
get-tuple-element.23 = s32[] get-tuple-element(parameter.22), index=0
tuple.26 = (s32[], s32[]) tuple(constant.25, get-tuple-element.23)
inner_while = (s32[], s32[]) while(tuple.26), condition=InnerCond, body=InnerBody
get-tuple-element.28 = s32[] get-tuple-element(inner_while), index=0
get-tuple-element.29 = s32[] get-tuple-element(inner_while), index=1
tuple.30 = (s32[], s32[]) tuple(get-tuple-element.28, get-tuple-element.29)
get-tuple-element.31 = s32[] get-tuple-element(tuple.30), index=0
get-tuple-element.32 = s32[] get-tuple-element(tuple.30), index=1
ROOT tuple.33 = (s32[]) tuple(get-tuple-element.32)
}
OuterCond {
constant.37 = pred[] constant(false)
parameter.35 = (s32[]) parameter(0)
get-tuple-element.36 = s32[] get-tuple-element(parameter.35), index=0
outer_bound = s32[] constant(1000)
ROOT compare.39 = pred[] compare(get-tuple-element.36, outer_bound), direction=LT
}
ENTRY NestedWhileComp {
constant.1 = pred[] constant(false)
constant.2 = s32[] constant(0)
tuple.3 = (s32[]) tuple(constant.2)
outer_while = (s32[]) while(tuple.3), condition=OuterCond, body=OuterBody
get-tuple-element.41 = s32[] get-tuple-element(outer_while), index=0
tuple.42 = (s32[]) tuple(get-tuple-element.41)
get-tuple-element.43 = s32[] get-tuple-element(tuple.42), index=0
ROOT tuple.44 = (s32[]) tuple(get-tuple-element.43)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
constexpr int kWhileExecutionCount = 5;
constexpr int kExistingInnerLoopCount = 100;
constexpr int kMaxLoopCount = 10;
HloControlFlowFlattening flattening(HloControlFlowFlattening::Options{
kWhileExecutionCount,
kMaxLoopCount});
EXPECT_TRUE(flattening.Run(module.get()).value());
TF_ASSERT_OK(HloVerifier(true,
true)
.Run(module.get())
.status());
LOG(INFO) << module->ToString();
auto* outer_while =
module->entry_computation()->GetInstructionWithName("outer_while");
ASSERT_NE(outer_while, nullptr);
CheckWhileBound(outer_while, kMaxLoopCount);
auto* while_body = outer_while->while_body();
ASSERT_NE(while_body, nullptr);
auto* inner_while = while_body->GetInstructionWithName("inner_while");
ASSERT_NE(inner_while, nullptr);
CheckWhileBound(inner_while, kExistingInnerLoopCount);
}
TEST_F(HloControlFlowFlatteningTest, MatchLtUseInferedLoopCount) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(GetLoopBound(*module->entry_computation()->root_instruction(), 123,
kDefaultMaxLoopCount),
100);
}
TEST_F(HloControlFlowFlatteningTest, MatchGtUseInferedLoopCount) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(50)
ROOT greater-than = pred[] compare(constant.2, get-tuple-element.3), direction=GT
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(GetLoopBound(*module->entry_computation()->root_instruction(), 123,
kDefaultMaxLoopCount),
50);
}
TEST_F(HloControlFlowFlatteningTest, NotMatchEqUseDefaultLoopCount) {
absl::string_view hlo_string = R"(
HloModule While
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(1)
add = s32[] add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT equal = pred[] compare(get-tuple-element.3, constant.2), direction=EQ
}
ENTRY While {
constant.3 = s32[] constant(42)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_EQ(GetLoopBound(*module->entry_computation()->root_instruction(), 123,
kDefaultMaxLoopCount),
123);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_control_flow_flattening.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_control_flow_flattening_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd79ef1a-6098-42da-9354-dac192ad1ce8 | cpp | tensorflow/tensorflow | hlo_decomposer | third_party/xla/xla/tools/hlo_decomposer.cc | third_party/xla/xla/tools/hlo_decomposer_test.cc | #include "xla/tools/hlo_decomposer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/compilation_environments.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
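// Decides whether an instruction is worth isolating into its own module;
// plumbing ops (constants, parameters, tuples, get-tuple-elements) are skipped.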
bool ShouldIsolateOpcode(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kConstant:
case HloOpcode::kGetTupleElement:
case HloOpcode::kParameter:
case HloOpcode::kTuple:
return false;
default:
return true;
}
}
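// Visits every computation reachable from the entry computation through
// non-embedded (control-flow) call sites and extracts each instruction that
// passes ShouldIsolateOpcode into its own single-instruction module.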
absl::StatusOr<std::vector<std::unique_ptr<HloModule>>> Decompose(
const HloModule& module) {
std::vector<std::unique_ptr<HloModule>> modules;
absl::flat_hash_set<const HloComputation*> computations_to_visit{
module.entry_computation()};
absl::flat_hash_set<const HloComputation*> visited_computations;
while (!computations_to_visit.empty()) {
const HloComputation* computation = *computations_to_visit.begin();
computations_to_visit.erase(computations_to_visit.begin());
visited_computations.insert(computation);
for (const HloInstruction* instruction : computation->instructions()) {
if (GetInstructionCallContext(instruction->opcode()) !=
CallContext::kEmbedded) {
for (const HloComputation* called_computation :
instruction->called_computations()) {
if (!visited_computations.contains(called_computation)) {
computations_to_visit.insert(called_computation);
}
}
}
if (ShouldIsolateOpcode(instruction->opcode())) {
modules.push_back(ExtractInstructionIntoNewModule(*instruction));
}
}
}
return modules;
}
}
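// Decomposes `module` into single-instruction modules. When
// `deduplicate_modules` is set, only one module per 128-bit fingerprint is kept.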
absl::StatusOr<std::vector<std::unique_ptr<HloModule>>> DecomposeHloModule(
const HloModule& module, bool deduplicate_modules) {
std::vector<std::unique_ptr<HloModule>> modules;
absl::flat_hash_set<std::string> module_fingerprints;
auto should_add_module = [&](const HloModule* module) {
if (!deduplicate_modules) {
return true;
}
const std::string fingerprint = module->GetFingerprint128();
if (module_fingerprints.contains(fingerprint)) {
return false;
}
module_fingerprints.insert(fingerprint);
return true;
};
TF_ASSIGN_OR_RETURN(std::vector<std::unique_ptr<HloModule>> isolated_modules,
Decompose(module));
for (auto& module : isolated_modules) {
if (should_add_module(module.get())) {
modules.push_back(std::move(module));
}
}
return modules;
}
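// Clones a group of instructions into a fresh module: each operand becomes a
// new entry parameter and the cloned instructions are returned via a tuple root.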
std::unique_ptr<HloModule> ExtractInstructionIntoNewModule(
const std::vector<HloInstruction*>& instructions) {
CHECK(!instructions.empty());
HloInstruction& first_instruction = *instructions[0];
auto new_hlo_module = std::make_unique<HloModule>(
first_instruction.GetModule()->name() + "_collective_ops",
HloModuleConfig{},
std::make_unique<CompilationEnvironments>(
first_instruction.GetModule()->comp_envs()));
int parameter_number = 0;
HloComputation::Builder builder("entry_computation");
HloCloneContext clone_context(new_hlo_module.get());
std::vector<HloInstruction*> new_instructions;
for (auto* hlo : instructions) {
std::vector<HloInstruction*> new_operands;
for (const HloInstruction* operand : hlo->operands()) {
std::unique_ptr<HloInstruction> new_parameter =
HloInstruction::CreateParameter(parameter_number, operand->shape(),
operand->name());
++parameter_number;
new_operands.push_back(builder.AddInstruction(std::move(new_parameter)));
}
std::unique_ptr<HloInstruction> new_instruction =
hlo->CloneWithNewOperands(hlo->shape(), new_operands, &clone_context);
new_instructions.push_back(
builder.AddInstruction(std::move(new_instruction)));
}
std::unique_ptr<HloInstruction> tuple_instruction =
HloInstruction::CreateTuple(new_instructions);
builder.AddInstruction(std::move(tuple_instruction));
new_hlo_module->AddEntryComputationWithLayouts(builder.Build());
return new_hlo_module;
}
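// Single-instruction variant: the cloned instruction becomes the root of the
// new entry computation and each of its operands becomes a parameter.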
std::unique_ptr<HloModule> ExtractInstructionIntoNewModule(
const HloInstruction& hlo) {
auto new_hlo_module = std::make_unique<HloModule>(
std::string(hlo.name()), HloModuleConfig{},
std::make_unique<CompilationEnvironments>(hlo.GetModule()->comp_envs()));
int parameter_number = 0;
HloComputation::Builder builder("entry_computation");
HloCloneContext clone_context(new_hlo_module.get());
std::vector<HloInstruction*> new_operands;
for (const HloInstruction* operand : hlo.operands()) {
std::unique_ptr<HloInstruction> new_parameter =
HloInstruction::CreateParameter(parameter_number, operand->shape(),
operand->name());
++parameter_number;
new_operands.push_back(builder.AddInstruction(std::move(new_parameter)));
}
std::unique_ptr<HloInstruction> new_instruction =
hlo.CloneWithNewOperands(hlo.shape(), new_operands, &clone_context);
builder.AddInstruction(std::move(new_instruction));
new_hlo_module->AddEntryComputationWithLayouts(builder.Build());
return new_hlo_module;
}
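// Extracts a producer/consumer pair: the consumer's use of the producer is
// rewired to the cloned producer, and every other operand becomes a parameter.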
std::unique_ptr<HloModule> ExtractProducerConsumerIntoNewModule(
const HloInstruction& producer, const HloInstruction& consumer) {
auto new_hlo_module =
std::make_unique<HloModule>("extracted", HloModuleConfig{},
std::make_unique<CompilationEnvironments>(
consumer.GetModule()->comp_envs()));
int parameter_number = 0;
HloComputation::Builder builder("entry_computation");
HloCloneContext clone_context(new_hlo_module.get());
absl::InlinedVector<HloInstruction*, 8> producer_operands;
for (const HloInstruction* operand : producer.operands()) {
HloInstruction* new_parameter =
builder.AddInstruction(HloInstruction::CreateParameter(
parameter_number, operand->shape(), operand->name()));
++parameter_number;
producer_operands.push_back(new_parameter);
}
HloInstruction* new_producer =
builder.AddInstruction(producer.CloneWithNewOperands(
producer.shape(), producer_operands, &clone_context));
absl::flat_hash_map<const HloInstruction*, HloInstruction*> operand_map;
operand_map.emplace(&producer, new_producer);
absl::InlinedVector<HloInstruction*, 8> consumer_operands;
for (const HloInstruction* operand : consumer.operands()) {
auto it = operand_map.find(operand);
if (it != operand_map.end()) {
consumer_operands.push_back(it->second);
} else {
HloInstruction* new_parameter =
builder.AddInstruction(HloInstruction::CreateParameter(
parameter_number, operand->shape(), operand->name()));
++parameter_number;
consumer_operands.push_back(new_parameter);
}
}
builder.AddInstruction(consumer.CloneWithNewOperands(
consumer.shape(), consumer_operands, &clone_context));
new_hlo_module->AddEntryComputationWithLayouts(builder.Build());
return new_hlo_module;
}
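// Clones an entire computation into a new module as its entry computation.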
std::unique_ptr<HloModule> ExtractComputationIntoNewModule(
const HloComputation& computation) {
auto new_hlo_module =
std::make_unique<HloModule>("extracted", HloModuleConfig{},
std::make_unique<CompilationEnvironments>(
computation.parent()->comp_envs()));
HloCloneContext clone_context(new_hlo_module.get());
new_hlo_module->AddEntryComputationWithLayouts(
computation.CloneInContext(clone_context));
return new_hlo_module;
}
} | #include "xla/tools/hlo_decomposer.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloDecomposerTest : public HloTestBase {
protected:
std::unique_ptr<HloModule> GetModule() {
absl::string_view kHlo = R"(
HloModule test_module, entry_computation_layout={(bf16[1024,8192]{1,0}, f32[8192]{0}, f32[16384]{0})->(bf16[1024]{0}, bf16[1024]{0}, f32[16384]{0}, f32[16384]{0})}
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.1 = f32[] add(p0, p1)
}
fused_computation.1 {
param_1.3 = f32[8192]{0} parameter(1)
broadcast.2 = f32[1024,8192]{1,0} broadcast(param_1.3), dimensions={1}
param_0.3 = bf16[1024,8192]{1,0} parameter(0)
convert.5 = f32[1024,8192]{1,0} convert(param_0.3)
multiply.2 = f32[1024,8192]{1,0} multiply(broadcast.2, convert.5)
c0_1 = f32[] constant(0)
reduce.1 = f32[1024]{0} reduce(multiply.2, c0_1), dimensions={1}, to_apply=add
ROOT convert.4 = bf16[1024]{0} convert(reduce.1)
}
fused_computation.2 {
p0.0 = bf16[1024,8192]{1,0} parameter(0)
c.0 = f32[1024,8192]{1,0} convert(p0.0)
co0_1.1 = f32[] constant(0)
p.0 = f32[8192]{0} parameter(1)
b.0 = f32[1024,8192]{1,0} broadcast(p.0), dimensions={1}
m.0 = f32[1024,8192]{1,0} multiply(b.0, c.0)
r.0 = f32[1024]{0} reduce(m.0, co0_1.1), dimensions={1}, to_apply=add
ROOT c.1 = bf16[1024]{0} convert(r.0)
}
exp {
param_0.5 = f32[16384]{0} parameter(0)
m.4 = f32[16384]{0} multiply(param_0.5, param_0.5)
e = f32[16384]{0} exponential(m.4)
l.clone.1 = f32[16384]{0} log(m.4)
ROOT tuple = (f32[16384]{0}, f32[16384]{0}) tuple(e, l.clone.1)
}
ENTRY main {
p0.1 = bf16[1024,8192]{1,0} parameter(0)
p1.1 = f32[8192]{0} parameter(1)
fusion.1 = bf16[1024]{0} fusion(p0.1, p1.1), kind=kInput, calls=fused_computation.1
fusion.2 = bf16[1024]{0} fusion(p0.1, p1.1), kind=kInput, calls=fused_computation.2
p2 = f32[16384]{0} parameter(2)
e.1 = (f32[16384]{0}, f32[16384]{0}) fusion(p2), kind=kInput, calls=exp
get-tuple-element.1 = f32[16384]{0} get-tuple-element(e.1), index=1
get-tuple-element = f32[16384]{0} get-tuple-element(e.1), index=0
ROOT result = (bf16[1024]{0}, bf16[1024]{0}, f32[16384]{0}, f32[16384]{0}) tuple(fusion.1, fusion.2, get-tuple-element.1, get-tuple-element)
})";
return ParseAndReturnVerifiedModule(kHlo).value();
}
void FindAndCompare(const std::vector<std::unique_ptr<HloModule>>& modules,
absl::string_view module_name,
absl::string_view pattern) {
auto iter =
absl::c_find_if(modules, [&](const std::unique_ptr<HloModule>& module) {
return module->name() == module_name;
});
EXPECT_NE(iter, modules.end()) << "No module named " << module_name;
if (iter == modules.end()) {
return;
}
EXPECT_TRUE(*RunFileCheck((*iter)->ToString(), pattern));
}
};
TEST_F(HloDecomposerTest, DecomposeNoDedup) {
auto module = GetModule();
TF_ASSERT_OK_AND_ASSIGN(
auto decomposed,
DecomposeHloModule(*module, false));
EXPECT_EQ(decomposed.size(), 3);
FindAndCompare(decomposed, "fusion.1", R"(
CHECK: %add{{.*}} {
CHECK: %fused_computation.1
CHECK: ENTRY
CHECK-THEN: %parameter.0 = bf16[1024,8192]{1,0} parameter(0)
CHECK-THEN: %parameter.1 = f32[8192]{0} parameter(1)
CHECK-THEN: ROOT %fusion.1
)");
FindAndCompare(decomposed, "fusion.2", R"(
CHECK: %add{{.*}} {
CHECK: %fused_computation.2
CHECK: ENTRY
CHECK-THEN: %parameter.0 = bf16[1024,8192]{1,0} parameter(0)
CHECK-THEN: %parameter.1 = f32[8192]{0} parameter(1)
CHECK-THEN: ROOT %fusion.2
)");
FindAndCompare(decomposed, "e.1", R"(
CHECK: %exp{{.*}} {
CHECK: ENTRY
CHECK-THEN: %parameter.0 = f32[16384]{0} parameter(0)
CHECK-THEN: ROOT %e.1
)");
}
TEST_F(HloDecomposerTest, DecomposeDedup) {
auto module = GetModule();
TF_ASSERT_OK_AND_ASSIGN(
auto decomposed,
DecomposeHloModule(*module, true));
EXPECT_EQ(decomposed.size(), 2);
FindAndCompare(decomposed, "fusion.1", R"(
CHECK: %add{{.*}} {
CHECK: %fused_computation.1
CHECK: ENTRY
CHECK-THEN: %parameter.0 = bf16[1024,8192]{1,0} parameter(0)
CHECK-THEN: %parameter.1 = f32[8192]{0} parameter(1)
CHECK-THEN: ROOT %fusion.1
)");
FindAndCompare(decomposed, "e.1", R"(
CHECK: %exp{{.*}} {
CHECK: ENTRY
CHECK-THEN: %parameter.0 = f32[16384]{0} parameter(0)
CHECK-THEN: ROOT %e.1
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9674d4bc-cfba-40fa-9ad2-05cea053501d | cpp | tensorflow/tensorflow | prepare_reference_module | third_party/xla/xla/tools/prepare_reference_module.cc | third_party/xla/xla/tools/prepare_reference_module_test.cc | #include "xla/tools/prepare_reference_module.h"
#include <functional>
#include <memory>
#include "absl/status/statusor.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/despecializer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/stream_executor/platform.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
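// Builds the reference module: a clone of the test module with fast-math and
// fast-min-max disabled. The caller's hooks may adjust the config or rewrite
// the clone; otherwise it is run through the Despecializer unless that step is
// skipped.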
absl::StatusOr<std::unique_ptr<HloModule>> PrepareReferenceModule(
const HloModule& test_module, HloRunnerInterface* test_runner,
const std::function<void(HloModuleConfig*)>& config_modifier_hook,
const std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>& module_modifier_hook,
bool skip_despecialization) {
DebugOptions debug_options = GetDebugOptionsFromFlags();
debug_options.set_xla_cpu_enable_fast_math(false);
debug_options.set_xla_gpu_enable_fast_min_max(false);
HloModuleConfig reference_config = test_module.config();
reference_config.set_debug_options(debug_options);
if (config_modifier_hook) {
config_modifier_hook(&reference_config);
}
std::unique_ptr<HloModule> reference_module =
test_module.Clone(reference_config, "reference");
if (module_modifier_hook) {
TF_RETURN_IF_ERROR(
module_modifier_hook(test_module, test_runner, reference_module.get()));
} else if (!skip_despecialization) {
TF_RETURN_IF_ERROR(Despecializer().Run(reference_module.get()).status());
}
return std::move(reference_module);
}
}; | #include "xla/tools/prepare_reference_module.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
const char* const kModuleStr = R"(
HloModule jit_step
%fused_computation (param_0.2: f32[1,4]) -> f32[1,3] {
%param_0.2 = f32[1,4]{1,0} parameter(0)
ROOT %slice.11 = f32[1,3]{1,0} slice(f32[1,4]{1,0} %param_0.2),
slice={[0:1], [0:3]}
}
ENTRY %main.3491 (Arg_0.0: f32[1,4]) -> f32[1,3] {
%Arg_0.0 = f32[1,4]{1,0} parameter(0)
ROOT %fusion = f32[1,3]{1,0} fusion(f32[1,4]{1,0} %Arg_0.0), kind=kLoop,
calls=%fused_computation
}
)";
using PrepareReferenceModuleTest = HloTestBase;
TEST_F(PrepareReferenceModuleTest, PerformDespecialization) {
TF_ASSERT_OK_AND_ASSIGN(auto test_module,
ParseAndReturnVerifiedModule(kModuleStr));
TF_ASSERT_OK_AND_ASSIGN(
auto reference_module,
PrepareReferenceModule(*test_module, nullptr, {}, {},
false));
EXPECT_THAT(reference_module->ToString(),
Not(::testing::HasSubstr("fusion")));
}
TEST_F(PrepareReferenceModuleTest, SkipDespecialization) {
TF_ASSERT_OK_AND_ASSIGN(auto test_module,
ParseAndReturnVerifiedModule(kModuleStr));
TF_ASSERT_OK_AND_ASSIGN(
auto reference_module,
PrepareReferenceModule(*test_module, nullptr, {}, {},
true));
EXPECT_THAT(reference_module->ToString(), ::testing::HasSubstr("fusion"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/prepare_reference_module.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/prepare_reference_module_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ef6cb643-5703-419f-9a58-6885cf50b9d9 | cpp | tensorflow/tensorflow | run_hlo_module | third_party/xla/xla/tools/run_hlo_module.cc | third_party/xla/xla/tools/run_hlo_module_test.cc | #include "xla/tools/run_hlo_module.h"
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/literal_comparison.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/test_utils.h"
#include "xla/tools/hlo_control_flow_flattening.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/tools/prepare_reference_module.h"
#include "xla/tools/run_hlo_module.pb.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
enum class ModuleResult {
kMatched,
kRan,
kSkipped,
kDidntRun,
kOtherError,
kCompilationError,
kRuntimeError,
kMismatch,
};
constexpr absl::string_view ModuleResultToString(ModuleResult result) {
switch (result) {
case ModuleResult::kMatched:
return "MATCHED";
case ModuleResult::kRan:
return "RAN";
case ModuleResult::kSkipped:
return "SKIPPED";
case ModuleResult::kDidntRun:
return "DIDN'T RUN";
case ModuleResult::kOtherError:
return "OTHER ERROR";
case ModuleResult::kCompilationError:
return "COMPILATION ERROR";
case ModuleResult::kRuntimeError:
return "RUNTIME ERROR";
case ModuleResult::kMismatch:
return "MISMATCH";
}
}
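// Dumps a literal to both a binary proto and a text file (under the test's
// undeclared-outputs directory when available, otherwise temp files) and logs
// the resulting paths.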
void WriteLiteralToTempFile(const LiteralSlice& literal,
const std::string& name) {
auto* env = tsl::Env::Default();
std::string binary_filename;
std::string text_filename;
std::string outdir;
if (tsl::io::GetTestUndeclaredOutputsDir(&outdir)) {
std::string filename = tsl::io::JoinPath(
outdir, absl::StrFormat("tempfile-%d-%s", env->NowMicros(), name));
binary_filename = absl::StrCat(filename, ".pb");
text_filename = absl::StrCat(filename, ".txt");
} else {
binary_filename = tsl::io::GetTempFilename(absl::StrCat(name, ".pb"));
text_filename = tsl::io::GetTempFilename(absl::StrCat(name, ".txt"));
}
TF_CHECK_OK(tsl::WriteBinaryProto(env, binary_filename, literal.ToProto()));
TF_CHECK_OK(tsl::WriteStringToFile(env, text_filename, literal.ToString()));
LOG(ERROR) << "wrote Literal to " << name << " binary: " << binary_filename
<< " text: " << text_filename;
}
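// Miscompare callback: logs truncated expected/actual values and writes the
// expected, actual, and mismatch literals to temp files for inspection.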
void OnMiscompare(const LiteralSlice& expected, const LiteralSlice& actual,
const LiteralSlice& mismatches,
const ShapeIndex& ,
const literal_comparison::ErrorBuckets& ) {
LOG(INFO) << "expected: " << ShapeUtil::HumanString(expected.shape()) << " "
<< literal_comparison::ToStringTruncated(expected);
LOG(INFO) << "actual: " << ShapeUtil::HumanString(actual.shape()) << " "
<< literal_comparison::ToStringTruncated(actual);
LOG(INFO) << "Dumping literals to temp files...";
WriteLiteralToTempFile(expected, "expected");
WriteLiteralToTempFile(actual, "actual");
WriteLiteralToTempFile(mismatches, "mismatches");
}
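// Verifies the module, executes it on `runner` (honoring an optional buffer
// assignment), and reports compile+run wall time and profiled compute time.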
absl::StatusOr<Literal> ExecuteWithRunner(
std::unique_ptr<HloModule> module,
const BufferAssignmentProto* buffer_assignment_proto,
absl::Span<const Literal> args, HloRunnerInterface* runner,
bool run_hlo_passes) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
VerifyHloModule(module.get(), false,
true),
absl::StrCat("(on ", runner->Name(), ")"));
std::cerr << "Running HLO module with runner " << runner->Name() << "...\n";
XLA_VLOG_LINES(1, module->ToString());
const auto start = std::chrono::high_resolution_clock::now();
ExecutionProfile profile;
auto result_status =
(buffer_assignment_proto == nullptr)
? runner->Execute(std::move(module), args, run_hlo_passes, &profile)
: runner->ExecuteWithBufferAssignment(std::move(module),
buffer_assignment_proto, args,
run_hlo_passes, &profile);
const auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = end - start;
std::cerr << "... compiled and ran in " << diff.count() << "s.\n";
double run_time = static_cast<double>(profile.compute_time_ns()) / 1e9;
std::cerr << "execution time for runner " << runner->Name() << ": "
<< run_time << "s.\n";
TF_RETURN_WITH_CONTEXT_IF_ERROR(
result_status.status(),
absl::StrCat("Failed to execute on ", runner->Name()));
return std::move(result_status).value();
}
void UseCpuThunkRuntime(HloModule& module) {
auto debug_options = module.config().debug_options();
debug_options.set_xla_cpu_use_thunk_runtime(true);
module.mutable_config().set_debug_options(debug_options);
}
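// Core run-and-compare path: optionally flattens control flow, builds fake
// arguments (or reuses literals from the iteration proto), runs the test
// module, and, when a reference runner is given, runs a reference module and
// compares the results within the configured error bounds. Per-runner outcomes
// are reported through the optional *_run_result pointers.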
absl::Status RunAndCompareInternal(
std::unique_ptr<HloModule> test_module,
const BufferAssignmentProto* buffer_assignment_proto,
HloRunnerInterface* test_runner, HloRunnerInterface* reference_runner,
std::minstd_rand0* engine, const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook,
ModuleResult* test_run_result, ModuleResult* reference_run_result) {
auto copy_result_on_failure = [](auto status, ModuleResult result,
ModuleResult* out_result) {
if (!status.ok() && out_result != nullptr) {
*out_result = result;
}
return status;
};
if (!config_modifier_hook) {
config_modifier_hook = [](HloModuleConfig* config) {
config->set_seed(42);
};
}
if (options.flatten_control_flow) {
HloControlFlowFlattening control_flow_flattening(
HloControlFlowFlattening::Options{1});
TF_RETURN_IF_ERROR(
copy_result_on_failure(control_flow_flattening.Run(test_module.get()),
ModuleResult::kCompilationError, test_run_result)
.status());
}
TF_ASSIGN_OR_RETURN(
auto args, copy_result_on_failure(
MakeFakeArguments(test_module.get(), engine,
options.use_large_float_range,
options.treat_gte_as_data_formatting),
ModuleResult::kOtherError, test_run_result));
if (iteration_literals_proto != nullptr &&
iteration_literals_proto->arguments_size() != 0) {
if (iteration_literals_proto->arguments_size() != args.size()) {
if (test_run_result != nullptr) {
*test_run_result = ModuleResult::kOtherError;
}
return xla::InvalidArgument(
"Failed to use input literals as arguments; mismatched "
"number of expected arguments.");
} else {
for (int i = 0; i < args.size(); ++i) {
if (!literal_comparison::EqualShapes(
xla::Shape(args[i].shape()),
xla::Shape(iteration_literals_proto->arguments(i).shape()))
.ok()) {
if (test_run_result != nullptr) {
*test_run_result = ModuleResult::kOtherError;
}
return xla::InvalidArgument(
"Failed to use input literals for argument %d "
"because of a shape mismatch.",
i);
}
TF_ASSIGN_OR_RETURN(
args[i],
copy_result_on_failure(xla::Literal::CreateFromProto(
iteration_literals_proto->arguments(i)),
ModuleResult::kOtherError, test_run_result));
}
}
}
if (options.print_literals) {
for (int i = 0; i < args.size(); ++i) {
std::cout << "\n** Argument " << i << " **\n"
<< args[i].ToString() << "\n";
}
}
if (iteration_literals_proto != nullptr &&
iteration_literals_proto->arguments_size() == 0) {
for (int i = 0; i < args.size(); ++i) {
*iteration_literals_proto->add_arguments() = args[i].ToProto();
}
}
std::unique_ptr<HloModule> reference_module;
if (reference_runner != nullptr) {
bool skip_deoptimization = options.reference_platform == options.platform;
TF_ASSIGN_OR_RETURN(
reference_module,
copy_result_on_failure(
PrepareReferenceModule(
*test_module, test_runner, config_modifier_hook,
reference_module_modifier_hook, skip_deoptimization),
ModuleResult::kCompilationError, reference_run_result));
}
if (options.force_use_cpu_thunk_runtime_for_test) {
UseCpuThunkRuntime(*test_module);
}
TF_ASSIGN_OR_RETURN(
auto test_result,
copy_result_on_failure(
ExecuteWithRunner(std::move(test_module), buffer_assignment_proto,
args, test_runner, options.run_test_hlo_passes),
ModuleResult::kRuntimeError, test_run_result));
if (test_run_result != nullptr) {
*test_run_result = ModuleResult::kRan;
}
if (options.print_literals) {
std::cout << "\n** Result with test runner " << test_runner->Name()
<< " **\n"
<< test_result.ToString() << "\n";
}
if (iteration_literals_proto != nullptr) {
LiteralProto test_result_proto = test_result.ToProto();
iteration_literals_proto->mutable_result()->Swap(&test_result_proto);
}
if (reference_module == nullptr) {
std::cerr << "Skipping reference runner\n";
return absl::OkStatus();
}
if (const HloInstruction* root_instruction =
reference_module->entry_computation()->root_instruction();
root_instruction->opcode() == HloOpcode::kCustomCall) {
std::cerr << "Skipping reference runner for a custom call "
<< root_instruction->custom_call_target() << "\n";
if (reference_run_result != nullptr) {
*reference_run_result = ModuleResult::kSkipped;
}
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
auto reference_result,
copy_result_on_failure(
ExecuteWithRunner(std::move(reference_module),
nullptr, args,
reference_runner, options.run_reference_hlo_passes),
ModuleResult::kRuntimeError, reference_run_result));
if (reference_run_result != nullptr) {
*reference_run_result = ModuleResult::kRan;
}
if (options.print_literals) {
std::cout << "\n** Result with reference runner "
<< reference_runner->Name() << " **\n"
<< reference_result.ToString() << "\n";
}
if (iteration_literals_proto != nullptr) {
LiteralProto reference_result_proto = reference_result.ToProto();
iteration_literals_proto->mutable_reference_result()->Swap(
&reference_result_proto);
}
ErrorSpec error_spec(static_cast<float>(options.abs_error_bound),
static_cast<float>(options.rel_error_bound));
absl::Status comparison_status =
literal_comparison::Near(reference_result,
test_result,
error_spec,
true, &OnMiscompare);
const ModuleResult comparison_result =
comparison_status.ok() ? ModuleResult::kMatched : ModuleResult::kMismatch;
if (test_run_result != nullptr) {
*test_run_result = comparison_result;
}
if (reference_run_result != nullptr) {
*reference_run_result = comparison_result;
}
return comparison_status;
}
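// Outcome of running one isolated (decomposed) module; ordered so that the
// summary table groups similar results together.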
struct ChunkResult {
std::string module_name;
ModuleResult test_result = ModuleResult::kDidntRun;
ModuleResult reference_result = ModuleResult::kDidntRun;
absl::Status status;
bool operator<(const ChunkResult& other) const {
if (test_result != other.test_result) {
return test_result < other.test_result;
}
return reference_result < other.reference_result;
}
};
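// Renders per-chunk outcomes plus aggregate counts as a plain-text table.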
std::string BuildResultsTable(absl::Span<const ChunkResult> chunk_results,
size_t num_modules) {
constexpr int kStatusWidth = 21;
constexpr int kNameWidth = 30;
constexpr int kThreeColumnsWidth = 5 + 2 * kStatusWidth + kNameWidth;
constexpr int kTableWidth = kThreeColumnsWidth + 30;
std::ostringstream strstr;
auto print_row = [&](absl::string_view reference, absl::string_view test,
absl::string_view module_name, absl::string_view error) {
std::string formatted_error = absl::StrReplaceAll(
error, {{"\n", absl::StrCat("\n", std::string(kThreeColumnsWidth, ' '),
"|")}});
strstr << " " << std::left << std::setw(kStatusWidth) << reference << "| "
<< std::setw(kStatusWidth) << test << "| " << std::setw(kNameWidth)
<< module_name << "| " << formatted_error << "\n";
};
auto print_line = [&](int line_width) {
strstr << std::string(line_width, '-') << "\n";
};
print_row("Reference", "Test", "Module", "Status");
print_line(kTableWidth);
std::map<std::pair<ModuleResult, ModuleResult>, int> result_counts;
for (const ChunkResult& chunk_result : chunk_results) {
const std::pair<ModuleResult, ModuleResult> result_pair(
chunk_result.reference_result, chunk_result.test_result);
++result_counts[result_pair];
print_row(ModuleResultToString(chunk_result.reference_result),
ModuleResultToString(chunk_result.test_result),
chunk_result.module_name, chunk_result.status.ToString());
}
print_line(kTableWidth);
print_row("Reference", "Test", "Module", "Status");
print_line(kTableWidth);
strstr << "\n\n";
print_line(kThreeColumnsWidth);
print_row("Reference", "Test", "Total count", "");
print_line(kThreeColumnsWidth);
for (const auto& [result, count] : result_counts) {
print_row(ModuleResultToString(result.first),
ModuleResultToString(result.second), absl::StrCat(count), "");
}
print_line(kThreeColumnsWidth);
if (chunk_results.size() < num_modules) {
strstr << "\n(did not " << (num_modules - chunk_results.size())
<< " modules due to earlier failures)\n\n";
}
return strstr.str();
}
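// Decomposes the test module into deduplicated single-instruction modules, runs
// and compares each one, and prints a summary table of the per-module results.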
absl::Status RunIsolatedAndCompare(
std::unique_ptr<HloModule> test_module,
const BufferAssignmentProto* buffer_assignment_proto,
HloRunnerInterface* test_runner, HloRunnerInterface* reference_runner,
std::minstd_rand0* engine, const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook) {
CHECK(test_module);
CHECK(iteration_literals_proto == nullptr)
<< "Cannot run decomposed module if input literals are provided.";
if (options.run_test_hlo_passes || (options.run_reference_hlo_passes &&
!options.reference_platform.empty())) {
LOG(WARNING)
<< "!!! Warning !!! When running decomposed module, running HLO "
"passes is likely not what you want. If you have unoptimized "
"HLO, first convert it to the optimized e.g. using the "
"hlo-opt tool, and then isolate without HLO passes.";
}
std::vector<ChunkResult> chunk_results;
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<HloModule>> modules,
DecomposeHloModule(*test_module, true));
absl::Status status = absl::OkStatus();
for (std::unique_ptr<HloModule>& module : modules) {
const std::string module_name = module->name();
ModuleResult test_module_result = ModuleResult::kDidntRun;
ModuleResult reference_module_result = ModuleResult::kDidntRun;
absl::Status chunk_status = RunAndCompareInternal(
std::move(module), buffer_assignment_proto, test_runner,
reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook,
&test_module_result, &reference_module_result);
chunk_results.push_back({std::move(module_name), test_module_result,
reference_module_result, chunk_status});
status.Update(chunk_status);
}
absl::c_sort(chunk_results);
std::cout << BuildResultsTable(chunk_results, modules.size());
return status;
}
}
absl::Status RunAndCompare(
std::unique_ptr<HloModule> test_module,
const BufferAssignmentProto* buffer_assignment_proto,
HloRunnerInterface* test_runner, HloRunnerInterface* reference_runner,
std::minstd_rand0* engine, const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook) {
if (options.isolate_instructions) {
return RunIsolatedAndCompare(
std::move(test_module), buffer_assignment_proto, test_runner,
reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook);
}
return RunAndCompareInternal(
std::move(test_module), buffer_assignment_proto, test_runner,
reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook, nullptr, nullptr);
}
absl::Status RunAndCompare(
const std::string& hlo_filename, HloRunnerInterface* test_runner,
HloRunnerInterface* reference_runner, std::minstd_rand0* engine,
const RunHloModuleOptions& options,
xla::RunHloModuleIterationLiterals* iteration_literals_proto,
std::function<absl::Status(const HloModule&, HloRunnerInterface*,
HloModule*)>
reference_module_modifier_hook,
std::function<void(HloModuleConfig*)> config_modifier_hook,
std::function<absl::Status(const RunHloModuleOptions& options,
HloModule& module)>
compilation_env_modifier_hook) {
std::string input_format = options.input_format;
if (input_format.empty()) {
input_format = std::string(tsl::io::Extension(hlo_filename));
}
BufferAssignmentProto buffer_assignment_proto;
TF_ASSIGN_OR_RETURN(
auto test_module,
LoadModuleFromFile(
hlo_filename, input_format, hlo_module_loader_details::Config(),
config_modifier_hook,
options.use_buffer_assignment_from_proto ? &buffer_assignment_proto
: nullptr));
HloVerifier verifier(
HloVerifierOpts{}.WithLayoutSensitive(false).WithAllowMixedPrecision(
true));
TF_RETURN_IF_ERROR(verifier.Run(test_module.get()).status());
if (compilation_env_modifier_hook) {
TF_CHECK_OK(compilation_env_modifier_hook(options, *test_module))
<< "Could not adjust the compilation environment for user provided "
"hlo module.";
}
if (options.print_literals) {
std::cout << "\n** Buffer assignment proto **\n"
<< buffer_assignment_proto.DebugString() << "\n";
}
std::unique_ptr<RunHloModuleIterationLiterals> iteration_literals_proto_local;
if (iteration_literals_proto == nullptr) {
if (!options.force_fake_data && !options.isolate_instructions &&
(input_format == "pb" || input_format == "pbtxt")) {
LOG(INFO) << "Using input data from the user-provided snapshot.";
TF_ASSIGN_OR_RETURN(iteration_literals_proto_local,
LoadInputFromFile(hlo_filename, input_format));
iteration_literals_proto = iteration_literals_proto_local.get();
} else if (input_format == "pb" || input_format == "pbtxt") {
LOG(INFO)
<< "Ignoring input data from snapshot and using fake data instead.";
}
}
return RunAndCompare(
std::move(test_module),
options.use_buffer_assignment_from_proto ? &buffer_assignment_proto
: nullptr,
test_runner, reference_runner, engine, options, iteration_literals_proto,
reference_module_modifier_hook, config_modifier_hook);
}
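// Reads input literals from a text or binary proto file. Accepts either a
// RunHloModuleLiterals proto or a bare RunHloModuleIterationLiterals, which is
// wrapped as a single iteration.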
void ReadInputLiteralsFromFile(const std::string& file_path,
RunHloModuleLiterals* input_literals_proto) {
if (!tsl::ReadTextOrBinaryProto(tsl::Env::Default(), file_path,
input_literals_proto)
.ok() ||
input_literals_proto->iterations().empty()) {
xla::RunHloModuleIterationLiterals iteration_literals_proto;
if (!tsl::ReadTextOrBinaryProto(tsl::Env::Default(), file_path,
&iteration_literals_proto)
.ok()) {
LOG(QFATAL) << "Failed to deserialize input literals from file "
<< file_path << "\n";
}
input_literals_proto->clear_iterations();
*input_literals_proto->add_iterations() = iteration_literals_proto;
}
}
} | #include "xla/tools/run_hlo_module.h"
#include <string>
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/tools/run_hlo_module.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
RunHloModuleIterationLiterals GetTestProto() {
RunHloModuleIterationLiterals result;
*result.add_arguments() = LiteralUtil::CreateR1<float>({0.1, 0.2}).ToProto();
*result.add_arguments() = LiteralUtil::CreateR1<float>({0.3, 0.4}).ToProto();
*result.mutable_result() = LiteralUtil::CreateR1<float>({0.5, 0.6}).ToProto();
*result.mutable_reference_result() =
LiteralUtil::CreateR1<float>({0.5, 0.6}).ToProto();
return result;
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleLiteralsBinaryProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
RunHloModuleLiterals wrapped_proto;
*wrapped_proto.add_iterations() = proto;
TF_ASSERT_OK(tsl::WriteBinaryProto(env, file_path, wrapped_proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.SerializeAsString(), wrapped_proto.SerializeAsString());
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleLiteralsTextProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
RunHloModuleLiterals wrapped_proto;
*wrapped_proto.add_iterations() = proto;
TF_ASSERT_OK(tsl::WriteTextProto(env, file_path, wrapped_proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.SerializeAsString(), wrapped_proto.SerializeAsString());
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleIterationLiteralsBinaryProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
TF_ASSERT_OK(tsl::WriteBinaryProto(env, file_path, proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.iterations_size(), 1);
EXPECT_EQ(result.iterations(0).SerializeAsString(),
proto.SerializeAsString());
}
TEST(ReadInputLiteralsFromFile, ReadRunHloModuleIterationLiteralsTextProto) {
std::string file_path;
auto env = tsl::Env::Default();
EXPECT_TRUE(env->LocalTempFilename(&file_path));
auto proto = GetTestProto();
TF_ASSERT_OK(tsl::WriteTextProto(env, file_path, proto));
RunHloModuleLiterals result;
ReadInputLiteralsFromFile(file_path, &result);
EXPECT_EQ(result.iterations_size(), 1);
EXPECT_EQ(result.iterations(0).SerializeAsString(),
proto.SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/run_hlo_module.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/run_hlo_module_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49d9476b-8f73-4dda-9115-ac8a5af6998f | cpp | tensorflow/tensorflow | hlo_slicer | third_party/xla/xla/tools/hlo_slicer.cc | third_party/xla/xla/tools/hlo_slicer_test.cc | #include "xla/tools/hlo_slicer.h"
#include <deque>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tools/hlo_extractor.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
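// Shrinks a tuple-shaped entry parameter so it only carries the elements its
// users read: requires every user to be a get-tuple-element, rebuilds the tuple
// shape from the users' shapes, renumbers the tuple indices, and refreshes the
// entry computation layout.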
void ReduceTupleParameterHelper(HloModule* hlo_module,
HloInstruction* tuple_parameter) {
for (HloInstruction* user_inst : tuple_parameter->users()) {
if (user_inst->opcode() != HloOpcode::kGetTupleElement) {
return;
}
}
VLOG(1) << "Parameter instruction to be reduced: "
<< tuple_parameter->ToString()
<< " shape size: " << tuple_parameter->shape().tuple_shapes_size()
<< " users size: " << tuple_parameter->users().size();
std::vector<Shape> used_shapes;
for (HloInstruction* user_inst : tuple_parameter->users()) {
used_shapes.push_back(user_inst->shape());
}
Shape new_tuple_shape =
ShapeUtil::MakeTupleShape(absl::MakeSpan(used_shapes));
tuple_parameter->mutable_shape()->mutable_tuple_shapes()->clear();
for (const auto& shape : used_shapes) {
tuple_parameter->mutable_shape()->mutable_tuple_shapes()->push_back(shape);
}
for (int i = 0; i < tuple_parameter->users().size(); ++i) {
tuple_parameter->users()[i]->set_tuple_index(i);
}
hlo_module->mutable_config().SetComputationLayoutIfExists(
hlo_module->entry_computation()->ComputeProgramShape());
}
void ReduceTupleParameter(HloModule* hlo_module) {
std::vector<HloInstruction*> tuple_parameters;
for (HloInstruction* parameter :
hlo_module->entry_computation()->parameter_instructions()) {
if (parameter->shape().IsTuple()) {
tuple_parameters.push_back(parameter);
}
}
for (HloInstruction* tuple_parameter : tuple_parameters) {
ReduceTupleParameterHelper(hlo_module, tuple_parameter);
}
}
HloInstruction* FindShardingInstruction(HloModule* hlo_module) {
for (HloComputation* computation : hlo_module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() == "Sharding") {
CHECK_EQ(instruction->operand_count(), 1);
return instruction;
}
}
}
return nullptr;
}
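// Strips every "Sharding" custom-call, rerouting its users to the wrapped
// operand, and re-verifies the module after each removal.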
void RemoveSharding(HloModule* hlo_module) {
while (HloInstruction* custom_call_instruction =
FindShardingInstruction(hlo_module)) {
for (HloInstruction* user_instruction : custom_call_instruction->users()) {
CHECK_OK(custom_call_instruction->ReplaceUseWith(
user_instruction, custom_call_instruction->mutable_operand(0)));
}
custom_call_instruction->DetachFromOperandsAndUsers();
CHECK_OK(custom_call_instruction->parent()->RemoveInstruction(
custom_call_instruction));
VLOG(1) << "Removed sharding custom-call: "
<< custom_call_instruction->ToString();
HloVerifier verifier(false,
true);
TF_CHECK_OK(verifier.Run(hlo_module).status());
}
}
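// Worklist propagation within a single computation: follows users (forward) or
// operands (backward), optionally control dependencies, and records frontier
// instructions wherever `frontier_selector` stops the slice.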
void IntraComputationSlicing(
const HloComputation* computation,
absl::flat_hash_set<const HloInstruction*>& sliced_instructions,
absl::flat_hash_set<const HloInstruction*>& frontier_instructions,
bool forward_slice, FrontierSelector frontier_selector,
bool ignore_control_dependency) {
std::deque<const HloInstruction*> worklist(sliced_instructions.begin(),
sliced_instructions.end());
while (!worklist.empty()) {
const HloInstruction* inst = worklist.back();
worklist.pop_back();
if (frontier_selector && !frontier_selector(inst)) {
frontier_instructions.insert(inst);
continue;
}
std::vector<HloInstruction*> instructions_to_propagate =
forward_slice ? std::vector<HloInstruction*>(inst->users().begin(),
inst->users().end())
: std::vector<HloInstruction*>(inst->operands().begin(),
inst->operands().end());
if (!ignore_control_dependency) {
if (forward_slice) {
instructions_to_propagate.insert(instructions_to_propagate.end(),
inst->control_successors().begin(),
inst->control_successors().end());
} else {
instructions_to_propagate.insert(instructions_to_propagate.end(),
inst->control_predecessors().begin(),
inst->control_predecessors().end());
}
}
for (auto next_inst : instructions_to_propagate) {
if (!sliced_instructions.contains(next_inst)) {
worklist.push_front(next_inst);
sliced_instructions.insert(next_inst);
}
}
}
}
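// Inter-computation slicing: seeds per-computation slices from the starting
// instructions and propagates them through the call graph (to callers once a
// forward slice reaches a computation root, and into callees' roots when a
// backward slice reaches a callsite). In nearest-common-ancestor mode the
// forward walk stops at the NCA instruction of the starting points.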
SliceOutput SliceModuleHelper(
const HloModule* hlo_module,
absl::Span<const HloInstruction*> slice_starting_instructions,
FrontierSelector frontier_selector, bool ignore_control_dependency,
bool forward_slice, bool nearest_common_ancestor_as_root) {
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloInstruction*>>
sliced_computation_instructions_map;
for (auto inst : slice_starting_instructions) {
sliced_computation_instructions_map[inst->parent()].insert(inst);
}
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloInstruction*>>
frontier_computation_instructions_map;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module);
std::vector<HloComputation*> post_order_computations =
hlo_module->MakeComputationPostOrder();
std::vector<HloComputation*> computations_to_traverse =
forward_slice
? post_order_computations
: std::vector<HloComputation*>(post_order_computations.rbegin(),
post_order_computations.rend());
absl::flat_hash_set<const HloComputation*>
nearest_common_ancestor_computations;
if (nearest_common_ancestor_as_root) {
std::vector<const HloComputation*> starting_computations;
for (const auto& [computation, instructions] :
sliced_computation_instructions_map) {
starting_computations.push_back(computation);
}
nearest_common_ancestor_computations =
call_graph->NearestCommonAncestorComputations(starting_computations);
CHECK(!nearest_common_ancestor_computations.empty());
}
for (auto computation : computations_to_traverse) {
if (sliced_computation_instructions_map.contains(computation)) {
auto slicing_starting_instructions = std::vector<const HloInstruction*>(
sliced_computation_instructions_map[computation].begin(),
sliced_computation_instructions_map[computation].end());
IntraComputationSlicing(
computation, sliced_computation_instructions_map[computation],
frontier_computation_instructions_map[computation], forward_slice,
frontier_selector, ignore_control_dependency);
if (forward_slice) {
if (nearest_common_ancestor_as_root &&
nearest_common_ancestor_computations.contains(computation)) {
const HloInstruction* nearest_common_ancestor_instruction =
*(call_graph->NearestCommonAncestorInstructions(
slicing_starting_instructions))
.begin();
CHECK_NE(nearest_common_ancestor_instruction, nullptr);
return SliceOutput{sliced_computation_instructions_map,
frontier_computation_instructions_map,
nearest_common_ancestor_instruction};
}
if (!sliced_computation_instructions_map[computation].contains(
computation->root_instruction()) ||
frontier_computation_instructions_map[computation].contains(
computation->root_instruction())) {
continue;
}
for (auto caller_inst :
call_graph->GetComputationCallers(computation)) {
sliced_computation_instructions_map[caller_inst->parent()].insert(
caller_inst);
}
}
if (!forward_slice) {
for (const auto& callsite :
call_graph->GetNode(computation).callsites()) {
if (sliced_computation_instructions_map[computation].contains(
callsite.instruction())) {
for (auto callee : callsite.called_computations()) {
sliced_computation_instructions_map[callee].insert(
callee->root_instruction());
}
}
}
}
}
}
return SliceOutput{sliced_computation_instructions_map,
frontier_computation_instructions_map};
}
}
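// Public entry point. In nearest-common-ancestor mode it forward-slices to find
// the NCA root, backward-slices from that root, and returns the intersection of
// the two slices; otherwise it performs a plain forward or backward slice.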
SliceOutput SliceModule(
const HloModule* hlo_module,
absl::Span<const HloInstruction*> slice_starting_instructions,
FrontierSelector frontier_selector, bool ignore_control_dependency,
bool forward_slice, bool nearest_common_ancestor_as_root) {
if (forward_slice) {
if (!nearest_common_ancestor_as_root) {
return SliceModuleHelper(hlo_module, slice_starting_instructions,
frontier_selector, ignore_control_dependency,
true,
false);
} else {
CHECK(forward_slice) << "Option `nearest_common_ancestor_as_root` can "
"only be enabled when "
"forward slicing";
CHECK((frontier_selector == nullptr))
<< "Option `nearest_common_ancestor_as_root` can not be specified "
"with `frontier_selector`";
      SliceOutput forward_slice_output =
          SliceModuleHelper(hlo_module, slice_starting_instructions,
                            /*frontier_selector=*/nullptr,
                            ignore_control_dependency, /*forward_slice=*/true,
                            /*nearest_common_ancestor_as_root=*/true);
std::vector<const HloInstruction*> nearest_common_ancestor(
{forward_slice_output.nearest_common_ancestor_root()});
CHECK_EQ(nearest_common_ancestor.size(), 1);
      SliceOutput backward_slice_output =
          SliceModuleHelper(hlo_module,
                            absl::MakeSpan(nearest_common_ancestor),
                            /*frontier_selector=*/nullptr,
                            ignore_control_dependency, /*forward_slice=*/false,
                            /*nearest_common_ancestor_as_root=*/false);
return SliceOutput{SliceOutput::IntersectSlicedInstructions(
forward_slice_output, backward_slice_output),
backward_slice_output.frontier_instructions(),
forward_slice_output.nearest_common_ancestor_root()};
}
} else {
    return SliceModuleHelper(hlo_module, slice_starting_instructions,
                             frontier_selector, ignore_control_dependency,
                             /*forward_slice=*/false,
                             /*nearest_common_ancestor_as_root=*/false);
}
}
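// Slices the module once per group of `slicing_group` starting instructions
// (or once for all of them when the group size is -1), extracts each slice
// into its own HloModule rooted at either the entry root or the nearest common
// ancestor, applies the requested post-processing (sharding removal,
// tuple-parameter reduction), and verifies every extracted module.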
std::vector<std::unique_ptr<HloModule>> SliceModuleAndExtract(
const HloModule* hlo_module,
absl::Span<const HloInstruction*> slice_starting_instructions,
const SlicingConfiguration& slicing_configuration) {
std::vector<std::unique_ptr<HloModule>> sliced_modules;
int slicing_group = slicing_configuration.slicing_group;
CHECK(slicing_group >= 1 || slicing_group == -1);
std::vector<absl::Span<const HloInstruction*>> grouped_instructions;
if (slicing_group == -1) {
grouped_instructions = {slice_starting_instructions};
} else {
for (int i = 0; i < slice_starting_instructions.size();
i += slicing_group) {
grouped_instructions.push_back(
slice_starting_instructions.subspan(i, slicing_group));
}
}
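  // Produce one sliced-and-extracted module per group of starting
  // instructions.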
for (const auto& grouped_slice_starting_instructions : grouped_instructions) {
SliceOutput forward_slice_output;
if (slicing_configuration.forward_slicing ==
SlicingConfiguration::ForwardSlicingConfig::kRoot) {
      forward_slice_output = SliceModule(
          hlo_module, grouped_slice_starting_instructions,
          /*frontier_selector=*/nullptr,
          /*ignore_control_dependency=*/false, /*forward_slice=*/true,
          /*nearest_common_ancestor_as_root=*/false);
} else if (slicing_configuration.forward_slicing ==
SlicingConfiguration::ForwardSlicingConfig::kNca) {
      forward_slice_output = SliceModule(
          hlo_module, grouped_slice_starting_instructions,
          /*frontier_selector=*/nullptr,
          /*ignore_control_dependency=*/false, /*forward_slice=*/true,
          /*nearest_common_ancestor_as_root=*/true);
}
VLOG(1) << "[Num of forward sliced insts]: "
<< forward_slice_output.NumSlicedInstructions();
SliceOutput backward_slice_output;
if (slicing_configuration.backward_slicing) {
      backward_slice_output = SliceModule(
          hlo_module, grouped_slice_starting_instructions,
          /*frontier_selector=*/nullptr,
          /*ignore_control_dependency=*/false, /*forward_slice=*/false);
} else {
backward_slice_output = SliceOutput();
}
auto sliced_result = SliceOutput(SliceOutput::UnionSlicedInstructions(
forward_slice_output, backward_slice_output));
const HloInstruction* extraction_root =
slicing_configuration.forward_slicing ==
SlicingConfiguration::ForwardSlicingConfig::kNca
? forward_slice_output.nearest_common_ancestor_root()
: hlo_module->entry_computation()->root_instruction();
VLOG(1) << "[Root instruction of the sliced module]: "
<< extraction_root->ToString();
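    // During extraction, keep only instructions that appear in the sliced
    // result; anything else is replaced with a zero broadcast of its shape
    // (see `replace_type_selector` below).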
auto extract_selector = [&sliced_result](const HloInstruction* hlo_inst) {
for (const auto& [computation, instructions] :
sliced_result.sliced_instructions()) {
if (instructions.contains(hlo_inst)) {
return true;
}
}
return false;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceZeroBroadcast;
};
    auto extracted_module =
        ExtractModule(extraction_root, /*height=*/-1,
                      extract_selector,
                      replace_type_selector,
                      /*cross_computation=*/true);
if (slicing_configuration.remove_sharding) {
RemoveSharding(extracted_module.get());
}
if (slicing_configuration.reduce_tuple_parameter) {
ReduceTupleParameter(extracted_module.get());
}
HloVerifier verifier(false,
true);
TF_CHECK_OK(verifier.Run(extracted_module.get()).status());
sliced_modules.emplace_back(std::move(extracted_module));
}
CHECK_EQ(sliced_modules.size(), grouped_instructions.size());
return sliced_modules;
}
} | #include "xla/tools/hlo_slicer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = testing::opcode_matchers;
using HloSlicerTest = HloTestBase;
TEST_F(HloSlicerTest, SingleComputationForwardSlice) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] constant(1)
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto p2 = FindInstruction(hlo_module.get(), "p.2");
EXPECT_THAT(p2, op::Parameter());
auto p3 = FindInstruction(hlo_module.get(), "p.3");
EXPECT_THAT(p3, op::Parameter());
auto x = FindInstruction(hlo_module.get(), "x");
EXPECT_THAT(x, op::Subtract());
auto y = FindInstruction(hlo_module.get(), "y");
EXPECT_THAT(y, op::Multiply());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto add1 = FindInstruction(hlo_module.get(), "add.1");
EXPECT_THAT(add1, op::Add());
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
{
std::vector<const HloInstruction*> relevant_instructions({p2, x});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({add0, p3});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p3));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
}
TEST_F(HloSlicerTest, MultipleComputationForwardSlice) {
const std::string& hlo_string = R"(
HloModule test
calculate_alpha {
constant.5 = s32[] constant(2)
constant.6 = s32[] constant(3)
ROOT ret = s32[] subtract(constant.5, constant.6)
}
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get_tuple_element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(23)
add.3 = s32[] add(get_tuple_element.1, constant.1)
get_tuple_element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get_tuple_element.2, get_tuple_element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add.3, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get_tuple_element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less_than = pred[] compare(get_tuple_element.3, constant.2), direction=LT
}
ENTRY Test {
p.1 = s32[] parameter(0)
p.2 = s32[] parameter(1)
add.1 = s32[] add(p.1, p.2)
constant.3 = s32[] call(), to_apply=calculate_alpha
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while.1 = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
loop_count = s32[] get-tuple-element(while.1), index=0
ROOT add.2 = s32[] add(loop_count, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto add1 = FindInstruction(hlo_module.get(), "add.1");
EXPECT_THAT(add1, op::Add());
auto while1 = FindInstruction(hlo_module.get(), "while.1");
EXPECT_THAT(while1, op::While());
auto loop_count = FindInstruction(hlo_module.get(), "loop_count");
EXPECT_THAT(loop_count, op::GetTupleElement());
auto add2 = FindInstruction(hlo_module.get(), "add.2");
EXPECT_THAT(add2, op::Add());
auto gte1 = FindInstruction(hlo_module.get(), "get_tuple_element.1");
EXPECT_THAT(gte1, op::GetTupleElement());
auto gte2 = FindInstruction(hlo_module.get(), "get_tuple_element.2");
EXPECT_THAT(gte2, op::GetTupleElement());
auto constant5 = FindInstruction(hlo_module.get(), "constant.5");
EXPECT_THAT(constant5, op::Constant());
auto tuple1 = FindInstruction(hlo_module.get(), "tuple.1");
EXPECT_THAT(tuple1, op::Tuple());
auto entry_comp = FindComputation(hlo_module.get(), "Test");
EXPECT_NE(entry_comp, nullptr);
auto while_cond_comp = FindComputation(hlo_module.get(), "While.condition");
EXPECT_NE(while_cond_comp, nullptr);
auto while_body_comp = FindComputation(hlo_module.get(), "While.body");
EXPECT_NE(while_body_comp, nullptr);
auto calculate_alpha_comp =
FindComputation(hlo_module.get(), "calculate_alpha");
EXPECT_NE(calculate_alpha_comp, nullptr);
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
{
std::vector<const HloInstruction*> relevant_instructions({add1, while1});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add1));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(while1));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(loop_count));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({constant5});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_FALSE(sliced_instructions[entry_comp].contains(add1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({gte2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_TRUE(sliced_instructions.contains(while_body_comp));
EXPECT_FALSE(sliced_instructions.contains(while_cond_comp));
EXPECT_FALSE(sliced_instructions[entry_comp].contains(tuple1));
EXPECT_FALSE(sliced_instructions[entry_comp].contains(add1));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add2));
EXPECT_FALSE(sliced_instructions[while_body_comp].contains(gte1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
}
TEST_F(HloSlicerTest, SingleComputationForwardFrontier) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] constant(1)
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
p.4 = f32[10] parameter(4)
p.5 = f32[10] parameter(5)
sub.1 = f32[10] subtract(p.4, p.5)
add.2 = f32[10] add(p.3, sub.1)
ROOT add.1 = f32[10] add(x, add.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto broadcast = FindInstruction(hlo_module.get(), "broadcast");
EXPECT_THAT(broadcast, op::Broadcast());
auto x = FindInstruction(hlo_module.get(), "x");
EXPECT_THAT(x, op::Subtract());
auto y = FindInstruction(hlo_module.get(), "y");
EXPECT_THAT(y, op::Multiply());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto p5 = FindInstruction(hlo_module.get(), "p.5");
EXPECT_THAT(p5, op::Parameter());
auto sub1 = FindInstruction(hlo_module.get(), "sub.1");
EXPECT_THAT(sub1, op::Subtract());
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kSubtract;
};
std::vector<const HloInstruction*> relevant_instructions({broadcast, add0});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 4);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions[entry_comp].contains(x));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kSubtract;
};
std::vector<const HloInstruction*> relevant_instructions({add0, y, p5});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 5);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p5));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(sub1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 2);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions[entry_comp].contains(x));
EXPECT_TRUE(frontier_instructions[entry_comp].contains(sub1));
}
}
TEST_F(HloSlicerTest, MultipleComputationForwardFrontier) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
c.2 = f32[] add(c.0, c.1)
c.3 = f32[] constant(4)
ROOT ret = f32[] multiply(c.2, c.3)
}
ENTRY axpy_computation {
p.0 = f32[] parameter(0)
alpha = f32[] call(), to_apply=calculate_alpha
ROOT add = f32[] add(p.0, alpha)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
auto calculate_alpha_comp =
FindComputation(hlo_module.get(), "calculate_alpha");
EXPECT_NE(calculate_alpha_comp, nullptr);
auto ret = FindInstruction(hlo_module.get(), "ret");
EXPECT_THAT(ret, op::Multiply());
auto c2 = FindInstruction(hlo_module.get(), "c.2");
EXPECT_THAT(c2, op::Add());
auto c3 = FindInstruction(hlo_module.get(), "c.3");
EXPECT_THAT(c3, op::Constant());
auto alpha = FindInstruction(hlo_module.get(), "alpha");
EXPECT_THAT(alpha, op::Call());
{
auto hlo_selector = [&ret](const HloInstruction* hlo_inst) -> bool {
return hlo_inst != ret;
};
std::vector<const HloInstruction*> relevant_instructions({c2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 2);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 2);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions.contains(calculate_alpha_comp));
EXPECT_TRUE(frontier_instructions[calculate_alpha_comp].contains(ret));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kCall;
};
std::vector<const HloInstruction*> relevant_instructions({c2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 2);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_EQ(sliced_instructions[entry_comp].size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions.contains(entry_comp));
EXPECT_TRUE(frontier_instructions[entry_comp].contains(alpha));
}
}
TEST_F(HloSlicerTest, SingleComputationBackwardSliceAndFrontier) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] constant(1)
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto alpha = FindInstruction(hlo_module.get(), "alpha");
EXPECT_THAT(alpha, op::Constant());
auto p0 = FindInstruction(hlo_module.get(), "p.0");
EXPECT_THAT(p0, op::Parameter());
auto p1 = FindInstruction(hlo_module.get(), "p.1");
EXPECT_THAT(p1, op::Parameter());
auto p2 = FindInstruction(hlo_module.get(), "p.2");
EXPECT_THAT(p2, op::Parameter());
auto p3 = FindInstruction(hlo_module.get(), "p.3");
EXPECT_THAT(p3, op::Parameter());
auto broadcast = FindInstruction(hlo_module.get(), "broadcast");
EXPECT_THAT(broadcast, op::Broadcast());
auto x = FindInstruction(hlo_module.get(), "x");
EXPECT_THAT(x, op::Subtract());
auto y = FindInstruction(hlo_module.get(), "y");
EXPECT_THAT(y, op::Multiply());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
{
std::vector<const HloInstruction*> relevant_instructions({y});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({add0, y});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 7);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
auto broadcast_slicer = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kBroadcast;
};
{
std::vector<const HloInstruction*> relevant_instructions({y});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions),
broadcast_slicer,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 3);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions[entry_comp].contains(broadcast));
}
}
TEST_F(HloSlicerTest, MultipleComputationBackwardSliceAndFrontier) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
c.2 = f32[] add(c.0, c.1)
c.3 = f32[] constant(4)
ROOT ret = f32[] multiply(c.2, c.3)
}
ENTRY axpy_computation {
p.0 = f32[] parameter(0)
alpha = f32[] call(), to_apply=calculate_alpha
ROOT add = f32[] add(p.0, alpha)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
auto calculate_alpha_comp =
FindComputation(hlo_module.get(), "calculate_alpha");
EXPECT_NE(calculate_alpha_comp, nullptr);
auto ret = FindInstruction(hlo_module.get(), "ret");
EXPECT_THAT(ret, op::Multiply());
auto c0 = FindInstruction(hlo_module.get(), "c.0");
EXPECT_THAT(c0, op::Constant());
auto c1 = FindInstruction(hlo_module.get(), "c.1");
EXPECT_THAT(c1, op::Constant());
auto c2 = FindInstruction(hlo_module.get(), "c.2");
EXPECT_THAT(c2, op::Add());
auto c3 = FindInstruction(hlo_module.get(), "c.3");
EXPECT_THAT(c3, op::Constant());
auto alpha = FindInstruction(hlo_module.get(), "alpha");
EXPECT_THAT(alpha, op::Call());
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
std::vector<const HloInstruction*> relevant_instructions({c2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 3);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 3);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c0));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c1));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
std::vector<const HloInstruction*> relevant_instructions({alpha});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 5);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c0));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c1));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c3));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_EQ(sliced_instructions[entry_comp].size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
auto add_slicer = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kAdd;
};
std::vector<const HloInstruction*> relevant_instructions({ret});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), add_slicer,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 3);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 3);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c3));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions.contains(calculate_alpha_comp));
EXPECT_TRUE(frontier_instructions[calculate_alpha_comp].contains(c2));
}
{
auto mul_slicer = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kMultiply;
};
std::vector<const HloInstruction*> relevant_instructions({alpha});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), mul_slicer,
false, false);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 2);
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 1);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_EQ(sliced_instructions[entry_comp].size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions.contains(calculate_alpha_comp));
EXPECT_TRUE(frontier_instructions[calculate_alpha_comp].contains(ret));
}
}
TEST_F(HloSlicerTest, ForwardSlicingNearestCommonAncestor) {
const std::string& hlo_string = R"(
HloModule module
ENTRY computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
p.2 = f32[10] parameter(2)
mul.0 = f32[10] multiply(p.1, p.2)
sub.0 = f32[10] subtract(add.0, mul.0)
add.1 = f32[10] add(add.0, p.2)
ROOT add.2 = f32[10] add(sub.0, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto p0 = FindInstruction(hlo_module.get(), "p.0");
auto p2 = FindInstruction(hlo_module.get(), "p.2");
auto mul0 = FindInstruction(hlo_module.get(), "mul.0");
auto add0 = FindInstruction(hlo_module.get(), "add.0");
auto sub0 = FindInstruction(hlo_module.get(), "sub.0");
auto add1 = FindInstruction(hlo_module.get(), "add.1");
const HloComputation* computation = hlo_module->entry_computation();
{
std::vector<const HloInstruction*> relevant_instructions({p0});
auto sliced_result =
SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
nullptr,
false, true,
true);
EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
EXPECT_EQ(sliced_result.nearest_common_ancestor_root(), p0);
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 1);
}
{
std::vector<const HloInstruction*> relevant_instructions({p0, p2});
auto sliced_result =
SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
nullptr,
false, true,
true);
EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
EXPECT_TRUE(sliced_result.nearest_common_ancestor_root() == sub0 ||
sliced_result.nearest_common_ancestor_root() == add1);
EXPECT_TRUE(sliced_result.sliced_instructions().contains(computation));
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_TRUE(sliced_instructions[computation].contains(add0));
}
{
std::vector<const HloInstruction*> relevant_instructions({p0, mul0});
auto sliced_result =
SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
nullptr,
false,
true,
true);
EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
EXPECT_EQ(sliced_result.nearest_common_ancestor_root(), sub0);
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 4);
EXPECT_TRUE(sliced_result.sliced_instructions().contains(computation));
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_TRUE(sliced_instructions[computation].contains(p0));
EXPECT_TRUE(sliced_instructions[computation].contains(add0));
EXPECT_TRUE(sliced_instructions[computation].contains(mul0));
EXPECT_TRUE(sliced_instructions[computation].contains(sub0));
}
}
TEST_F(HloSlicerTest, MultipleComputationForwardSlicingNearestCommonAncestor) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
ROOT ret.0 = f32[] multiply(c.0, c.1)
}
calculate_y {
c.2 = f32[] constant(2)
c.3 = f32[] constant(3)
ROOT ret.1 = f32[] add(c.2, c.3)
}
ENTRY axpy_computation {
alpha = f32[] call(), to_apply=calculate_alpha
y = f32[] call(), to_apply=calculate_y
add.0 = f32[] add(alpha, y)
p.0 = f32[] parameter(0)
ROOT add.1 = f32[] add(add.0, p.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto c0 = FindInstruction(hlo_module.get(), "c.0");
auto ret0 = FindInstruction(hlo_module.get(), "ret.0");
auto c2 = FindInstruction(hlo_module.get(), "c.2");
auto ret1 = FindInstruction(hlo_module.get(), "ret.1");
auto alpha = FindInstruction(hlo_module.get(), "alpha");
auto y = FindInstruction(hlo_module.get(), "y");
auto add0 = FindInstruction(hlo_module.get(), "add.0");
const HloComputation* computation = hlo_module->entry_computation();
const HloComputation* calculate_alpha =
FindComputation(hlo_module.get(), "calculate_alpha");
const HloComputation* calculate_y =
FindComputation(hlo_module.get(), "calculate_y");
{
std::vector<const HloInstruction*> relevant_instructions({c0, c2});
auto sliced_result =
SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
nullptr,
false,
true,
true);
EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
EXPECT_EQ(sliced_result.nearest_common_ancestor_root(), add0);
EXPECT_EQ(sliced_result.sliced_instructions().size(), 3);
EXPECT_TRUE(sliced_result.sliced_instructions().contains(computation));
EXPECT_TRUE(sliced_result.sliced_instructions().contains(calculate_alpha));
EXPECT_TRUE(sliced_result.sliced_instructions().contains(calculate_y));
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 7);
EXPECT_TRUE(sliced_instructions[calculate_alpha].contains(c0));
EXPECT_TRUE(sliced_instructions[calculate_alpha].contains(ret0));
EXPECT_TRUE(sliced_instructions[calculate_y].contains(c2));
EXPECT_TRUE(sliced_instructions[calculate_y].contains(ret1));
EXPECT_TRUE(sliced_instructions[computation].contains(alpha));
EXPECT_TRUE(sliced_instructions[computation].contains(y));
EXPECT_TRUE(sliced_instructions[computation].contains(add0));
}
}
TEST_F(HloSlicerTest, TestSliceModuleAndExtract) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
ROOT ret.0 = f32[] multiply(c.0, c.1)
}
calculate_y {
c.2 = f32[] constant(2)
c.3 = f32[] constant(3)
ROOT ret.1 = f32[] add(c.2, c.3)
}
ENTRY axpy_computation {
alpha = f32[] call(), to_apply=calculate_alpha
y = f32[] call(), to_apply=calculate_y
add.0 = f32[] add(alpha, y)
p.0 = f32[] parameter(0)
ROOT add.1 = f32[] add(add.0, p.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto alpha = FindInstruction(hlo_module.get(), "alpha");
auto y = FindInstruction(hlo_module.get(), "y");
auto add0 = FindInstruction(hlo_module.get(), "add.0");
{
std::vector<const HloInstruction*> relevant_instructions({alpha, y});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kNca,
true};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
CHECK_EQ(sliced_modules.size(), 1);
auto sliced_module = std::move(sliced_modules[0]);
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
"add.0");
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(sliced_module->computation_count(), 3);
HloInstruction* c0 = FindInstruction(sliced_module.get(), "c.0");
EXPECT_NE(c0, nullptr);
HloInstruction* c1 = FindInstruction(sliced_module.get(), "c.1");
EXPECT_NE(c1, nullptr);
HloInstruction* c2 = FindInstruction(sliced_module.get(), "c.2");
EXPECT_NE(c2, nullptr);
HloInstruction* c3 = FindInstruction(sliced_module.get(), "c.3");
EXPECT_NE(c3, nullptr);
}
{
std::vector<const HloInstruction*> relevant_instructions({alpha, y});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kRoot,
true};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
CHECK_EQ(sliced_modules.size(), 1);
auto sliced_module = std::move(sliced_modules[0]);
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
"add.1");
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(sliced_module->computation_count(), 3);
HloInstruction* c0 = FindInstruction(sliced_module.get(), "c.0");
EXPECT_NE(c0, nullptr);
HloInstruction* c1 = FindInstruction(sliced_module.get(), "c.1");
EXPECT_NE(c1, nullptr);
HloInstruction* c2 = FindInstruction(sliced_module.get(), "c.2");
EXPECT_NE(c2, nullptr);
HloInstruction* c3 = FindInstruction(sliced_module.get(), "c.3");
EXPECT_NE(c3, nullptr);
}
{
std::vector<const HloInstruction*> relevant_instructions({y});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kRoot,
true};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
CHECK_EQ(sliced_modules.size(), 1);
auto sliced_module = std::move(sliced_modules[0]);
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
"add.1");
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(sliced_module->computation_count(), 2);
HloInstruction* c0 = FindInstruction(sliced_module.get(), "c.0");
EXPECT_EQ(c0, nullptr);
HloInstruction* c1 = FindInstruction(sliced_module.get(), "c.1");
EXPECT_EQ(c1, nullptr);
HloInstruction* c2 = FindInstruction(sliced_module.get(), "c.2");
EXPECT_NE(c2, nullptr);
HloInstruction* c3 = FindInstruction(sliced_module.get(), "c.3");
EXPECT_NE(c3, nullptr);
}
{
std::vector<const HloInstruction*> relevant_instructions({add0});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kRoot,
false};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
CHECK_EQ(sliced_modules.size(), 1);
auto sliced_module = std::move(sliced_modules[0]);
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
"add.1");
EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kAdd);
EXPECT_EQ(sliced_module->computation_count(), 1);
}
}
TEST_F(HloSlicerTest, TestSliceModuleAndExtractRemoveSharding) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
%constant.39733 = bf16[] constant(111)
%broadcast.39734 = bf16[8,1,12288]{2,1,0} broadcast(bf16[] %constant.39733), dimensions={}
%multiply.39766 = bf16[8,1,12288]{2,1,0} multiply(bf16[8,1,12288]{2,1,0} %broadcast.39734, bf16[8,1,12288]{2,1,0} %broadcast.39734)
%custom-call.39767 = bf16[8,1,12288]{2,1,0} custom-call(bf16[8,1,12288]{2,1,0} %multiply.39766), custom_call_target="Sharding", sharding={replicated}
ROOT %add.39786 = bf16[8,1,12288]{2,1,0} add(bf16[8,1,12288]{2,1,0} %custom-call.39767, bf16[8,1,12288]{2,1,0} %custom-call.39767)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* multiply_39766 =
FindInstruction(hlo_module.get(), "multiply.39766");
{
std::vector<const HloInstruction*> relevant_instructions({multiply_39766});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kRoot,
false, true};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
EXPECT_EQ(sliced_modules.size(), 1);
auto sliced_module = std::move(sliced_modules[0]);
for (HloInstruction* instruction :
sliced_module->entry_computation()->instructions()) {
EXPECT_NE(instruction->opcode(), HloOpcode::kCustomCall);
}
for (HloInstruction* instruction :
sliced_module->entry_computation()->root_instruction()->operands()) {
EXPECT_EQ(instruction->name(), "multiply.39766");
}
}
}
TEST_F(HloSlicerTest, TestSliceModuleAndExtractReduceTupleParameter) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation (p.0: (s32[], s32[3]{0}), p.1: (s32[3]{0}, s32[])) -> s32[] {
p.0 = (s32[], s32[3]{0}) parameter(0)
gte.0 = s32[] get-tuple-element(p.0), index=0
p.1 = (s32[3]{0}, s32[]) parameter(1)
gte.1 = s32[] get-tuple-element(p.1), index=1
ROOT add.0 = s32[] add(gte.0, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* add_0 = FindInstruction(hlo_module.get(), "add.0");
CHECK_NE(add_0, nullptr);
{
std::vector<const HloInstruction*> relevant_instructions({add_0});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kRoot,
true, false,
true};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
EXPECT_EQ(sliced_modules.size(), 1);
auto sliced_module = std::move(sliced_modules[0]);
HloInstruction* p_0 = FindInstruction(sliced_module.get(), "p.0");
EXPECT_NE(p_0, nullptr);
EXPECT_EQ(p_0->shape().tuple_shapes_size(), 1);
HloInstruction* p_1 = FindInstruction(sliced_module.get(), "p.1");
EXPECT_NE(p_1, nullptr);
EXPECT_EQ(p_1->shape().tuple_shapes_size(), 1);
}
}
TEST_F(HloSlicerTest, TestSliceModuleAndExtractSlicingGroup) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation (p.0: (s32[], s32[3]{0}), p.1: (s32[3]{0}, s32[])) -> s32[] {
p.0 = (s32[], s32[3]{0}) parameter(0)
gte.0 = s32[] get-tuple-element(p.0), index=0
p.1 = (s32[3]{0}, s32[]) parameter(1)
gte.1 = s32[] get-tuple-element(p.1), index=1
ROOT add.0 = s32[] add(gte.0, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* gte_0 = FindInstruction(hlo_module.get(), "gte.0");
CHECK_NE(gte_0, nullptr);
HloInstruction* gte_1 = FindInstruction(hlo_module.get(), "gte.1");
CHECK_NE(gte_1, nullptr);
{
std::vector<const HloInstruction*> relevant_instructions({gte_0, gte_1});
SlicingConfiguration slicing_config = {
SlicingConfiguration::ForwardSlicingConfig::kNca,
true, false,
false, 1};
std::vector<std::unique_ptr<HloModule>> sliced_modules =
SliceModuleAndExtract(hlo_module.get(),
absl::MakeSpan(relevant_instructions),
slicing_config);
EXPECT_EQ(sliced_modules.size(), 2);
auto sliced_module_0 = std::move(sliced_modules[0]);
EXPECT_EQ(sliced_module_0->entry_computation()->instruction_count(), 2);
HloInstruction* p_0 = FindInstruction(sliced_module_0.get(), "p.0");
EXPECT_NE(p_0, nullptr);
auto sliced_module_1 = std::move(sliced_modules[1]);
EXPECT_EQ(sliced_module_0->entry_computation()->instruction_count(), 2);
HloInstruction* p_1 = FindInstruction(sliced_module_1.get(), "p.1");
EXPECT_NE(p_1, nullptr);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_slicer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_slicer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
25ccb5a1-cdaa-4877-a165-ec2b78f26bea | cpp | tensorflow/tensorflow | hlo_extractor | third_party/xla/xla/tools/hlo_extractor.cc | third_party/xla/xla/tools/hlo_extractor_test.cc | #include "xla/tools/hlo_extractor.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <cstdint>
#include <deque>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
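// Clones every instruction reachable from a root instruction into a fresh
// HloModule. Instructions on the boundary, or rejected by `extract_selector`,
// are replaced with a parameter, a constant, or a broadcast of a (zero or
// random) constant, as decided by `replace_type_selector`.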
class ExtractionVisitor : public ConstDfsHloVisitorWithDefault {
public:
explicit ExtractionVisitor(
const HloInstruction* root_instruction,
absl::flat_hash_set<const HloInstruction*>* boundary,
ExtractSelector extract_selector,
ReplaceTypeSelector replace_type_selector)
: root_instruction_(root_instruction),
old_module_(root_instruction->GetModule()),
module_(std::make_unique<HloModule>(
"extracted", config_,
std::make_unique<CompilationEnvironments>(
old_module_->comp_envs()))),
clone_context_(module_.get()),
boundary_(boundary),
extract_selector_(extract_selector),
replace_type_selector_(replace_type_selector) {
for (auto computation : old_module_->computations()) {
old_computations_to_builders_.insert(
{computation,
std::make_unique<HloComputation::Builder>(computation->name())});
}
for (auto computation : old_module_->computations()) {
parameter_numbers_[computation] = 0;
}
}
absl::Status HandleParameter(const HloInstruction* parameter) override {
return ReplaceWithParameter(parameter);
}
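  // Clones `hlo` with its already-cloned operands, unless it is on the
  // boundary or filtered out by `extract_selector_`, in which case it is
  // replaced according to `replace_type_selector_` (a parameter by default).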
absl::Status DefaultAction(const HloInstruction* hlo) override {
    if ((boundary_ != nullptr && boundary_->contains(hlo)) ||
(extract_selector_ != nullptr && !extract_selector_(hlo))) {
if (replace_type_selector_ != nullptr) {
switch (replace_type_selector_(hlo)) {
case ReplaceType::kReplaceConst:
return ReplaceWithConstant(hlo);
case ReplaceType::kReplaceParam:
CHECK(hlo->parent() == root_instruction_->parent())
<< "Replacing instructions at non-entry computation with "
"parameters is not supported.";
return ReplaceWithParameter(hlo);
case ReplaceType::kReplaceZeroBroadcast:
return ReplaceWithConstantBroadcast(
hlo, ReplaceType::kReplaceZeroBroadcast);
case ReplaceType::kReplaceRandomBroadcast:
return ReplaceWithConstantBroadcast(
hlo, ReplaceType::kReplaceRandomBroadcast);
default:
QCHECK(false) << "Unsupported replacement type";
}
}
return ReplaceWithParameter(hlo);
}
std::vector<HloInstruction*> new_operands;
for (auto operand : hlo->operands()) {
new_operands.push_back(clone_context_.GetInstruction(operand));
}
auto instruction =
hlo->CloneWithNewOperands(hlo->shape(), new_operands, &clone_context_);
auto it = old_computations_to_builders_.find(hlo->parent());
CHECK(it != old_computations_to_builders_.end());
auto builder = it->second.get();
builder->AddInstruction(std::move(instruction));
if (hlo->IsRoot() && hlo != root_instruction_) {
CHECK(clone_context_.FindComputation(hlo->parent()) == nullptr);
auto new_computation = module_->AddEmbeddedComputation(builder->Build());
clone_context_.MapComputation(hlo->parent(), new_computation);
}
return absl::OkStatus();
}
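  // Builds the entry computation for the extracted module and restores the
  // original instruction names on all cloned instructions.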
absl::Status FinishVisit(const HloInstruction* ) override {
auto new_entry_computation = module_->AddEntryComputation(
old_computations_to_builders_.at(root_instruction_->parent())->Build());
clone_context_.MapComputation(root_instruction_->parent(),
new_entry_computation);
for (auto computation : old_module_->MakeComputationPostOrder()) {
for (auto old_instruction : computation->MakeInstructionPostOrder()) {
if (auto new_instruction =
clone_context_.FindInstruction(old_instruction)) {
new_instruction->SetAndSanitizeName(old_instruction->name());
}
}
}
for (HloInstruction* instruction : extra_created_instructions_) {
module_->SetAndUniquifyInstrName(instruction, instruction->name());
}
return absl::OkStatus();
}
HloModule* module() { return module_.get(); }
std::unique_ptr<HloModule> ConsumeModule() { return std::move(module_); }
private:
absl::Status ReplaceWithConstant(const HloInstruction* hlo) {
absl::StatusOr<Literal> literal_status = MakeFakeLiteral(hlo->shape());
TF_CHECK_OK(literal_status.status());
auto new_const =
HloInstruction::CreateConstant(std::move(literal_status.value()));
clone_context_.MapInstruction(hlo, new_const.get());
auto it = old_computations_to_builders_.find(hlo->parent());
CHECK(it != old_computations_to_builders_.end());
auto builder = it->second.get();
builder->AddInstruction(std::move(new_const));
return absl::OkStatus();
}
absl::Status ReplaceWithParameter(const HloInstruction* hlo) {
CHECK(parameter_numbers_.contains(hlo->parent()));
auto new_parameter = HloInstruction::CreateParameter(
parameter_numbers_.at(hlo->parent())++, hlo->shape(), hlo->name());
clone_context_.MapInstruction(hlo, new_parameter.get());
CHECK(old_computations_to_builders_.contains(hlo->parent()));
auto builder = old_computations_to_builders_[hlo->parent()].get();
builder->AddInstruction(std::move(new_parameter));
return absl::OkStatus();
}
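  // Recursively builds a replacement value for `shape`: tuple shapes are
  // rebuilt element by element, while array shapes become a broadcast of a
  // scalar constant (zero or randomly generated, depending on `replace_type`).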
HloInstruction* ReplaceWithConstantBroadcastHelper(
const Shape& shape, HloComputation::Builder* builder,
ReplaceType replace_type) {
if (shape.IsTuple()) {
std::vector<HloInstruction*> tuple_operands;
for (const auto& subshape : shape.tuple_shapes()) {
tuple_operands.push_back(ReplaceWithConstantBroadcastHelper(
subshape, builder, replace_type));
}
auto zero_tuple =
builder->AddInstruction(HloInstruction::CreateTuple(tuple_operands));
extra_created_instructions_.push_back(zero_tuple);
return zero_tuple;
} else {
Shape constant_shape = ShapeUtil::MakeShape(shape.element_type(), {});
HloInstruction* constant_instruction;
CHECK(replace_type == ReplaceType::kReplaceZeroBroadcast ||
replace_type == ReplaceType::kReplaceRandomBroadcast);
if (replace_type == ReplaceType::kReplaceZeroBroadcast) {
constant_instruction =
builder->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(constant_shape.element_type())));
} else {
absl::StatusOr<Literal> literal_status =
MakeFakeLiteral(constant_shape);
TF_CHECK_OK(literal_status.status());
constant_instruction = builder->AddInstruction(
HloInstruction::CreateConstant(std::move(literal_status.value())));
}
extra_created_instructions_.push_back(constant_instruction);
auto broadcast_constant_instruction = builder->AddInstruction(
HloInstruction::CreateBroadcast(shape, constant_instruction, {}));
extra_created_instructions_.push_back(broadcast_constant_instruction);
return broadcast_constant_instruction;
}
}
absl::Status ReplaceWithConstantBroadcast(const HloInstruction* hlo,
ReplaceType replace_type) {
CHECK(replace_type == ReplaceType::kReplaceZeroBroadcast ||
replace_type == ReplaceType::kReplaceRandomBroadcast);
CHECK(old_computations_to_builders_.contains(hlo->parent()));
auto builder = old_computations_to_builders_[hlo->parent()].get();
HloInstruction* zero_broadcast =
ReplaceWithConstantBroadcastHelper(hlo->shape(), builder, replace_type);
clone_context_.MapInstruction(hlo, zero_broadcast);
return absl::OkStatus();
}
const HloInstruction* root_instruction_;
HloModule* old_module_;
HloModuleConfig config_;
std::unique_ptr<HloModule> module_;
HloCloneContext clone_context_;
absl::flat_hash_map<const HloComputation*,
std::unique_ptr<HloComputation::Builder>>
old_computations_to_builders_;
absl::flat_hash_map<const HloComputation*, int> parameter_numbers_;
absl::flat_hash_set<const HloInstruction*>* boundary_;
ExtractSelector extract_selector_;
ReplaceTypeSelector replace_type_selector_;
std::vector<HloInstruction*> extra_created_instructions_;
};
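// Walks operands breadth-first from `root` and records in `boundary` every
// instruction that is more than `limit` hops away.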
void ComputeBoundary(const HloInstruction* root, int64_t limit,
absl::flat_hash_set<const HloInstruction*>* boundary) {
std::deque<const HloInstruction*> worklist;
absl::flat_hash_map<const HloInstruction*, int64_t> visited;
worklist.push_back(root);
visited.emplace(root, 0);
while (!worklist.empty()) {
auto hlo = worklist.front();
worklist.pop_front();
int64_t hops = visited[hlo];
if (hops > limit) {
boundary->insert(hlo);
continue;
}
for (const HloInstruction* operand : hlo->operands()) {
if (visited.count(operand)) {
continue;
}
worklist.push_back(operand);
visited.emplace(operand, hops + 1);
}
}
}
}
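// Extracts a new HloModule rooted at `instruction`. `height` bounds how many
// hops of operands are kept (-1 keeps all of them, and is required when
// `cross_computation` is true); instructions outside that boundary, or
// rejected by `extract_selector`, are replaced according to
// `replace_type_selector`. A second extraction pass then cleans up
// instructions that became unreachable after replacement, and the result is
// verified.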
std::unique_ptr<HloModule> ExtractModule(
const HloInstruction* instruction, int64_t height,
ExtractSelector extract_selector, ReplaceTypeSelector replace_type_selector,
bool cross_computation) {
QCHECK(height == -1 || !cross_computation)
<< "Boundary cannnot be calculated across the computations.";
absl::flat_hash_set<const HloInstruction*> boundary;
if (height != -1) {
ComputeBoundary(instruction, height, &boundary);
}
ExtractionVisitor visitor(instruction, &boundary, extract_selector,
replace_type_selector);
TF_CHECK_OK(instruction->Accept(&visitor, true,
false,
cross_computation));
  ExtractionVisitor cleanup_visitor(
      visitor.module()->entry_computation()->root_instruction(),
      /*boundary=*/nullptr,
      /*extract_selector=*/nullptr,
      /*replace_type_selector=*/nullptr);
TF_CHECK_OK(visitor.module()->entry_computation()->root_instruction()->Accept(
&cleanup_visitor, true,
false,
false));
HloVerifier verifier(false,
true);
TF_CHECK_OK(verifier.Run(cleanup_visitor.module()).status());
return cleanup_visitor.ConsumeModule();
}
} | #include "xla/tools/hlo_extractor.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = testing::opcode_matchers;
using HloExtractorTest = HloTestBase;
TEST_F(HloExtractorTest, ExtractTopLevel) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
param.0 = f32[4]{0} parameter(0)
negate = f32[4]{0} negate(f32[4]{0} param.0)
ROOT exp = f32[4]{0} exponential(f32[4]{0} negate)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "exp"));
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Exp(op::Negate(op::Parameter(0))));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "exp"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Exp(op::Parameter(0)));
}
{
auto extracted_module = ExtractModule(
FindInstruction(hlo_module.get(), "negate"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Negate(op::Parameter(0)));
}
}
TEST_F(HloExtractorTest, ExtractDag) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
param.0 = f32[4]{0} parameter(0)
tanh = f32[4]{0} tanh(f32[4]{0} param.0)
negate = f32[4]{0} negate(f32[4]{0} tanh)
exp = f32[4]{0} exponential(f32[4]{0} negate)
ROOT add = f32[4]{0} add(f32[4]{0} negate, f32[4]{0} exp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "exp"));
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Exp(op::Negate(op::Tanh(op::Parameter(0)))));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Parameter(0), op::Parameter(1)));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Negate(op::Parameter(0)),
op::Exp(op::Negate(op::Parameter(0)))));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 2);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Negate(op::Tanh(op::Parameter(0))),
op::Exp(op::Negate(op::Tanh(op::Parameter(0))))));
}
}
TEST_F(HloExtractorTest, ExtractWithConstant) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
p = f32[4]{0} parameter(0)
tanh = f32[4]{0} tanh(p)
c = f32[4]{0} constant({1, 2, 3, 4})
ROOT add = f32[4]{0} add(tanh, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Parameter(0), op::Parameter(1)));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Tanh(op::Parameter(0)), op::Constant()));
}
}
TEST_F(HloExtractorTest, ExtractFromMultipleComputation) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
add.0 = f32[] add(c.1, c.2)
c.3 = f32[] constant(4)
ROOT ret = f32[] subtract(add.0, c.3)
}
ENTRY axpy_computation {
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
x = f32[10] parameter(0)
ax = f32[10] multiply(broadcast, x)
y = f32[10] parameter(1)
ROOT add.1 = f32[10] add(ax, y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* inst = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(inst, op::Add());
auto extract_selector = [&inst](const HloInstruction* hlo_inst) {
return hlo_inst != inst;
};
{
auto replace_type_selector = [](const HloInstruction* hlo_inst) {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(hlo_module->entry_computation()->root_instruction(),
-1, extract_selector,
replace_type_selector,
true);
EXPECT_EQ(extracted_module->computation_count(), 2);
auto calculate_alpha_root_instruction =
FindComputation(extracted_module.get(), "calculate_alpha")
->root_instruction();
EXPECT_THAT(calculate_alpha_root_instruction,
op::Subtract(op::Constant(), op::Constant()));
}
{
auto replace_type_selector = [](const HloInstruction* hlo_inst) {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(hlo_module->entry_computation()->root_instruction(),
-1, extract_selector,
replace_type_selector,
true);
EXPECT_EQ(extracted_module->computation_count(), 2);
auto calculate_alpha_root_instruction =
FindComputation(extracted_module.get(), "calculate_alpha")
->root_instruction();
EXPECT_THAT(calculate_alpha_root_instruction,
op::Subtract(op::Broadcast(op::Constant()), op::Constant()));
}
}
TEST_F(HloExtractorTest, HloSelector) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
c.3 = f32[] add(c.1, c.2)
c.4 = f32[] constant(4)
ROOT ret = f32[] multiply(c.4, c.3)
}
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* inst =
FindInstruction(hlo_module.get(), HloOpcode::kSubtract);
EXPECT_NE(inst, nullptr);
EXPECT_THAT(inst, op::Subtract(op::Multiply(), op::Add()));
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kCall;
};
auto extracted_module = ExtractModule(inst, -1, hlo_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Broadcast(op::Parameter()),
op::Parameter()),
op::Add(op::Parameter(), op::Parameter())));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kBroadcast;
};
auto extracted_module = ExtractModule(inst, 2, hlo_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Parameter(), op::Parameter()),
op::Add(op::Parameter(), op::Parameter())));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kBroadcast;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(inst, 2, hlo_selector, replace_type_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Constant(), op::Parameter()),
op::Add(op::Parameter(), op::Parameter())));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kAdd;
};
auto extracted_module = ExtractModule(inst, -1, hlo_selector);
EXPECT_EQ(extracted_module->computation_count(), 2);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kSubtract;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(hlo_module->entry_computation()->root_instruction(),
2, hlo_selector, replace_type_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Constant(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
if (hlo_inst->opcode() != HloOpcode::kBroadcast &&
hlo_inst->opcode() != HloOpcode::kAdd) {
return true;
}
return false;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
if (hlo_inst->opcode() == HloOpcode::kBroadcast) {
return ReplaceType::kReplaceConst;
}
return ReplaceType::kReplaceParam;
};
auto extracted_module =
ExtractModule(inst, 2, hlo_selector, replace_type_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Constant(), op::Parameter()),
op::Parameter()));
}
}
TEST_F(HloExtractorTest, ReplaceTupleWithConstant) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
param.0 = f32[4]{0} parameter(0)
tuple.0 = (f32[4]{0}, f32[4]{0}) rng-bit-generator(f32[4]{0} param.0), algorithm=rng_default
negate = f32[4]{0} negate(f32[4]{0} param.0)
tuple.1 = ((f32[4]{0}, f32[4]{0}), f32[4]{0}) tuple(tuple.0, negate)
element = f32[4]{0} get-tuple-element(((f32[4]{0}, f32[4]{0}), f32[4]{0}) tuple.1), index=1
ROOT add = f32[4]{0} add(element, param.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kTuple;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::GetTupleElement(op::Constant()), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kGetTupleElement;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Broadcast(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kGetTupleElement;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceRandomBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Broadcast(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kTuple;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(
extracted_module->entry_computation()->root_instruction(),
op::Add(op::GetTupleElement(op::Tuple(op::Tuple(), op::Broadcast())),
op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kTuple;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceRandomBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(
extracted_module->entry_computation()->root_instruction(),
op::Add(op::GetTupleElement(op::Tuple(op::Tuple(), op::Broadcast())),
op::Parameter()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_extractor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_extractor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
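A brief, hedged usage sketch of the extractor API exercised by the tests above; it is not part of the dataset row, and the header path is assumed from the row's file path.

// Hedged sketch: uses only the ExtractModule overloads seen in the tests above.
#include <memory>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tools/hlo_extractor.h"

void ExtractSketch(xla::HloInstruction* inst) {
  // Keep two levels of producers above `inst`; anything further away is cut
  // off and becomes a parameter of the extracted entry computation.
  auto by_height = xla::ExtractModule(inst, /*height=*/2);
  // Cut the graph at broadcasts and splice in constants instead of
  // parameters, mirroring the selector/replace-type pattern from the tests.
  auto selector = [](const xla::HloInstruction* hlo) -> bool {
    return hlo->opcode() != xla::HloOpcode::kBroadcast;
  };
  auto replace_type =
      [](const xla::HloInstruction* hlo) -> xla::ReplaceType {
    return xla::ReplaceType::kReplaceConst;
  };
  auto by_selector =
      xla::ExtractModule(inst, /*height=*/-1, selector, replace_type);
  (void)by_height;
  (void)by_selector;
}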
01655d2e-ef4c-487f-b26b-7a6ff14cd974 | cpp | tensorflow/tensorflow | hlo_expand | third_party/xla/xla/tools/hlo_expand.cc | third_party/xla/xla/tools/tests/hlo_expand_test.cc | #include "xla/tools/hlo_expand.h"
#include <vector>
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/batchnorm_expander.h"
#include "xla/service/cholesky_expander.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/triangular_solve_expander.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/xla_data.pb.h"
namespace xla {
void AddPassesToPipeline(HloExpandConfig& config, HloPassPipeline& pipeline,
const HloModuleConfig& hlo_module_config) {
if (config.batch_norm_grad_expander || config.batch_norm_inference_expander ||
config.batch_norm_training_expander) {
pipeline.AddPass<xla::BatchNormExpander>(
config.batch_norm_training_expander,
config.batch_norm_inference_expander,
config.batch_norm_grad_expander);
}
if (config.cholesky_expander) {
pipeline.AddPass<xla::CholeskyExpander>();
}
if (config.rng_expander) {
pipeline.AddPass<xla::RngExpander>();
}
if (config.rng_bit_generator_philox_expander) {
pipeline.AddPass<xla::RngBitGeneratorExpander>(
xla::RandomAlgorithm::RNG_PHILOX);
}
if (config.rng_bit_generator_three_fry_expander) {
pipeline.AddPass<xla::RngBitGeneratorExpander>(
xla::RandomAlgorithm::RNG_THREE_FRY);
}
if (config.triangular_solve_expander) {
pipeline.AddPass<xla::TriangularSolveExpander>();
}
if (config.spmd_expander) {
pipeline.AddPass<ShardingPropagation>(
true, false,
hlo_module_config.allow_spmd_sharding_propagation_to_output(),
hlo_module_config.allow_spmd_sharding_propagation_to_parameters());
pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
hlo_module_config.num_partitions(), hlo_module_config.replica_count(),
hlo_module_config.debug_options()
.xla_gpu_threshold_for_windowed_einsum_mib());
}
if (config.verify_hlo) {
pipeline.AddPass<xla::HloVerifier>(false,
false);
}
}
std::vector<tsl::Flag> GetFlags(HloExpandConfig& config) {
return {
tsl::Flag("h", &config.help, "Alias of --help"),
tsl::Flag("help", &config.help, "Display available options"),
tsl::Flag(
"input_format", &config.input_format,
"The format of the input file. If this flag is not specified, it's"
"inferred from the file extension instead. Valid values:\n "
"* hlo|txt : HLO textual format\n"
"* pb : xla::HloProto in binary proto format\n"
"* pbtxt : xla::HloProto in text proto format"),
tsl::Flag("o", &config.output_file, "Alias of --output_file="),
tsl::Flag("output_file", &config.output_file, "Full output file path"),
tsl::Flag("output_format", &config.output_format,
"The format of the output file. Defaults to input_format. "
"Valid values:\n"
"* hlo|txt : HLO textual format\n"
"* pb : xla::HloProto in binary proto format\n"
"* pbtxt : xla::HloProto in text proto format"),
tsl::Flag("batch_norm_expander", &config.batch_norm_expander,
"Overrides and expands batch_norm_grad, batch_norm_inference, "
"and batch_norm_training ops"),
tsl::Flag("batch_norm_grad_expander", &config.batch_norm_grad_expander,
"Expands batch_norm_grad op"),
tsl::Flag("batch_norm_inference_expander",
&config.batch_norm_inference_expander,
"Expands batch_norm_inference_grad op"),
tsl::Flag("batch_norm_training_expander",
&config.batch_norm_training_expander,
"Expands batch_norm_training_grad op"),
tsl::Flag("cholesky_expander", &config.cholesky_expander,
"Expands cholesky op"),
tsl::Flag("spmd_expander", &config.spmd_expander,
"Expands SPMD sharding"),
tsl::Flag("expand_all", &config.expand_all,
"Overrides and expands all supported passes below"),
tsl::Flag("rng_expander", &config.rng_expander, "Expands rng op"),
tsl::Flag(
"rng_bit_generator_expander", &config.rng_bit_generator_expander,
"Overrides and expands rng_bit_generator op on all prng algorithms"),
tsl::Flag("rng_bit_generator_philox_expander",
&config.rng_bit_generator_philox_expander,
"Expands rng_bit_generator op using philox prng algorithm"),
tsl::Flag("rng_bit_generator_three_fry_expander",
&config.rng_bit_generator_three_fry_expander,
"Expands rng_bit_generator op using three_fry prng algorithm"),
tsl::Flag("triangular_solve_expander", &config.triangular_solve_expander,
"Expands triangular_solve op"),
tsl::Flag("verify_hlo", &config.verify_hlo,
"Run HLO verifier after passes"),
};
}
void ParseCompoundFlags(HloExpandConfig& config) {
config.batch_norm_grad_expander |=
config.expand_all || config.batch_norm_expander;
config.batch_norm_inference_expander |=
config.expand_all || config.batch_norm_expander;
config.batch_norm_training_expander |=
config.expand_all || config.batch_norm_expander;
config.cholesky_expander |= config.expand_all;
config.rng_bit_generator_philox_expander |=
config.expand_all || config.rng_bit_generator_expander;
config.rng_bit_generator_three_fry_expander |=
config.expand_all || config.rng_bit_generator_expander;
config.rng_expander |= config.expand_all;
config.triangular_solve_expander |= config.expand_all;
}
} | #include <string>
#include <vector>
#include <gmock/gmock.h>
#include "tsl/platform/path.h"
#include "tsl/platform/subprocess.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloExpandTest : public ::testing::Test {
protected:
void HloOpt(std::vector<std::string>& additional_flags) {
std::string hlo_opt_bin =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "hlo-expand");
tsl::SubProcess proc;
std::vector<std::string> argv = {hlo_opt_bin};
argv.insert(argv.end(), additional_flags.begin(), additional_flags.end());
proc.SetProgram(hlo_opt_bin, argv);
proc.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
proc.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
EXPECT_TRUE(proc.Start());
stdout_output_ = stderr_output_ = "";
int status = proc.Communicate(nullptr, &stdout_output_, &stderr_output_);
#if defined(_WIN32) || defined(_WIN64)
exited_normally_ = (status == 0);
exit_status_ = status;
#else
exited_normally_ = WIFEXITED(status);
exit_status_ = exited_normally_ ? WEXITSTATUS(status) : -1;
#endif
}
std::string stdout_output_;
std::string stderr_output_;
bool exited_normally_ = false;
int exit_status_ = -1;
};
TEST_F(HloExpandTest, CholeskyHlo) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path};
HloOpt(additional_flags);
const std::string& expected_hlo_string =
R"(HloModule main, entry_computation_layout={()->f64[3,3]{1,0}}
ENTRY %main.3 () -> f64[3,3] {
%constant.1 = f64[3,3]{1,0} constant({ { 1, 2, 3 }, { 2, 20, 26 }, { 3, 26, 70 } })
ROOT %cholesky.2 = f64[3,3]{1,0} cholesky(f64[3,3]{1,0} %constant.1), lower=true
})";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 0);
EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
TEST_F(HloExpandTest, SpmdHlo) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "spmd.hlo");
std::vector<std::string> additional_flags = {"--spmd_expander", hlo_path};
HloOpt(additional_flags);
const std::string& expected_hlo_string =
R"(HloModule module, entry_computation_layout={(f32[24,64]{1,0}, f32[39296,64]{1,0})->f32[24,19648]{1,0}}, num_partitions=2
ENTRY %entry_spmd (param: f32[24,64], param.1: f32[39296,64]) -> f32[24,19648] {
%param = f32[24,64]{1,0} parameter(0), sharding={replicated}
%lhs.copy.1 = f32[24,64]{1,0} copy(f32[24,64]{1,0} %param)
%param.1 = f32[39296,64]{1,0} parameter(1), sharding={replicated}
%constant = s32[2]{0} constant({0, 19648})
%partition-id = u32[] partition-id()
%dynamic-slice = s32[1]{0} dynamic-slice(s32[2]{0} %constant, u32[] %partition-id), dynamic_slice_sizes={1}
%reshape = s32[] reshape(s32[1]{0} %dynamic-slice)
%constant.1 = s32[] constant(0)
%dynamic-slice.1 = f32[19648,64]{1,0} dynamic-slice(f32[39296,64]{1,0} %param.1, s32[] %reshape, s32[] %constant.1), dynamic_slice_sizes={19648,64}
%rhs.copy.1 = f32[19648,64]{1,0} copy(f32[19648,64]{1,0} %dynamic-slice.1)
ROOT %dot.1 = f32[24,19648]{1,0} dot(f32[24,64]{1,0} %lhs.copy.1, f32[19648,64]{1,0} %rhs.copy.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 0);
EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
TEST_F(HloExpandTest, CholeskyExpanderHlo) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path,
"--expand_all"};
HloOpt(additional_flags);
const std::string& expected_hlo_string = "%xla.cholesky_f64";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 0);
EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
TEST_F(HloExpandTest, InvalidArgc) {
std::vector<std::string> additional_flags = {"--input_format=hlo", "foo",
"bar", "baz"};
HloOpt(additional_flags);
const std::string& expected_string =
"Cannot parse more than one argument. See usage below:";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidInputFileExtension) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "foo.bar");
std::vector<std::string> additional_flags = {hlo_path};
HloOpt(additional_flags);
const std::string& expected_string =
"input_format must be specified as [hlo|pb|pbtxt|txt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidInputFormat) {
std::vector<std::string> additional_flags = {"--input_format=foo"};
HloOpt(additional_flags);
const std::string& expected_string =
"input_format must be specified as [hlo|pb|pbtxt|txt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidOutputFileExtension) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::string output_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(),
"tools", "tests", "foo.bar");
std::vector<std::string> additional_flags = {"--input_format=", hlo_path,
"--output_file=" + output_path};
HloOpt(additional_flags);
const std::string& expected_string =
"output_format must be specified as [hlo|pb|pbtxt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidOutputFormat) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=", hlo_path,
"--output_format=foo"};
HloOpt(additional_flags);
const std::string& expected_string =
"output_format must be specified as [hlo|pb|pbtxt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, InvalidFile) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "foo.bar");
std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path};
HloOpt(additional_flags);
const std::string& expected_string = "Try: hlo-expand --help";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, UnsupportedOutputFormat) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "cholesky.hlo");
std::vector<std::string> additional_flags = {"--input_format=hlo",
"--output_format=pb", hlo_path};
HloOpt(additional_flags);
const std::string& expected_string =
"Printing to stdout must specify supported "
"output_format=[hlo|pbtxt|txt].";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
TEST_F(HloExpandTest, VerificationFailure) {
std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"tests", "invalid_concat.hlo");
std::vector<std::string> additional_flags = {"--verify_hlo", hlo_path};
HloOpt(additional_flags);
const std::string& expected_string =
"Cannot concatenate arrays that differ in dimensions";
EXPECT_TRUE(exited_normally_);
EXPECT_EQ(exit_status_, 1);
EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_expand.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/tests/hlo_expand_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
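A hedged sketch of how the helpers above compose (not part of the dataset row; the config field names are taken from GetFlags above, and xla::HloExpandConfig is assumed to be a plain default-constructible struct declared in the header).

// Example invocation of the tool, with a hypothetical input path:
//   hlo-expand --input_format=hlo --cholesky_expander --verify_hlo /tmp/m.hlo
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tools/hlo_expand.h"

void AddExpandPassesSketch(xla::HloPassPipeline& pipeline,
                           const xla::HloModuleConfig& module_config) {
  xla::HloExpandConfig config;
  config.cholesky_expander = true;  // expand cholesky ops into plain HLO
  config.verify_hlo = true;         // append HloVerifier at the end
  xla::ParseCompoundFlags(config);  // fold umbrella flags (e.g. expand_all)
  xla::AddPassesToPipeline(config, pipeline, module_config);
}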
173476fa-45cf-4802-805d-da921a8259f2 | cpp | tensorflow/tensorflow | hlo_module_loader | third_party/xla/xla/tools/hlo_module_loader.cc | third_party/xla/xla/tools/hlo_module_loader_test.cc | #include "xla/tools/hlo_module_loader.h"
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "re2/re2.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
absl::Status OverrideConfig(const hlo_module_loader_details::Config& ovr_config,
HloModuleConfig* config) {
config->set_replica_count(ovr_config.num_replicas);
config->set_num_partitions(ovr_config.num_partitions);
return absl::OkStatus();
}
}
std::string StripLogHeaders(std::string_view hlo_string) {
static RE2* matcher = new RE2(
"[IWEF]\\d{4} "
"\\d{2}:\\d{2}:\\d{2}\\.\\d+\\s+\\d+\\s+[^:]+:\\d+\\]\\s?(.*)");
std::string_view matches[4];
std::vector<std::string> lines = absl::StrSplit(hlo_string, '\n');
for (auto& line : lines) {
if (matcher->Match(line, 0, line.size(), RE2::ANCHOR_START, matches, 4)) {
line = std::string(matches[1]);
}
}
return absl::StrJoin(lines, "\n",
[](std::string* out, const std::string& line) {
absl::StrAppend(out, line);
});
}
absl::StatusOr<std::unique_ptr<HloModule>> LoadModuleFromData(
const std::string& data, std::string_view format,
const hlo_module_loader_details::Config& ovr_config,
const std::function<void(HloModuleConfig*)>& config_modifier_hook,
BufferAssignmentProto* buffer_assignment_proto, bool fill_missing_layouts) {
DebugOptions debug_options = GetDebugOptionsFromFlags();
std::unique_ptr<HloModule> module;
if (format == "hlo" || format == "txt") {
std::string hlo_string = StripLogHeaders(data);
HloModuleConfig config;
config.set_debug_options(debug_options);
TF_RETURN_IF_ERROR(OverrideConfig(ovr_config, &config));
if (config_modifier_hook) {
config_modifier_hook(&config);
}
HloParserOptions options;
options.set_fill_missing_layouts(fill_missing_layouts);
TF_ASSIGN_OR_RETURN(
module, ParseAndReturnUnverifiedModule(hlo_string, config, options));
} else {
HloSnapshot proto;
if (format == "pb") {
if (!proto.ParseFromString(data) &&
!proto.mutable_hlo()->ParseFromString(data) &&
!proto.mutable_hlo()->mutable_hlo_module()->ParseFromString(data)) {
return InvalidArgument("Failed to parse input as HLO protobuf binary");
}
if (buffer_assignment_proto != nullptr) {
if (proto.hlo().has_buffer_assignment()) {
*buffer_assignment_proto = proto.hlo().buffer_assignment();
} else {
return InvalidArgument(
"Expected buffer assignment in HLO protobuf binary.");
}
}
} else if (format == "pbtxt") {
if (!tsl::protobuf::TextFormat::ParseFromString(data, &proto) &&
!tsl::protobuf::TextFormat::ParseFromString(data,
proto.mutable_hlo()) &&
!tsl::protobuf::TextFormat::ParseFromString(
data, proto.mutable_hlo()->mutable_hlo_module())) {
return InvalidArgument("Failed to parse input as HLO protobuf text");
}
} else {
return InvalidArgument(
"Invalid format from file extension: '%s'. Expected: hlo, txt, pb, "
"or pbtxt",
format);
}
TF_ASSIGN_OR_RETURN(HloModuleConfig config,
HloModule::CreateModuleConfigFromProto(
proto.hlo().hlo_module(), debug_options));
TF_RETURN_IF_ERROR(OverrideConfig(ovr_config, &config));
if (config_modifier_hook) {
config_modifier_hook(&config);
}
TF_ASSIGN_OR_RETURN(
module, HloModule::CreateFromProto(proto.hlo().hlo_module(), config));
}
return std::move(module);
}
absl::StatusOr<std::unique_ptr<HloModule>> LoadModuleFromFile(
const std::string& path, std::string format,
const hlo_module_loader_details::Config& ovr_config,
const std::function<void(HloModuleConfig*)>& config_modifier_hook,
BufferAssignmentProto* buffer_assignment_proto, bool fill_missing_layouts) {
std::string data;
if (format.empty()) {
format = std::string(tsl::io::Extension(path));
}
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
return LoadModuleFromData(data, format, ovr_config, config_modifier_hook,
buffer_assignment_proto, fill_missing_layouts);
}
absl::StatusOr<std::unique_ptr<RunHloModuleIterationLiterals>>
LoadInputFromData(const std::string& data, std::string_view format) {
HloSnapshot proto;
if (format == "pb") {
if (!proto.ParseFromString(data) &&
!proto.mutable_hlo()->ParseFromString(data) &&
!proto.mutable_hlo()->mutable_hlo_module()->ParseFromString(data)) {
return InvalidArgument("Failed to parse input as HLO protobuf binary");
}
} else if (format == "pbtxt") {
if (!tsl::protobuf::TextFormat::ParseFromString(data, &proto) &&
!tsl::protobuf::TextFormat::ParseFromString(data,
proto.mutable_hlo()) &&
!tsl::protobuf::TextFormat::ParseFromString(
data, proto.mutable_hlo()->mutable_hlo_module())) {
return InvalidArgument("Failed to parse input as HLO protobuf text");
}
} else {
return InvalidArgument(
"Invalid format from file extension: '%s'. Expected: pb, "
"or pbtxt",
format);
}
auto iteration_literals_proto =
std::make_unique<RunHloModuleIterationLiterals>();
for (const auto& i : proto.arguments()) {
*iteration_literals_proto->add_arguments() = i;
}
return std::move(iteration_literals_proto);
}
absl::StatusOr<std::unique_ptr<RunHloModuleIterationLiterals>>
LoadInputFromFile(const std::string& path, std::string format) {
std::string data;
if (format.empty()) {
format = std::string(tsl::io::Extension(path));
}
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
return LoadInputFromData(data, format);
}
} | #include "xla/tools/hlo_module_loader.h"
#include <string>
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class HloModuleLoaderTest : public HloTestBase {};
TEST_F(HloModuleLoaderTest, StripsLogHeaders) {
const std::string& hlo_string = R"(
I0521 12:04:45.883483 1509 service.cc:186] HloModule test_log_stripping
I0521 12:04:45.883483 1509 service.cc:186]
I0521 12:04:45.883483 1509 service.cc:186] ENTRY entry {
I0521 12:04:45.883483 1509 service.cc:186] p0 = f32[4]{0} parameter(0)
I0521 12:04:45.883483 1509 service.cc:186] p1 = f32[4]{0} parameter(1)
I0521 12:04:45.883483 1509 service.cc:186] add = f32[4]{0} add(p0, p1)
I0521 12:04:45.883483 1509 service.cc:186] ROOT rooty = (f32[4]{0}, f32[4]{0}) tuple(p1, add)
I0521 12:04:45.883483 1509 service.cc:186] }
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
LoadModuleFromData(hlo_string, "txt"));
EXPECT_NE(FindInstruction(hlo_module.get(), "p0"), nullptr);
EXPECT_NE(FindInstruction(hlo_module.get(), "p1"), nullptr);
EXPECT_NE(FindInstruction(hlo_module.get(), "add"), nullptr);
EXPECT_NE(FindInstruction(hlo_module.get(), "rooty"), nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_module_loader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_module_loader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
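A small, hedged example of the loader entry point tested above; the two-argument call relies on the default arguments implied by the test.

#include <memory>
#include <string>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tools/hlo_module_loader.h"

absl::StatusOr<std::unique_ptr<xla::HloModule>> LoadFromLogDump(
    const std::string& text) {
  // The "txt"/"hlo" path runs StripLogHeaders first, so HLO copied out of a
  // glog dump (I/W/E/F-prefixed lines) parses unchanged; "pb" and "pbtxt"
  // expect HloProto / HloSnapshot payloads instead.
  return xla::LoadModuleFromData(text, "txt");
}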
60b3f1ce-5ea1-464a-9c6d-070957e277da | cpp | tensorflow/tensorflow | hlo_bisect_state | third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc | third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc | #include "xla/tools/hlo_bisect/hlo_bisect_state.h"
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
namespace xla {
namespace bisect {
namespace {
std::vector<HloInstruction*> GetModifiedInstructionPostOrder(
HloComputation* computation) {
std::vector<HloInstruction*> instructions(
computation->parameter_instructions().begin(),
computation->parameter_instructions().end());
absl::c_copy_if(computation->MakeInstructionPostOrder(),
std::back_inserter(instructions),
[&](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kParameter;
});
return instructions;
}
absl::Status MorphModuleWithOutputs(HloModule* module,
absl::Span<HloInstruction* const> outputs) {
HloComputation* entry_computation = module->entry_computation();
HloInstruction* new_root = outputs.size() == 1
? outputs[0]
: entry_computation->AddInstruction(
HloInstruction::CreateTuple(outputs));
entry_computation->set_root_instruction(new_root, true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
HloDCE dce;
absl::StatusOr<bool> dce_result = dce.Run(module);
return dce_result.status();
}
absl::Status MorphModuleWithInstructions(
HloModule* module, absl::Span<HloInstruction* const> instructions) {
ConstHloInstructionSet in_range_instructions(instructions.begin(),
instructions.end());
auto keep_result = [&](const HloInstruction* instruction) {
return instruction->opcode() != HloOpcode::kParameter &&
!absl::c_any_of(instruction->users(),
[&](const HloInstruction* user) {
return in_range_instructions.count(user) != 0;
});
};
std::vector<HloInstruction*> outputs;
absl::c_copy_if(instructions, std::back_inserter(outputs), keep_result);
return MorphModuleWithOutputs(module, outputs);
}
absl::Status MorphModuleWithInstructions(HloModule* module,
size_t num_instructions) {
std::vector<HloInstruction*> ordered_instructions =
GetModifiedInstructionPostOrder(module->entry_computation());
HloInstruction* const* instructions_begin = &ordered_instructions.front();
return MorphModuleWithInstructions(
module, absl::MakeSpan(instructions_begin, num_instructions));
}
absl::Status MorphModuleWithLiterals(
HloModule* module, absl::flat_hash_map<std::string, Literal> literal_map) {
HloComputation* entry_computation = module->entry_computation();
absl::flat_hash_map<HloInstruction*, Literal> replace_map;
for (HloInstruction* instruction : entry_computation->instructions()) {
auto it = literal_map.find(instruction->name());
if (it != literal_map.end()) {
replace_map.emplace(instruction, std::move(it->second));
}
}
for (auto& [instruction, literal] : replace_map) {
if (!instruction->IsDead()) {
HloInstruction* new_instruction = entry_computation->AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
absl::Status replace_status =
entry_computation->ReplaceInstruction(instruction, new_instruction);
TF_RETURN_IF_ERROR(replace_status);
}
}
xla::HloDCE dce;
absl::StatusOr<bool> dce_status = dce.Run(module);
return dce_status.status();
}
bool InstructionNotReplaceableWithConstant(HloInstruction* instruction) {
return instruction->shape().is_dynamic() ||
instruction->opcode() == HloOpcode::kConstant ||
instruction->opcode() == HloOpcode::kTuple ||
instruction->opcode() == HloOpcode::kParameter;
}
}
absl::StatusOr<bool> HloBisectState::ShouldProcess() {
return RunModule(*module_);
}
absl::StatusOr<bool> HloBisectState::TrimEntryComputation() {
bool changed_in_loop = false;
bool changed = false;
for (int iter = 0; changed || iter < 2; iter++) {
if (iter % 2 == 0) {
VLOG(2) << "Trimming by outputs, iteration " << iter;
TF_ASSIGN_OR_RETURN(changed, TrimByOutputs());
} else {
VLOG(2) << "Trimming by instructions, iteration " << iter;
TF_ASSIGN_OR_RETURN(changed, TrimByInstructions());
}
changed_in_loop |= changed;
}
VLOG(2) << "Trimming by replacing instructions with literals";
TF_ASSIGN_OR_RETURN(changed, TrimByUsingConstants());
VLOG(2) << "Final module: " << module_->ToString();
return changed || changed_in_loop;
}
std::unique_ptr<xla::HloModule>&& HloBisectState::GetResult() {
return std::move(module_);
}
absl::StatusOr<bool> HloBisectState::RunModule(const HloModule& module) {
VLOG(3) << "Modified module: " << module.ToString();
absl::StatusOr<bool> bug_result = bug_checker_->Run(module);
TF_RETURN_IF_ERROR(bug_result.status());
VLOG(3) << "Bug checker result: " << bug_result.value();
if (!bug_result.value()) {
for (HloInstruction* instr : module.entry_computation()->instructions()) {
foldable_instructions_.emplace(instr->name());
}
for (auto& [key, value] : bug_checker_->GetResults()) {
foldable_instructions_values_[key] = std::move(value);
}
}
return bug_result;
}
absl::StatusOr<bool> HloBisectState::TrimByOutputs() {
HloInstruction* root_instruction =
module_->entry_computation()->root_instruction();
if (root_instruction->opcode() != HloOpcode::kTuple ||
root_instruction->operand_count() < 2) {
return false;
}
auto run_modified = [&](int64_t start, int64_t end) -> absl::StatusOr<bool> {
std::unique_ptr<HloModule> new_module = module_->Clone("");
HloInstruction* const* new_operands =
new_module->entry_computation()->root_instruction()->operands().begin();
TF_RETURN_IF_ERROR(MorphModuleWithOutputs(
new_module.get(),
absl::MakeSpan(new_operands + start, end - start + 1)));
return RunModule(*new_module);
};
int64_t bisect_low = 0;
int64_t bisect_high = root_instruction->operand_count() - 1;
while (bisect_low < bisect_high) {
int64_t cur = bisect_low + (bisect_high - bisect_low) / 2;
VLOG(2) << "Number of outputs: " << (cur - bisect_low + 1) << " ["
<< bisect_low << ".." << cur << "]";
TF_ASSIGN_OR_RETURN(bool has_bug, run_modified(bisect_low, cur));
if (has_bug) {
bisect_high = cur;
} else {
TF_ASSIGN_OR_RETURN(has_bug, run_modified(cur + 1, bisect_high));
if (has_bug) {
bisect_low = cur + 1;
} else {
break;
}
}
}
bool changed =
(bisect_high - bisect_low) < (root_instruction->operand_count() - 1);
if (changed) {
TF_RETURN_IF_ERROR(MorphModuleWithOutputs(
module_.get(),
absl::MakeSpan(root_instruction->operands().begin() + bisect_low,
bisect_high - bisect_low + 1)));
TF_RETURN_IF_ERROR(ExpectModuleIsBuggy());
}
return changed;
}
absl::StatusOr<bool> HloBisectState::TrimByInstructions() {
HloComputation* computation = module_->entry_computation();
int64_t upper_bound = computation->instruction_count() -
computation->root_instruction()->shape().IsTuple();
int64_t bisect_low = computation->num_parameters() - 1;
int64_t bisect_high = upper_bound;
while (bisect_low + 1 < bisect_high) {
int64_t cur = bisect_low + (bisect_high - bisect_low) / 2;
VLOG(2) << "Number of instructions: " << cur << " (of "
<< computation->instruction_count() << ")";
std::unique_ptr<HloModule> new_module = module_->Clone("");
TF_RETURN_IF_ERROR(MorphModuleWithInstructions(new_module.get(), cur));
TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module));
if (has_bug) {
bisect_high = cur;
} else {
bisect_low = cur;
}
}
if (bisect_high == computation->num_parameters()) {
return Internal(
"The checker fails on an empty computation! Something is not right. "
"Can't bisect.");
}
bool changed = bisect_high < upper_bound;
if (changed) {
TF_RETURN_IF_ERROR(MorphModuleWithInstructions(module_.get(), bisect_high));
TF_RETURN_IF_ERROR(ExpectModuleIsBuggy());
}
return changed;
}
absl::StatusOr<bool> HloBisectState::TrimByUsingConstants() {
absl::flat_hash_map<std::string, Literal> literal_map;
int64_t random_literals_count = 0;
for (HloInstruction* instr : module_->entry_computation()->instructions()) {
if (InstructionNotReplaceableWithConstant(instr)) {
continue;
}
if (foldable_instructions_values_.contains(instr->name())) {
auto it = foldable_instructions_values_.extract(instr->name());
literal_map.insert(std::move(it));
} else if (foldable_instructions_.contains(instr->name())) {
absl::StatusOr<Literal> literal_status = MakeFakeLiteral(instr->shape());
TF_RETURN_IF_ERROR(literal_status.status());
literal_map[instr->name()] = std::move(literal_status).value();
++random_literals_count;
}
}
VLOG(2) << "Number of literals: " << literal_map.size()
<< " (random: " << random_literals_count << ")";
std::unique_ptr<HloModule> new_module = module_->Clone("");
TF_RETURN_IF_ERROR(
MorphModuleWithLiterals(new_module.get(), std::move(literal_map)));
TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*new_module));
if (has_bug) {
std::swap(module_, new_module);
}
return has_bug;
}
absl::Status HloBisectState::ExpectModuleIsBuggy() {
TF_ASSIGN_OR_RETURN(bool has_bug, RunModule(*module_));
if (has_bug) {
return absl::OkStatus();
}
const int retry_count = 5;
int bug_count = 0;
for (int i = 0; i < retry_count; i++) {
TF_ASSIGN_OR_RETURN(has_bug, bug_checker_->Run(*module_));
if (has_bug) {
bug_count++;
}
}
if (bug_count != 0) {
return InternalStrCat("The checker is non deterministic! (only ", bug_count,
" failures seen in ", (retry_count + 1), " runs)");
}
return Internal("We \"lost\" the bug while bisecting!");
}
}
} | #include "xla/tools/hlo_bisect/hlo_bisect_state.h"
#include <initializer_list>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace bisect {
namespace {
namespace m = match;
using HloBisectStateTest = HloTestBase;
class TestBugSearch : public BugCheckerInterface {
public:
TestBugSearch(std::initializer_list<HloOpcode> opcodes) : opcodes_(opcodes) {}
absl::StatusOr<bool> Run(const HloModule& module) override {
auto has_opcode = [&](HloOpcode opcode) {
return absl::c_any_of(module.entry_computation()->instructions(),
[opcode](const HloInstruction* instr) {
return instr->opcode() == opcode;
});
};
return absl::c_all_of(opcodes_, has_opcode);
}
absl::flat_hash_map<std::string, Literal> GetResults() override { return {}; }
private:
std::vector<HloOpcode> opcodes_;
};
Literal CreateLiteral(float value) {
Literal result = Literal::CreateFromShape(ShapeUtil::MakeShape(F32, {}));
result.PopulateWithValue(value);
return result;
}
TEST_F(HloBisectStateTest, TrimByOutputs) {
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = s32[8] parameter(0)
p2 = s32[8] parameter(1)
a = s32[8] add(p1, p2)
b = s32[8] multiply(p1, p2)
c = s32[8] subtract(p1, p2)
ROOT sum = tuple(a, b, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearch bug_checker({HloOpcode::kMultiply});
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
}
TEST_F(HloBisectStateTest, TrimByInstructions) {
const char* kModuleStr = R"(
HloModule axpy_module
ENTRY axpy_computation {
alpha = f32[] parameter(0)
broadcast = f32[10] broadcast(alpha), dimensions={}
x = f32[10] parameter(1)
ax = f32[10] multiply(broadcast, x)
y = f32[10] parameter(2)
ROOT add = f32[10] add(ax, y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearch bug_checker({HloOpcode::kMultiply, HloOpcode::kBroadcast});
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(
reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::Broadcast(m::Parameter(0)), m::Parameter(1))));
}
TEST_F(HloBisectStateTest, TrimByUsingRandomConstants) {
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = f32[4] parameter(0)
p2 = f32[4] parameter(1)
a = f32[4] multiply(p1, p2)
b = f32[4] add(p1, p2)
ROOT result = f32[4] power(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearch bug_checker({HloOpcode::kPower});
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Power(m::Constant(), m::Constant())));
}
TEST_F(HloBisectStateTest, TrimByUsingReferenceConstants) {
class TestBugSearchWithReferenceConstants : public TestBugSearch {
public:
TestBugSearchWithReferenceConstants()
: TestBugSearch({HloOpcode::kPower}) {}
absl::flat_hash_map<std::string, Literal> GetResults() override {
absl::flat_hash_map<std::string, Literal> results;
results["a"] = CreateLiteral(2.0f);
results["b"] = CreateLiteral(3.0f);
return results;
}
};
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = f32[] parameter(0)
p2 = f32[] parameter(1)
a = f32[] multiply(p1, p2)
b = f32[] add(p1, p2)
ROOT result = f32[] power(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
TestBugSearchWithReferenceConstants bug_checker;
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_TRUE(changed);
auto reduced_module = std::move(bisect).GetResult();
EXPECT_THAT(reduced_module->entry_computation()->root_instruction(),
GmockMatch(m::Power(m::Constant(), m::Constant())));
}
TEST_F(HloBisectStateTest, TrimByOutputsLostBug) {
class CustomBugSearch : public TestBugSearch {
public:
CustomBugSearch() : TestBugSearch({HloOpcode::kConstant}) {}
absl::StatusOr<bool> Run(const HloModule& module) override {
TF_ASSIGN_OR_RETURN(bool has_constants, TestBugSearch::Run(module));
int program_size = module.entry_computation()->instruction_count();
return program_size == 5 && !has_constants;
}
};
const char* kModuleStr = R"(
HloModule test_module
ENTRY test_computation {
p1 = s32[8] parameter(0)
p2 = s32[8] parameter(1)
a = s32[8] add(p1, p2)
b = s32[8] multiply(p1, p2)
ROOT sum = tuple(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
CustomBugSearch bug_checker;
HloBisectState bisect(std::move(module), &bug_checker);
TF_ASSERT_OK_AND_ASSIGN(bool changed, bisect.TrimEntryComputation());
EXPECT_FALSE(changed);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_bisect/hlo_bisect_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
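A hedged sketch of driving the bisection above with a custom bug checker, modeled on the test fixture; the checker below uses a stand-in predicate (presence of a power op) rather than a real failure reproducer.

#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/tools/hlo_bisect/hlo_bisect_state.h"

class PowerOpChecker : public xla::bisect::BugCheckerInterface {
 public:
  // Stand-in for "does this module still reproduce the bug?".
  absl::StatusOr<bool> Run(const xla::HloModule& module) override {
    for (const xla::HloInstruction* instr :
         module.entry_computation()->instructions()) {
      if (instr->opcode() == xla::HloOpcode::kPower) return true;
    }
    return false;
  }
  // No reference literals to fold; TrimByUsingConstants then falls back to
  // randomly generated constants.
  absl::flat_hash_map<std::string, xla::Literal> GetResults() override {
    return {};
  }
};

absl::StatusOr<std::unique_ptr<xla::HloModule>> BisectSketch(
    std::unique_ptr<xla::HloModule> module) {
  PowerOpChecker checker;
  xla::bisect::HloBisectState bisect(std::move(module), &checker);
  absl::StatusOr<bool> changed = bisect.TrimEntryComputation();
  if (!changed.ok()) return changed.status();
  return std::move(bisect).GetResult();
}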
cfec4fa1-43f2-4e44-bf38-a8522b629bac | cpp | tensorflow/tensorflow | functional_hlo_runner | third_party/xla/xla/tools/multihost_hlo_runner/functional_hlo_runner.cc | third_party/xla/xla/tools/multihost_hlo_runner/functional_hlo_runner_test.cc | #include "xla/tools/multihost_hlo_runner/functional_hlo_runner.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/tools/hlo_control_flow_flattening.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
absl::StatusOr<std::unique_ptr<HloModule>> HloTextToModule(
absl::string_view hlo_text) {
return ParseAndReturnUnverifiedModule(hlo_text);
}
absl::StatusOr<std::unique_ptr<HloModule>> HloProtoToModule(
const HloModuleProto& proto) {
TF_ASSIGN_OR_RETURN(
HloModuleConfig config,
HloModule::CreateModuleConfigFromProto(proto, DebugOptions()));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
HloModule::CreateFromProto(proto, config));
return std::move(module);
}
template <typename ElementType>
void PopulateWithSameValue(Literal* literal, ElementType val) {
for (ElementType& element : literal->data<ElementType>()) {
element = static_cast<ElementType>(val);
}
}
absl::StatusOr<Literal> MakeFakeLiteralWithSameValue(const Shape& shape,
int value) {
if (shape.IsArray()) {
Shape new_shape = shape;
new_shape.mutable_layout()->clear_tiles();
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto type) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsArrayType(type)) {
using NativeT = primitive_util::NativeTypeOf<type>;
Literal literal(new_shape);
PopulateWithSameValue(
&literal,
static_cast<NativeT>(type == PRED ? (value % 2) == 0 : value));
return literal;
}
return Unimplemented(
"Unsupported type for fake literal generation: %s",
ShapeUtil::HumanString(shape));
},
new_shape.element_type());
} else if (shape.IsTuple()) {
std::vector<Literal> subliterals;
for (const Shape& subshape : shape.tuple_shapes()) {
TF_ASSIGN_OR_RETURN(Literal subliteral,
MakeFakeLiteralWithSameValue(subshape, value));
subliterals.push_back(std::move(subliteral));
}
return LiteralUtil::MakeTupleOwned(std::move(subliterals));
}
return InvalidArgument("Unsupported type for fake literal generation: %s",
ShapeUtil::HumanString(shape));
}
}
bool AbslParseFlag(absl::string_view text, InputFormat* input_format,
std::string* error) {
if (text == "text") {
*input_format = InputFormat::kText;
return true;
}
if (text == "proto_text") {
*input_format = InputFormat::kProtoText;
return true;
}
if (text == "proto_binary") {
*input_format = InputFormat::kProtoBinary;
return true;
}
if (text == "snapshot_proto_binary") {
*input_format = InputFormat::kSnapshotProtoBinary;
return true;
}
*error = "unknown value for enumeration";
return false;
}
std::string AbslUnparseFlag(InputFormat input_format) {
switch (input_format) {
case InputFormat::kText:
return "text";
case InputFormat::kProtoText:
return "proto_text";
case InputFormat::kProtoBinary:
return "proto_binary";
case InputFormat::kSnapshotProtoBinary:
return "snapshot_proto_binary";
default:
return absl::StrCat(input_format);
}
}
bool AbslParseFlag(absl::string_view text,
FunctionalHloRunner::ModuleArgumentMode* argument_mode,
std::string* error) {
if (text == "use_device_id_as_input") {
*argument_mode =
FunctionalHloRunner::ModuleArgumentMode::kUseDeviceIdAsInput;
return true;
}
if (text == "use_random_inputs") {
*argument_mode = FunctionalHloRunner::ModuleArgumentMode::kUseRandomInputs;
return true;
}
if (text == "use_shared_random_inputs") {
*argument_mode =
FunctionalHloRunner::ModuleArgumentMode::kUseSharedRandomInputs;
return true;
}
if (text == "use_zeros_as_input") {
*argument_mode = FunctionalHloRunner::ModuleArgumentMode::kUseZerosAsInput;
return true;
}
if (text == "uninitialized") {
*argument_mode = FunctionalHloRunner::ModuleArgumentMode::kUninitialized;
return true;
}
*error =
"Unrecognized module argument mode specified. Expect "
"\"use_device_id_as_input\", \"use_random_inputs\", or "
"\"use_shared_random_inputs\".";
return false;
}
std::string AbslUnparseFlag(
FunctionalHloRunner::ModuleArgumentMode argument_mode) {
switch (argument_mode) {
case FunctionalHloRunner::ModuleArgumentMode::kUseDeviceIdAsInput:
return "use_device_id_as_input";
case FunctionalHloRunner::ModuleArgumentMode::kUseRandomInputs:
return "use_random_inputs";
case FunctionalHloRunner::ModuleArgumentMode::kUseSharedRandomInputs:
return "use_shared_random_inputs";
case FunctionalHloRunner::ModuleArgumentMode::kUseZerosAsInput:
return "use_zeros_as_input";
case FunctionalHloRunner::ModuleArgumentMode::kUninitialized:
return "uninitialized";
default:
LOG(FATAL) << "Unexpected argument mode.";
}
}
bool AbslParseFlag(absl::string_view text,
FunctionalHloRunner::ModuleOutputMode* output_mode,
std::string* error) {
if (text == "return_outputs") {
*output_mode = FunctionalHloRunner::ModuleOutputMode::kReturnOutputs;
return true;
}
if (text == "not_return_outputs") {
*output_mode = FunctionalHloRunner::ModuleOutputMode::kNotReturnOutputs;
return true;
}
if (text == "return_device_0_outputs") {
*output_mode = FunctionalHloRunner::ModuleOutputMode::kReturnDevice0Outputs;
return true;
}
*error =
"Unrecognized module output mode specified. Expect \"return_outputs\", "
"\"not_return_outputs\", or \"return_device_0_outputs\".";
return false;
}
std::string AbslUnparseFlag(FunctionalHloRunner::ModuleOutputMode output_mode) {
switch (output_mode) {
case FunctionalHloRunner::ModuleOutputMode::kReturnOutputs:
return "return_outputs";
case FunctionalHloRunner::ModuleOutputMode::kNotReturnOutputs:
return "not_return_outputs";
case FunctionalHloRunner::ModuleOutputMode::kReturnDevice0Outputs:
return "return_device_0_outputs";
default:
LOG(FATAL) << "Unexpected output mode.";
}
}
void AddShardingAnnotationsToSpmdPartitionedModule(HloModule* hlo_module) {
auto set_manual_sharding = [](HloInstruction* hlo) {
if (!hlo->has_sharding()) {
hlo->set_sharding(
HloSharding::Manual().NormalizeTupleSharding(hlo->shape()));
}
};
for (int64_t i = 0; i < hlo_module->entry_computation()->num_parameters();
++i) {
HloInstruction* param =
hlo_module->entry_computation()->parameter_instruction(i);
set_manual_sharding(param);
}
HloInstruction* entry_root =
hlo_module->entry_computation()->root_instruction();
set_manual_sharding(entry_root);
}
absl::StatusOr<ExecutionOptions> FunctionalHloRunner::LoadExecutionOptions(
absl::string_view path) {
ExecutionOptions execution_options;
TF_RETURN_IF_ERROR(tsl::ReadTextOrBinaryProto(
tsl::Env::Default(), std::string(path), &execution_options));
return execution_options;
}
absl::StatusOr<CompileOptions> FunctionalHloRunner::CreateCompileOptions(
const PjRtClient& client,
const FunctionalHloRunner::RawCompileOptions& raw_options, int task_id,
int num_nodes, std::shared_ptr<xla::KeyValueStoreInterface> kv_store) {
CompileOptions compile_options;
if (raw_options.execution_options.has_value()) {
compile_options.executable_build_options =
CreateExecutableBuildOptionsFromExecutionOptions(
raw_options.execution_options.value());
}
ExecutableBuildOptions& build_options =
compile_options.executable_build_options;
ReplicasAndPartitions replicas_and_partitions =
FunctionalHloRunner::GetReplicasAndPartitions(
raw_options.execution_options, client.device_count(),
raw_options.num_replicas, raw_options.num_partitions,
raw_options.num_slices.value_or(1));
build_options.set_num_replicas(replicas_and_partitions.replicas);
build_options.set_num_partitions(replicas_and_partitions.partitions);
build_options.set_process_index(task_id);
build_options.set_process_count(num_nodes);
build_options.set_key_value_store(kv_store);
if (raw_options.spmd_mode == SpmdMode::kUseSpmdPartitioning ||
raw_options.spmd_mode == SpmdMode::kUseShardyPartitioning) {
build_options.set_use_spmd_partitioning(true);
if (raw_options.spmd_mode == SpmdMode::kUseShardyPartitioning) {
build_options.set_use_shardy_partitioner(true);
}
}
if (!build_options.has_device_assignment() &&
!raw_options.num_slices.has_value()) {
TF_ASSIGN_OR_RETURN(
DeviceAssignment device_assignment,
client.GetDefaultDeviceAssignment(replicas_and_partitions.replicas,
replicas_and_partitions.partitions));
build_options.set_device_assignment(device_assignment);
}
DebugOptions& debug_options = *build_options.mutable_debug_options();
if (task_id == 0) {
if (!raw_options.xla_dump_to.empty()) {
debug_options.set_xla_dump_to(raw_options.xla_dump_to);
}
debug_options.set_xla_dump_hlo_as_text(raw_options.xla_text_dump_mode ==
XlaTextDumpMode::kDumpAsText);
debug_options.set_xla_dump_hlo_as_proto(raw_options.xla_proto_dump_mode ==
XlaProtoDumpMode::kDumpAsProto);
}
switch (raw_options.hlo_passes_mode) {
case HloPassesMode::kRunXLABackendOnly:
build_options.set_run_backend_only(true);
break;
case HloPassesMode::kDisableAllHloPasses:
debug_options.set_xla_disable_all_hlo_passes(true);
break;
case HloPassesMode::kStandardCompile:
break;
}
return compile_options;
}
FunctionalHloRunner::ReplicasAndPartitions
FunctionalHloRunner::GetReplicasAndPartitionsInternal(
const std::optional<ExecutionOptions>& execution_options, int device_count,
const std::optional<int>& num_replicas,
const std::optional<int>& num_partitions, int num_slices) {
if (num_replicas.has_value() && num_partitions.has_value()) {
return ReplicasAndPartitions{num_replicas.value(), num_partitions.value()};
}
if (execution_options.has_value()) {
return ReplicasAndPartitions{execution_options->num_replicas(),
execution_options->num_partitions()};
}
if (num_replicas.has_value()) {
return ReplicasAndPartitions{
num_replicas.value(), device_count * num_slices / num_replicas.value()};
}
if (num_partitions.has_value()) {
return ReplicasAndPartitions{
device_count * num_slices / num_partitions.value(),
num_partitions.value()};
}
return ReplicasAndPartitions{device_count * num_slices, 1};
}
FunctionalHloRunner::ReplicasAndPartitions
FunctionalHloRunner::GetReplicasAndPartitions(
const std::optional<ExecutionOptions>& execution_options, int device_count,
const std::optional<int>& num_replicas,
const std::optional<int>& num_partitions, int num_slices) {
CHECK_GE(num_slices, 1);
ReplicasAndPartitions result = GetReplicasAndPartitionsInternal(
execution_options, device_count, num_replicas, num_partitions,
num_slices);
VLOG(1) << "Calculated replicas: " << result.replicas
<< ", partitions: " << result.partitions;
CHECK_GE(result.replicas, 1);
CHECK_GE(result.partitions, 1);
return result;
}
ExecutableBuildOptions
FunctionalHloRunner::CreateExecutableBuildOptionsFromExecutionOptions(
const ExecutionOptions& execution_options) {
ExecutableBuildOptions build_options;
if (execution_options.has_debug_options()) {
*build_options.mutable_debug_options() = execution_options.debug_options();
build_options.mutable_debug_options()->set_xla_dump_to("");
}
if (execution_options.has_shape_with_output_layout()) {
build_options.set_result_layout(
Shape(execution_options.shape_with_output_layout()));
}
build_options.set_num_replicas(execution_options.num_replicas());
build_options.set_num_partitions(execution_options.num_partitions());
build_options.set_use_spmd_partitioning(
execution_options.use_spmd_partitioning());
build_options.set_use_shardy_partitioner(
execution_options.use_shardy_partitioner());
build_options.set_use_auto_spmd_partitioning(
execution_options.use_auto_spmd_partitioning());
build_options.set_deduplicate_hlo(execution_options.deduplicate_hlo());
build_options.set_allow_spmd_sharding_propagation_to_parameters(
execution_options.allow_spmd_sharding_propagation_to_parameters());
build_options.set_allow_spmd_sharding_propagation_to_output(
execution_options.allow_spmd_sharding_propagation_to_output());
if (execution_options.has_device_assignment()) {
absl::StatusOr<std::unique_ptr<DeviceAssignment>> device_assignment =
DeviceAssignment::Deserialize(execution_options.device_assignment());
TF_CHECK_OK(device_assignment.status());
build_options.set_device_assignment(**device_assignment);
}
build_options.set_alias_passthrough_params(
execution_options.alias_passthrough_params());
return build_options;
}
absl::Status FunctionalHloRunner::DumpOutput(
const FunctionalHloRunner::PerDeviceLiteralVecType& output,
absl::string_view dump_output_to, int task_id) {
std::vector<std::string> output_path_vec =
absl::StrSplit(dump_output_to, '.');
std::string suffix = output_path_vec.back();
output_path_vec.pop_back();
output_path_vec.push_back(absl::StrCat("task_", task_id));
output_path_vec.push_back("");
int device_id_index = output_path_vec.size() - 1;
output_path_vec.push_back("");
int literal_id_index = output_path_vec.size() - 1;
output_path_vec.push_back(suffix);
for (const auto& [device_id, literal_vec] : output) {
output_path_vec[device_id_index] = absl::StrCat("device_", device_id);
for (int literal_id = 0; literal_id < literal_vec.size(); ++literal_id) {
output_path_vec[literal_id_index] = absl::StrCat("literal_", literal_id);
std::string literal_path = absl::StrJoin(output_path_vec, ".");
CHECK_EQ(suffix, std::string("txt"));
absl::Status write_status =
tsl::WriteStringToFile(tsl::Env::Default(), literal_path,
literal_vec[literal_id].ToString());
if (!write_status.ok()) {
return write_status;
}
}
}
return absl::OkStatus();
}
absl::Span<PjRtDevice* const> FunctionalHloRunner::GetLocalDevices(
const PjRtClient& client) {
return client.addressable_devices();
}
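// Loads an HLO module from `hlo_file` according to `input_format`; for
// snapshot inputs the recorded arguments are loaded as well.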
absl::StatusOr<FunctionalHloRunner::HloModuleAndArguments>
FunctionalHloRunner::LoadHloModuleAndArguments(absl::string_view hlo_file,
InputFormat input_format) {
HloModuleAndArguments hlo_module_and_arguments;
switch (input_format) {
case InputFormat::kText: {
std::string hlo_text;
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments.hlo_module,
ReadModuleFromHloTextFile(hlo_file));
} break;
case InputFormat::kProtoText: {
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments.hlo_module,
ReadModuleFromTextProtoFile(hlo_file));
} break;
case InputFormat::kProtoBinary: {
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments.hlo_module,
ReadModuleFromBinaryProtoFile(hlo_file));
} break;
case InputFormat::kSnapshotProtoBinary: {
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments,
ReadModuleFromSnapshotBinaryProtoFile(hlo_file));
} break;
default:
LOG(FATAL) << "Cannot process input format: "
<< AbslUnparseFlag(input_format);
}
return hlo_module_and_arguments;
}
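// Compiles and runs the given HLO file end to end and, if `dump_output_to` is
// non-empty, dumps the per-device outputs to text files.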
absl::Status FunctionalHloRunner::LoadAndRunAndDump(
PjRtClient& client, const DebugOptions& debug_options,
const xla::FunctionalHloRunner::PreprocessingOptions& preproc_options,
const xla::FunctionalHloRunner::RawCompileOptions& raw_compile_options,
const xla::FunctionalHloRunner::RunningOptions& running_options,
absl::string_view hlo_text, InputFormat input_format,
std::string dump_output_to, int task_id, int num_nodes,
std::shared_ptr<xla::KeyValueStoreInterface> kv_store) {
TF_ASSIGN_OR_RETURN(
CompileOptions compile_options,
FunctionalHloRunner::CreateCompileOptions(client, raw_compile_options,
task_id, num_nodes, kv_store));
TF_ASSIGN_OR_RETURN(
FunctionalHloRunner::PerDeviceLiteralVecType output,
FunctionalHloRunner::LoadAndRun(client, debug_options, preproc_options,
compile_options, running_options,
hlo_text, input_format));
return dump_output_to.empty()
? absl::OkStatus()
: FunctionalHloRunner::DumpOutput(output, dump_output_to, task_id);
}
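// Loads the module, then compiles and runs it. For snapshot inputs the
// recorded arguments are assigned to the client's first device unless explicit
// `arguments` are provided.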
absl::StatusOr<FunctionalHloRunner::PerDeviceLiteralVecType>
FunctionalHloRunner::LoadAndRun(PjRtClient& client,
const DebugOptions& debug_options,
const PreprocessingOptions& preproc_options,
const CompileOptions& compile_options,
const RunningOptions& running_options,
absl::string_view hlo_text,
InputFormat input_format,
const PerDeviceLiteralVecType& arguments) {
HloModuleAndArguments hlo_module_and_arguments;
PerDeviceLiteralVecType loaded_arguments;
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments,
LoadHloModuleAndArguments(hlo_text, input_format));
if (input_format == InputFormat::kSnapshotProtoBinary) {
loaded_arguments[client.devices()[0]->id()] =
std::move(hlo_module_and_arguments.arguments);
}
if (!arguments.empty()) {
return CompileAndRun(client, debug_options, preproc_options,
compile_options, running_options,
hlo_module_and_arguments.hlo_module.get(), arguments);
}
return CompileAndRun(
client, debug_options, preproc_options, compile_options, running_options,
hlo_module_and_arguments.hlo_module.get(), loaded_arguments);
}
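// Compiles the given HLO file without running it. If there are fewer
// addressable devices than replicas * partitions, a device assignment that
// maps every computation to device 0 is installed so compilation can proceed.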
absl::Status FunctionalHloRunner::LoadAndCompile(
PjRtClient& client, const DebugOptions& debug_options,
const PreprocessingOptions& preproc_options,
const RawCompileOptions& raw_compile_options, std::string_view hlo_file,
InputFormat input_format, int task_id, int num_nodes,
std::shared_ptr<xla::KeyValueStoreInterface> kv_store) {
TF_ASSIGN_OR_RETURN(
CompileOptions compile_options,
FunctionalHloRunner::CreateCompileOptions(client, raw_compile_options,
task_id, num_nodes, kv_store));
int num_replicas = compile_options.executable_build_options.num_replicas();
int num_partitions =
compile_options.executable_build_options.num_partitions();
int needed_devices = num_replicas * num_partitions;
if (client.addressable_device_count() < needed_devices) {
LOG(INFO) << "Applying a workaround to allow compiling multi-device HLOs "
"on machines with fewer devices.";
DeviceAssignment assignment(num_replicas, num_partitions);
assignment.Fill(0);
compile_options.executable_build_options.set_device_assignment(assignment);
}
TF_ASSIGN_OR_RETURN(
FunctionalHloRunner::HloModuleAndArguments hlo_module_and_arguments,
FunctionalHloRunner::LoadHloModuleAndArguments(hlo_file, input_format));
TF_RETURN_IF_ERROR(FunctionalHloRunner::Compile(
client, hlo_module_and_arguments.hlo_module.get(),
debug_options, preproc_options, compile_options)
.status());
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<HloModule>>
FunctionalHloRunner::ReadModuleFromHloTextFile(absl::string_view hlo_file) {
std::string hlo_string;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),
std::string(hlo_file), &hlo_string));
return ParseAndReturnUnverifiedModule(hlo_string);
}
absl::StatusOr<std::unique_ptr<HloModule>>
FunctionalHloRunner::ReadModuleFromBinaryProtoFile(absl::string_view hlo_file) {
HloProto proto;
TF_RETURN_IF_ERROR(
tsl::ReadBinaryProto(tsl::Env::Default(), std::string(hlo_file), &proto));
return HloProtoToModule(proto.hlo_module());
}
absl::StatusOr<std::unique_ptr<HloModule>>
FunctionalHloRunner::ReadModuleFromTextProtoFile(absl::string_view hlo_file) {
HloProto proto;
TF_RETURN_IF_ERROR(
tsl::ReadTextProto(tsl::Env::Default(), std::string(hlo_file), &proto));
return HloProtoToModule(proto.hlo_module());
}
absl::StatusOr<FunctionalHloRunner::HloModuleAndArguments>
FunctionalHloRunner::ReadModuleFromSnapshotBinaryProtoFile(
absl::string_view hlo_file) {
HloSnapshot proto;
HloModuleAndArguments hlo_module_and_arguments;
TF_RETURN_IF_ERROR(
tsl::ReadBinaryProto(tsl::Env::Default(), std::string(hlo_file), &proto));
hlo_module_and_arguments.arguments.resize(proto.arguments_size());
for (int i = 0; i < proto.arguments_size(); i++) {
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments.arguments[i],
Literal::CreateFromProto(proto.arguments()[i]));
}
TF_ASSIGN_OR_RETURN(hlo_module_and_arguments.hlo_module,
HloProtoToModule(proto.hlo().hlo_module()));
return hlo_module_and_arguments;
}
absl::StatusOr<std::unique_ptr<HloModule>>
FunctionalHloRunner::ReadModuleFromString(absl::string_view hlo_text) {
return HloTextToModule(hlo_text);
}
absl::StatusOr<std::unique_ptr<HloModule>>
FunctionalHloRunner::ReadModuleFromProto(const HloModuleProto& proto) {
return HloProtoToModule(proto);
}
absl::StatusOr<FunctionalHloRunner::PerDeviceLiteralVecType>
FunctionalHloRunner::CompileAndRun(PjRtClient& client,
const DebugOptions& debug_options,
const PreprocessingOptions& preproc_options,
const CompileOptions& compile_options,
const RunningOptions& running_options,
HloModule* hlo_module,
const PerDeviceLiteralVecType& arguments) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
Compile(client, hlo_module, debug_options,
preproc_options, compile_options));
return Run(client, executable.get(), arguments, running_options);
}
namespace {
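// Classifies the entry computation's parameters; the result decides whether
// arguments are passed as one tuple, as a flat list of arrays, or untouched.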
enum class ParameterType {
kOneTupleOfArrays = 0,
kOneListOfArrays = 1,
kOther = 2
};
ParameterType GetParameterType(const HloModule& module) {
int num_parameters = module.entry_computation()->num_parameters();
if (num_parameters == 1) {
const Shape& shape =
module.entry_computation()->parameter_instruction(0)->shape();
if (shape.IsTuple()) {
bool is_tuple_of_arrays = absl::c_all_of(
shape.tuple_shapes(),
[](const Shape& subshape) { return subshape.IsArray(); });
if (is_tuple_of_arrays) {
return ParameterType::kOneTupleOfArrays;
}
return ParameterType::kOther;
}
}
bool is_list_of_arrays =
absl::c_all_of(module.entry_computation()->parameter_instructions(),
[](const HloInstruction* parameter) {
return parameter->shape().IsArray();
});
return is_list_of_arrays ? ParameterType::kOneListOfArrays
: ParameterType::kOther;
}
}
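// Applies preprocessing to the module: sets debug options, optionally adds
// sharding annotations for pre-partitioned SPMD modules, and runs control-flow
// flattening / infeed-outfeed removal when requested.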
absl::Status FunctionalHloRunner::PrepareHloModuleForCompilation(
HloModule* hlo_module, const DebugOptions& debug_options,
const PreprocessingOptions& preproc_options) {
hlo_module->mutable_config().set_debug_options(debug_options);
if (preproc_options.is_spmd_partitioned_module()) {
AddShardingAnnotationsToSpmdPartitionedModule(hlo_module);
}
if (preproc_options.flatten_while_loop() ||
preproc_options.remove_infeed_outfeed) {
HloPassPipeline pipeline("control-flow-flattening-pipeline");
int while_execution_count =
preproc_options.while_execution_count.value_or(0);
pipeline.AddPass<HloControlFlowFlattening>(
HloControlFlowFlattening::Options{
while_execution_count,
while_execution_count,
while_execution_count,
preproc_options.remove_infeed_outfeed,
preproc_options.flatten_while_loop(),
false, true});
TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status());
}
return absl::OkStatus();
}
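// Finalizes CompileOptions for the module: arguments are treated as a tupled
// parameter only when the entry computation takes a single tuple of arrays.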
CompileOptions FunctionalHloRunner::CompleteCompileOptions(
const HloModule& hlo_module, CompileOptions compile_options) {
ParameterType parameter_type = GetParameterType(hlo_module);
compile_options.parameter_is_tupled_arguments =
(parameter_type == ParameterType::kOneTupleOfArrays);
return compile_options;
}
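// Compiles the preprocessed module with the client's compiler and returns a
// loaded executable.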
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
FunctionalHloRunner::Compile(PjRtClient& client, HloModule* hlo_module,
const DebugOptions& debug_options,
const PreprocessingOptions& preproc_options,
const CompileOptions& compile_options) {
TF_RETURN_IF_ERROR(PrepareHloModuleForCompilation(hlo_module, debug_options,
preproc_options));
CompileOptions modified_compile_options =
CompleteCompileOptions(*hlo_module, compile_options);
XlaComputation computation(hlo_module->ToProto());
VLOG(1) << "FunctionalHloRunner: compilation started.";
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
client.Compile(computation, modified_compile_options));
VLOG(1) << "FunctionalHloRunner: compile succeeded.";
return executable;
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> FunctionalHloRunner::Compile(
PjRtClient& client, HloModule* hlo_module,
const DebugOptions& debug_options,
const PreprocessingOptions& preproc_options,
const CompileOptions& compile_options,
const PjRtTopologyDescription& topology) {
TF_RETURN_IF_ERROR(PrepareHloModuleForCompilation(hlo_module, debug_options,
preproc_options));
CompileOptions modified_compile_options =
CompleteCompileOptions(*hlo_module, compile_options);
XlaComputation computation(hlo_module->ToProto());
VLOG(1) << "FunctionalHloRunner: compilation started.";
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtExecutable> executable,
PjRtCompile(modified_compile_options, computation, topology, &client));
VLOG(1) << "FunctionalHloRunner: compile succeeded.";
return executable;
}
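// Runs a compiled executable. If no arguments are given, fake arguments are
// created on device; if a single tupled argument is given and the executable
// expects flattened arguments, the tuple is decomposed first.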
absl::StatusOr<FunctionalHloRunner::PerDeviceLiteralVecType>
FunctionalHloRunner::Run(PjRtClient& client, PjRtLoadedExecutable* executable,
const PerDeviceLiteralVecType& arguments,
const RunningOptions& running_options,
std::minstd_rand0* engine) {
auto create_argument_buffers_on_device = [&client, &executable, &arguments,
&running_options, engine](
bool flatten_tupled_arguments) {
if (arguments.empty()) {
return CreateArgumentsOnDevice(client, executable, running_options,
flatten_tupled_arguments, engine);
}
if (flatten_tupled_arguments && arguments.begin()->second.size() == 1 &&
arguments.begin()->second.front().shape().IsTuple()) {
PerDeviceLiteralVecType flattened_arguments;
for (const auto& device_id_and_arguments : arguments) {
Literal tupled_argument =
device_id_and_arguments.second.front().Clone();
LiteralVec flattened_argument = tupled_argument.DecomposeTuple();
int device_id = device_id_and_arguments.first;
flattened_arguments.insert({device_id, std::move(flattened_argument)});
}
return CopyArgumentsToDevice(client, executable, flattened_arguments,
running_options,
true);
}
return CopyArgumentsToDevice(client, executable, arguments, running_options,
false);
};
return RunInternal(client, executable, create_argument_buffers_on_device,
running_options);
}
namespace {
std::vector<std::vector<PjRtBuffer*>> CreateArgumentPointersFromDeviceBuffers(
absl::Span<const std::vector<std::unique_ptr<PjRtBuffer>>> device_buffers) {
std::vector<std::vector<PjRtBuffer*>> argument_ptrs(device_buffers.size());
for (int i = 0; i < device_buffers.size(); i++) {
argument_ptrs[i].resize(device_buffers[i].size());
for (int j = 0; j < device_buffers[i].size(); j++) {
argument_ptrs[i][j] = device_buffers[i][j].get();
}
}
return argument_ptrs;
}
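// Builds the argument pointers for the next repeat, substituting an output
// buffer for an input buffer whenever the module declares an input/output
// alias for that parameter.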
std::vector<std::vector<PjRtBuffer*>> CreateArgumentPointersBasedOnAliasing(
absl::Span<const std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
absl::Span<const std::vector<std::unique_ptr<PjRtBuffer>>> input_buffers,
std::function<std::optional<int64_t>(int64_t)> get_output_buffer_index) {
int num_arguments = input_buffers.front().size();
std::vector<std::vector<PjRtBuffer*>> argument_ptrs(output_buffers.size());
for (int i = 0; i < input_buffers.size(); i++) {
argument_ptrs[i].resize(num_arguments);
for (int argument_index = 0; argument_index < num_arguments;
argument_index++) {
std::optional<int> output_buffer_index =
get_output_buffer_index(argument_index);
if (!output_buffer_index.has_value()) {
argument_ptrs[i][argument_index] =
input_buffers[i][argument_index].get();
} else {
argument_ptrs[i][argument_index] =
output_buffers[i][*output_buffer_index].get();
}
}
}
return argument_ptrs;
}
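// Returns the entry computation's parameter shapes, preferring the static
// shapes recorded in the entry computation layout when they are available.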
std::vector<Shape> GetArgumentShapes(const HloModule& module) {
const auto& params = module.entry_computation()->parameter_instructions();
std::vector<Shape> argument_shapes;
argument_shapes.reserve(params.size());
for (int i = 0; i < static_cast<int>(params.size()); ++i) {
const HloModuleConfig& module_config = module.config();
argument_shapes.push_back((module_config.has_entry_computation_layout() &&
module_config.entry_computation_layout()
.parameter_layout(i)
.shape()
.is_static())
? module_config.entry_computation_layout()
.parameter_layout(i)
.shape()
: params[i]->shape());
}
return argument_shapes;
}
absl::Status EnsureSingleTupleForFlattening(const HloModule& module) {
if (module.entry_computation()->num_parameters() != 1) {
return InvalidArgument(
"Flattening arguments requires the number of parameters to be 1. "
"The actual number of parameters is %d",
module.entry_computation()->num_parameters());
}
if (!module.entry_computation()
->parameter_instructions()
.front()
->shape()
.IsTuple()) {
return InvalidArgument(
"Flattening arguments requires the module parameter to be a single "
"tuple. But the actual parameter shape is %s",
module.entry_computation()
->parameter_instructions()
.front()
->shape()
.ToString());
}
return absl::OkStatus();
}
}
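// Shared execution loop: creates (or reuses) argument buffers, executes the
// module `num_repeats` times while honoring input/output aliasing between
// repeats, then fetches and optionally logs the outputs.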
absl::StatusOr<FunctionalHloRunner::PerDeviceLiteralVecType>
FunctionalHloRunner::RunInternal(
PjRtClient& client, PjRtLoadedExecutable* executable,
std::function<absl::StatusOr<
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>(bool)>
create_argument_buffers_on_device,
const RunningOptions& running_options) {
ExecuteOptions execute_options;
if (running_options.multi_slice_config != nullptr) {
execute_options.multi_slice_config = running_options.multi_slice_config;
}
if (running_options.untuple_result.has_value()) {
execute_options.untuple_result = *running_options.untuple_result;
}
TF_ASSIGN_OR_RETURN(std::vector<std::shared_ptr<HloModule>> hlo_modules,
executable->GetHloModules());
CHECK_EQ(hlo_modules.size(), 1);
const HloModule& module = *(hlo_modules.front());
ParameterType parameter_type = GetParameterType(module);
bool flatten_arguments = parameter_type == ParameterType::kOneTupleOfArrays;
auto get_output_index_for_one_tuple_of_arrays =
[&module](int64_t parameter_index) -> std::optional<int64_t> {
const HloInputOutputAliasConfig& alias_config =
module.input_output_alias_config();
std::optional<ShapeIndex> output_index =
alias_config.GetAliasedOutput(0, {parameter_index});
if (!output_index.has_value()) {
return std::nullopt;
}
if (module.entry_computation()->root_instruction()->shape().IsTuple()) {
return std::optional<int64_t>(output_index->front());
}
CHECK(output_index->empty());
return 0;
};
auto get_output_index_for_one_list_of_arrays =
[&module](int64_t parameter_index) -> std::optional<int64_t> {
const HloInputOutputAliasConfig& alias_config =
module.input_output_alias_config();
std::optional<ShapeIndex> output_index =
alias_config.GetAliasedOutput(parameter_index, {});
if (!output_index.has_value()) {
return std::nullopt;
}
if (module.entry_computation()->root_instruction()->shape().IsTuple()) {
return std::optional<int64_t>(output_index->front());
}
CHECK(output_index->empty());
return 0;
};
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers;
auto output_has_tuple_leaf_on_host_memory_space = [&module]() {
if (!module.result_shape().IsTuple()) {
return false;
}
return absl::c_any_of(
module.result_shape().tuple_shapes(), [](const Shape& shape) {
return shape.has_layout() &&
shape.layout().memory_space() == Layout::kHostMemorySpace;
});
};
bool must_untuple_result = output_has_tuple_leaf_on_host_memory_space();
bool default_untuple_result =
must_untuple_result || execute_options.untuple_result;
switch (parameter_type) {
case ParameterType::kOneTupleOfArrays:
execute_options.arguments_are_tupled = false;
execute_options.untuple_result =
module.entry_computation()->root_instruction()->shape().IsTuple();
break;
case ParameterType::kOneListOfArrays:
execute_options.arguments_are_tupled = false;
execute_options.untuple_result =
module.entry_computation()->root_instruction()->shape().IsTuple();
break;
case ParameterType::kOther:
execute_options.arguments_are_tupled = false;
execute_options.untuple_result = false;
break;
}
if (must_untuple_result) {
execute_options.untuple_result = true;
}
std::optional<std::vector<PjRtFuture<>>> futures;
futures.emplace();
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> device_buffers;
std::vector<std::vector<PjRtBuffer*>> argument_ptrs;
for (int repeat = 0; repeat < running_options.num_repeats; ++repeat) {
VLOG(1) << "FunctionalHloRunner: ExecuteOnDevices started (repeat = "
<< repeat << ").";
if (repeat == 0 || running_options.recreate_buffers_between_repeats) {
VLOG(1) << "Creating argument buffers. repeat = " << repeat;
device_buffers.clear();
argument_ptrs.clear();
TF_ASSIGN_OR_RETURN(device_buffers,
create_argument_buffers_on_device(flatten_arguments));
argument_ptrs = CreateArgumentPointersFromDeviceBuffers(device_buffers);
}
if (repeat == running_options.num_repeats - 1) {
execute_options.untuple_result = default_untuple_result;
if (running_options.profiler != nullptr) {
running_options.profiler->CreateSession();
}
}
execute_options.launch_id = repeat;
futures->clear();
TF_ASSIGN_OR_RETURN(
output_buffers,
executable->Execute(argument_ptrs, execute_options, futures));
for (auto& future : *futures) {
TF_RETURN_IF_ERROR(future.Await());
}
VLOG(1) << "FunctionalHloRunner: ExecuteOnDevices succeeded (repeat = "
<< repeat << ")";
if (repeat < running_options.num_repeats - 1) {
switch (parameter_type) {
case ParameterType::kOneTupleOfArrays:
argument_ptrs = CreateArgumentPointersBasedOnAliasing(
output_buffers, device_buffers,
get_output_index_for_one_tuple_of_arrays);
break;
case ParameterType::kOneListOfArrays:
argument_ptrs = CreateArgumentPointersBasedOnAliasing(
output_buffers, device_buffers,
get_output_index_for_one_list_of_arrays);
break;
case ParameterType::kOther:
argument_ptrs =
CreateArgumentPointersFromDeviceBuffers(device_buffers);
break;
}
}
}
TF_ASSIGN_OR_RETURN(PerDeviceLiteralVecType results,
FetchAndLogOutput(client, output_buffers,
running_options.module_output_mode,
running_options.log_input_output()));
if (running_options.profiler != nullptr) {
running_options.profiler->UploadSession();
}
return results;
}
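// Generates fake arguments on every addressable device according to the
// requested ModuleArgumentMode (zeros, random, shared random, device id, or
// uninitialized buffers).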
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
FunctionalHloRunner::CreateArgumentsOnDevice(
PjRtClient& client, const PjRtLoadedExecutable* executable,
const RunningOptions& running_options, bool flatten_arguments,
std::minstd_rand0* engine) {
if (running_options.module_argument_mode ==
ModuleArgumentMode::kUninitialized) {
return CreateUninitializedArgumentsOnDevice(
client, executable, running_options, flatten_arguments);
}
absl::Span<PjRtDevice* const> addressable_devices =
executable->addressable_devices();
size_t num_addressable_devices = addressable_devices.size();
PerDeviceLiteralVecType per_device_argument_literals;
absl::Span<const PjRtLoadedExecutable::LogicalDeviceIds>
addressable_device_logical_ids =
executable->addressable_device_logical_ids();
TF_ASSIGN_OR_RETURN(std::vector<std::shared_ptr<HloModule>> hlo_modules,
executable->GetHloModules());
VLOG(1) << "FunctionalHloRunner: local_executable count = "
<< hlo_modules.size();
const bool kUseRandomInputs = running_options.module_argument_mode ==
ModuleArgumentMode::kUseRandomInputs ||
running_options.module_argument_mode ==
ModuleArgumentMode::kUseSharedRandomInputs;
const bool kUseSharedInputs =
running_options.module_argument_mode ==
ModuleArgumentMode::kUseSharedRandomInputs ||
running_options.module_argument_mode ==
ModuleArgumentMode::kUseZerosAsInput;
for (int i = 0; i < num_addressable_devices; ++i) {
VLOG(3) << "Creating fake argument for device " << i;
LiteralVec& argument_literals =
per_device_argument_literals[addressable_devices[i]->id()];
int executable_idx = hlo_modules.size() == 1
? 0
: addressable_device_logical_ids[i].partition;
HloModule* my_hlo_module = hlo_modules[executable_idx].get();
if (flatten_arguments) {
TF_RETURN_IF_ERROR(EnsureSingleTupleForFlattening(*my_hlo_module));
}
if (running_options.module_argument_mode ==
ModuleArgumentMode::kUseDeviceIdAsInput) {
const auto params =
my_hlo_module->entry_computation()->parameter_instructions();
if (flatten_arguments) {
CHECK_EQ(params.size(), 1);
CHECK(params.front()->shape().IsTuple());
argument_literals.reserve(params.front()->shape().tuple_shapes_size());
} else {
argument_literals.reserve(params.size());
}
for (int j = 0; j < params.size(); ++j) {
TF_ASSIGN_OR_RETURN(
Literal argument_literal_j,
MakeFakeLiteralWithSameValue(params[j]->shape(),
addressable_devices[i]->id()));
if (flatten_arguments) {
std::vector<Literal> decomposed_argument_literals =
argument_literal_j.DecomposeTuple();
for (auto& literal : decomposed_argument_literals) {
argument_literals.push_back(std::move(literal));
}
} else {
argument_literals.push_back(std::move(argument_literal_j));
}
}
} else {
if (flatten_arguments) {
TF_ASSIGN_OR_RETURN(
LiteralVec tupled_argument_literals,
MakeFakeArguments(my_hlo_module, kUseRandomInputs,
false,
false,
std::nullopt, engine));
CHECK_EQ(tupled_argument_literals.size(), 1);
CHECK(tupled_argument_literals.front().shape().IsTuple());
argument_literals = tupled_argument_literals.front().DecomposeTuple();
} else {
TF_ASSIGN_OR_RETURN(
argument_literals,
MakeFakeArguments(my_hlo_module, kUseRandomInputs,
false,
false,
std::nullopt, engine));
}
if (kUseSharedInputs) {
break;
}
}
}
if (kUseSharedInputs) {
return CopyArgumentsToDevice(client, executable,
per_device_argument_literals, running_options,
flatten_arguments,
true);
}
return CopyArgumentsToDevice(client, executable, per_device_argument_literals,
running_options, flatten_arguments);
}
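// Allocates uninitialized device buffers matching the executable's parameter
// shapes; no data is transferred from the host.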
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
FunctionalHloRunner::CreateUninitializedArgumentsOnDevice(
PjRtClient& client, const PjRtLoadedExecutable* executable,
const RunningOptions& running_options, bool flatten_arguments) {
absl::Span<PjRtDevice* const> addressable_devices =
executable->addressable_devices();
absl::Span<const PjRtLoadedExecutable::LogicalDeviceIds>
addressable_device_logical_ids =
executable->addressable_device_logical_ids();
TF_ASSIGN_OR_RETURN(std::vector<std::shared_ptr<HloModule>> hlo_modules,
executable->GetHloModules());
VLOG(1) << "FunctionalHloRunner: local_executable count = "
<< hlo_modules.size();
LOG(INFO) << "Starting argument buffer shape calculation.";
PerDeviceShapeVecType argument_shapes_per_device;
CHECK_EQ(addressable_devices.size(), addressable_device_logical_ids.size());
for (int i = 0; i < static_cast<int>(addressable_devices.size()); ++i) {
VLOG(3) << "Calculating fake argument shapes for device " << i;
PjRtDevice* device = addressable_devices[i];
int executable_idx = hlo_modules.size() == 1
? 0
: addressable_device_logical_ids[i].partition;
const HloModule& hlo_module = *hlo_modules[executable_idx];
std::vector<Shape> argument_shapes;
if (flatten_arguments) {
TF_RETURN_IF_ERROR(EnsureSingleTupleForFlattening(hlo_module));
std::vector<Shape> original_argument_shapes =
GetArgumentShapes(hlo_module);
CHECK_EQ(original_argument_shapes.size(), 1);
CHECK(original_argument_shapes.front().IsTuple());
argument_shapes = original_argument_shapes.front().tuple_shapes();
} else {
argument_shapes = GetArgumentShapes(hlo_module);
}
argument_shapes_per_device[device->id()] = std::move(argument_shapes);
}
LOG(INFO) << "Starting argument buffer allocation.";
int buffer_count = 0;
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>
argument_buffers_per_device;
argument_buffers_per_device.reserve(addressable_devices.size());
for (int i = 0; i < static_cast<int>(addressable_devices.size()); ++i) {
VLOG(3) << "Allocating fake arguments for device " << i;
PjRtDevice* device = addressable_devices[i];
CHECK(argument_shapes_per_device.contains(device->id()));
const std::vector<Shape>& argument_shapes =
argument_shapes_per_device.at(device->id());
std::vector<std::unique_ptr<PjRtBuffer>> argument_buffers;
argument_buffers.reserve(argument_shapes.size());
for (const Shape& shape : argument_shapes) {
if (running_options.log_input_output()) {
LOG(INFO) << "device_id=" << device->id()
<< ", input = " << shape.ToString();
}
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtBuffer> argument_buffer,
client.CreateUninitializedBuffer(shape, device));
argument_buffers.push_back(std::move(argument_buffer));
buffer_count += 1;
}
argument_buffers_per_device.push_back(std::move(argument_buffers));
}
LOG(INFO) << "Allocated argument buffers: " << buffer_count;
for (const auto& argument_buffers : argument_buffers_per_device) {
for (const auto& buffer : argument_buffers) {
TF_RETURN_IF_ERROR(buffer->BlockHostUntilReady());
}
}
LOG(INFO) << "Argument buffers are ready.";
return argument_buffers_per_device;
}
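// Transfers per-device argument literals to device buffers. With
// `clone_device0_arguments`, every device receives a copy of device 0's
// arguments. Parameters whose layout uses host memory space are placed in
// pinned host memory when the client exposes memory spaces.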
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
FunctionalHloRunner::CopyArgumentsToDevice(
PjRtClient& client, const PjRtLoadedExecutable* executable,
const PerDeviceLiteralVecType& arguments,
const RunningOptions& running_options, bool flattened_arguments,
bool clone_device0_arguments) {
const bool log_input = running_options.log_input_output();
absl::Span<PjRtDevice* const> addressable_devices =
executable->addressable_devices();
size_t num_addressable_devices = addressable_devices.size();
if (!clone_device0_arguments && num_addressable_devices != arguments.size()) {
return InvalidArgument(
"The number of provided arguments does not match "
"the number of logical devices.");
}
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> argument_buffers;
argument_buffers.resize(num_addressable_devices);
auto argument_memory_space =
[&flattened_arguments](const HloModule* module, PjRtDevice* device,
int arg_i) -> absl::StatusOr<PjRtMemorySpace*> {
auto non_tuple_memory_space = [&device](const Shape& shape) {
if (shape.has_layout() &&
shape.layout().memory_space() == Layout::kHostMemorySpace) {
return device->memory_space_by_kind(PinnedHostMemorySpace::kKind);
}
return device->default_memory_space();
};
const ComputationLayout& entry_layout = module->entry_computation_layout();
TF_RET_CHECK(entry_layout.parameter_count() > 0);
if (entry_layout.parameter_shape(0).IsTuple() && flattened_arguments) {
TF_RET_CHECK(entry_layout.parameter_count() == 1)
<< "entry_layout.parameter_count(): "
<< entry_layout.parameter_count();
TF_RET_CHECK(arg_i < entry_layout.parameter_shape(0).tuple_shapes_size());
const Shape& shape = entry_layout.parameter_shape(0).tuple_shapes(arg_i);
TF_RET_CHECK(!shape.IsTuple()) << "Nested tuples are not supported";
return non_tuple_memory_space(shape);
}
TF_RET_CHECK(arg_i < entry_layout.parameter_count());
const Shape& shape = entry_layout.parameter_shape(arg_i);
TF_RET_CHECK(!shape.IsTuple()) << "Param tuple without flattened_arguments";
return non_tuple_memory_space(shape);
};
auto buffer_from_host_literal =
[&client, &argument_memory_space, &running_options](
const HloModule* module, PjRtDevice* device, int arg_i,
const Literal& literal)
-> absl::StatusOr<std::unique_ptr<PjRtBuffer>> {
const Layout* layout = nullptr;
if (running_options.use_argument_host_layout &&
literal.shape().has_layout()) {
layout = &literal.shape().layout();
}
if (client.memory_spaces().empty()) {
return client.BufferFromHostLiteral(literal, device, layout);
}
TF_ASSIGN_OR_RETURN(PjRtMemorySpace * memory_space,
argument_memory_space(module, device, arg_i));
return client.BufferFromHostLiteral(literal, memory_space, layout);
};
absl::Span<const PjRtLoadedExecutable::LogicalDeviceIds>
addressable_device_logical_ids =
executable->addressable_device_logical_ids();
TF_ASSIGN_OR_RETURN(std::vector<std::shared_ptr<HloModule>> hlo_modules,
executable->GetHloModules());
for (int i = 0; i < num_addressable_devices; ++i) {
PjRtDevice* curr_device = addressable_devices[i];
int curr_device_id = curr_device->id();
PjRtDevice* source_device =
addressable_devices[clone_device0_arguments ? 0 : i];
int source_device_id = source_device->id();
if (!arguments.contains(source_device_id)) {
return InvalidArgument(
"The provided argument map does not contain arguments "
"for device: %d",
curr_device_id);
}
const std::vector<Literal>& curr_device_arguments =
arguments.at(source_device_id);
int executable_idx = hlo_modules.size() == 1
? 0
: addressable_device_logical_ids[i].partition;
HloModule* module = hlo_modules[executable_idx].get();
argument_buffers[i].reserve(curr_device_arguments.size());
for (int arg_i = 0; arg_i < curr_device_arguments.size(); ++arg_i) {
const Literal& literal = curr_device_arguments[arg_i];
if (log_input) {
LOG(INFO) << "device_id=" << curr_device_id
<< ", input = " << literal.ToString();
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<PjRtBuffer> argument_buffer,
buffer_from_host_literal(module, curr_device, arg_i, literal));
argument_buffers[i].push_back(std::move(argument_buffer));
}
}
for (const auto& device_argument_buffers : argument_buffers) {
for (const auto& device_buffer : device_argument_buffers) {
TF_RETURN_IF_ERROR(device_buffer->BlockHostUntilReady());
}
}
return argument_buffers;
}
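// Copies the requested output buffers back to host literals. Transfers are
// issued asynchronously and tracked with a mutex-guarded pending counter;
// outputs are optionally logged per device.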
absl::StatusOr<FunctionalHloRunner::PerDeviceLiteralVecType>
FunctionalHloRunner::FetchAndLogOutput(
PjRtClient& client,
const std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>& output_buffers,
ModuleOutputMode module_output_mode, bool log_output) {
CHECK(!output_buffers.empty());
absl::Mutex mu;
absl::Status status;
size_t num_pending_transfers = 0;
bool device_0_is_local = false;
for (PjRtDevice* device : GetLocalDevices(client)) {
if (device->id() == 0) {
device_0_is_local = true;
}
}
if (module_output_mode == ModuleOutputMode::kReturnDevice0Outputs &&
device_0_is_local) {
num_pending_transfers = output_buffers[0].size();
} else if (module_output_mode == ModuleOutputMode::kReturnOutputs) {
for (const auto& bs : output_buffers) {
num_pending_transfers += bs.size();
}
}
PerDeviceLiteralVecType outputs;
for (int i = 0; i < output_buffers.size(); ++i) {
if (output_buffers[i].empty()) {
continue;
}
const int device_id = output_buffers[i][0]->device()->id();
std::vector<Literal>& output_slice = outputs[device_id];
if (module_output_mode == ModuleOutputMode::kReturnOutputs ||
(module_output_mode == ModuleOutputMode::kReturnDevice0Outputs &&
device_id == 0)) {
output_slice.reserve(output_buffers[i].size());
for (const auto& buffer : output_buffers[i]) {
TF_RET_CHECK(buffer->device() == output_buffers[i][0]->device())
<< "All outputs from a given vector of outputs should be for the "
"same device";
output_slice.emplace_back(
ShapeUtil::DeviceShapeToHostShape(buffer->on_device_shape()));
buffer->ToLiteral(&output_slice.back()).OnReady([&](absl::Status s) {
absl::MutexLock lock(&mu);
--num_pending_transfers;
status.Update(s);
});
}
} else {
for (const auto& buffer : output_buffers[i]) {
TF_RET_CHECK(buffer->device() == output_buffers[i][0]->device())
<< "All outputs from a given vector of outputs should be for the "
"same device";
TF_RETURN_IF_ERROR(buffer->BlockHostUntilReady());
}
}
}
if (module_output_mode == ModuleOutputMode::kReturnOutputs ||
(module_output_mode == ModuleOutputMode::kReturnDevice0Outputs &&
device_0_is_local)) {
auto cond = [&]() { return !status.ok() || num_pending_transfers == 0; };
absl::MutexLock lock(&mu);
mu.Await(absl::Condition(&cond));
TF_RETURN_IF_ERROR(status);
if (log_output) {
for (const PjRtDevice* device : GetLocalDevices(client)) {
int device_id = device->id();
if (module_output_mode == ModuleOutputMode::kReturnDevice0Outputs &&
device_id != 0) {
continue;
}
LOG(INFO) << "Outputs for device_id: " << device_id;
const std::vector<Literal>& output_slice = outputs[device_id];
for (int i = 0; i < output_slice.size(); ++i) {
LOG(INFO) << "output[" << i << "]: " << output_slice[i].ToString();
}
}
}
}
return outputs;
}
} | #include "xla/tools/multihost_hlo_runner/functional_hlo_runner.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "xla/debug_options_flags.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tests/filecheck.h"
#include "xla/tools/multihost_hlo_runner/create_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/subprocess.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::SizeIs;
bool IsTestingCpu() {
#ifdef XLA_TEST_BACKEND_CPU
return true;
#endif
return false;
}
std::string GetHloPath(std::string file_name) {
return tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
"multihost_hlo_runner", "data", file_name);
}
absl::StatusOr<std::unique_ptr<xla::PjRtClient>> GetPjRtClient() {
if (IsTestingCpu()) {
return CreateHostClient();
}
return CreateGpuClient({});
}
using FunctionalHloRunnerTest = ::testing::Test;
TEST_F(FunctionalHloRunnerTest, SingleDeviceHlo) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
GetPjRtClient());
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 1;
FunctionalHloRunner::RunningOptions running_options;
TF_EXPECT_OK(FunctionalHloRunner::LoadAndRunAndDump(
*client, debug_options, preproc_options, raw_compile_options,
running_options, {GetHloPath("single_device.hlo")}, InputFormat::kText));
}
TEST_F(FunctionalHloRunnerTest, Sharded2Devices) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
GetPjRtClient());
constexpr int kRequiredDeviceCount = 2;
const int kDeviceCount = client->device_count();
if (kDeviceCount < kRequiredDeviceCount) {
GTEST_SKIP() << "Requires " << kRequiredDeviceCount
<< " devices, but found only " << kDeviceCount;
return;
}
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.spmd_mode =
FunctionalHloRunner::SpmdMode::kUseSpmdPartitioning;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 2;
FunctionalHloRunner::RunningOptions running_options;
TF_EXPECT_OK(FunctionalHloRunner::LoadAndRunAndDump(
*client, debug_options, preproc_options, raw_compile_options,
running_options, {GetHloPath("sharded_2_devices.hlo")},
InputFormat::kText));
}
TEST_F(FunctionalHloRunnerTest, UseZerosAsInputs) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
GetPjRtClient());
constexpr int kRequiredDeviceCount = 2;
const int kDeviceCount = client->device_count();
if (kDeviceCount < kRequiredDeviceCount) {
GTEST_SKIP() << "Requires " << kRequiredDeviceCount
<< " devices, but found only " << kDeviceCount;
return;
}
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.spmd_mode =
FunctionalHloRunner::SpmdMode::kUseSpmdPartitioning;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 2;
FunctionalHloRunner::RunningOptions running_options;
running_options.module_argument_mode =
FunctionalHloRunner::ModuleArgumentMode::kUseZerosAsInput;
TF_EXPECT_OK(FunctionalHloRunner::LoadAndRunAndDump(
*client, debug_options, preproc_options, raw_compile_options,
running_options, {GetHloPath("sharded_2_devices.hlo")},
InputFormat::kText));
}
TEST_F(FunctionalHloRunnerTest, UseUninitializedInputs) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
GetPjRtClient());
constexpr int kRequiredDeviceCount = 2;
const int kDeviceCount = client->device_count();
if (kDeviceCount < kRequiredDeviceCount) {
GTEST_SKIP() << "Requires " << kRequiredDeviceCount
<< " devices, but found only " << kDeviceCount;
return;
}
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.spmd_mode =
FunctionalHloRunner::SpmdMode::kUseSpmdPartitioning;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 2;
FunctionalHloRunner::RunningOptions running_options;
running_options.module_argument_mode =
FunctionalHloRunner::ModuleArgumentMode::kUninitialized;
TF_EXPECT_OK(FunctionalHloRunner::LoadAndRunAndDump(
*client, debug_options, preproc_options, raw_compile_options,
running_options, {GetHloPath("sharded_2_devices.hlo")},
InputFormat::kText));
}
TEST_F(FunctionalHloRunnerTest, UseUninitializedInputsWithTupledArguments) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
GetPjRtClient());
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.spmd_mode =
FunctionalHloRunner::SpmdMode::kUseSpmdPartitioning;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 1;
FunctionalHloRunner::RunningOptions running_options;
running_options.module_argument_mode =
FunctionalHloRunner::ModuleArgumentMode::kUninitialized;
TF_EXPECT_OK(FunctionalHloRunner::LoadAndRunAndDump(
*client, debug_options, preproc_options, raw_compile_options,
running_options, {GetHloPath("single_device_tupled.hlo")},
InputFormat::kText));
}
TEST_F(FunctionalHloRunnerTest, CanCompileWithoutHavingEnoughGpus) {
tsl::Env* env = tsl::Env::Default();
std::string dump_dir;
ASSERT_TRUE(env->LocalTempFilename(&dump_dir));
tsl::FileSystem* fs = nullptr;
TF_ASSERT_OK(env->GetFileSystemForFile(dump_dir, &fs));
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.spmd_mode =
FunctionalHloRunner::SpmdMode::kUseSpmdPartitioning;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 16;
raw_compile_options.xla_dump_to = dump_dir;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
GetPjRtClient());
TF_EXPECT_OK(FunctionalHloRunner::LoadAndCompile(
*client, debug_options, preproc_options, raw_compile_options,
GetHloPath("sharded_16_devices.hlo"), InputFormat::kText));
{
std::vector<std::string> after_opt_hlo_paths;
TF_ASSERT_OK(
fs->GetMatchingPaths(fs->JoinPath(dump_dir, "*after_optimizations.txt"),
&after_opt_hlo_paths));
ASSERT_THAT(after_opt_hlo_paths, SizeIs(1));
std::string after_opt_hlo;
TF_ASSERT_OK(
tsl::ReadFileToString(env, after_opt_hlo_paths[0], &after_opt_hlo));
absl::StatusOr<bool> file_check_result = RunFileCheck(after_opt_hlo, R"(
)");
TF_ASSERT_OK(file_check_result.status());
EXPECT_TRUE(file_check_result.value());
}
{
std::vector<std::string> ir_paths;
TF_ASSERT_OK(fs->GetMatchingPaths(fs->JoinPath(dump_dir, "*ir-no-opt.ll"),
&ir_paths));
ASSERT_THAT(ir_paths, SizeIs(1));
}
}
static const char* binary_name;
constexpr int kNumNodes = 2;
TEST_F(FunctionalHloRunnerTest, ShardedAutotuningWorks) {
if (IsTestingCpu()) {
GTEST_SKIP() << "GPU-only test.";
}
tsl::SubProcess child[kNumNodes];
for (int node_id = 0; node_id < kNumNodes; ++node_id) {
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--xla_gpu_shard_autotuning");
argv.push_back(absl::StrFormat("--node_id=%d", node_id));
child[node_id].SetProgram(binary_name, argv);
child[node_id].SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child[node_id].SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
ASSERT_TRUE(child[node_id].Start()) << "node " << node_id;
}
for (int node_id = 0; node_id < kNumNodes; ++node_id) {
std::string stdout_str;
std::string stderr_str;
int child_status =
child[node_id].Communicate(nullptr, &stdout_str, &stderr_str);
ASSERT_EQ(child_status, 0) << " node " << node_id << "\nstdout:\n"
<< stdout_str << "\nstderr:\n"
<< stderr_str;
}
}
absl::Status ShardedAutotuningWorksTestBody(const int node_id) {
tsl::setenv("CUDA_VISIBLE_DEVICES", std::to_string(node_id).data(),
true);
TF_ASSIGN_OR_RETURN(
PjRtEnvironment env,
xla::GetPjRtClient("gpu", "127.0.0.1:12345", node_id, kNumNodes,
false,
absl::Seconds(120)));
CHECK(env.kv_store != nullptr);
TF_RETURN_IF_ERROR(FunctionalHloRunner::LoadAndCompile(
*env.client, GetDebugOptionsFromFlags(),
FunctionalHloRunner::PreprocessingOptions{},
FunctionalHloRunner::RawCompileOptions{},
GetHloPath("multiple_gemm_fusions.hlo"), InputFormat::kText));
if (node_id == 0) {
TF_ASSIGN_OR_RETURN(std::string results0,
env.kv_store->Get("gemm_fusion_autotuning_results_1_0",
absl::Seconds(1)));
CHECK(absl::StrContains(results0, "run_time"));
TF_ASSIGN_OR_RETURN(std::string results1,
env.kv_store->Get("gemm_fusion_autotuning_results_1_1",
absl::Seconds(1)));
CHECK(absl::StrContains(results1, "run_time"));
CHECK_NE(results0, results1);
}
return absl::OkStatus();
}
TEST_F(FunctionalHloRunnerTest, CanRunWithMockCollectives) {
if (IsTestingCpu()) {
GTEST_SKIP() << "GPU-only test";
}
xla::DebugOptions debug_options;
FunctionalHloRunner::PreprocessingOptions preproc_options;
FunctionalHloRunner::RawCompileOptions raw_compile_options;
raw_compile_options.spmd_mode =
FunctionalHloRunner::SpmdMode::kUseSpmdPartitioning;
raw_compile_options.num_replicas = 1;
raw_compile_options.num_partitions = 16;
FunctionalHloRunner::RunningOptions running_options;
running_options.module_argument_mode =
FunctionalHloRunner::ModuleArgumentMode::kUseZerosAsInput;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::PjRtClient> client,
CreateMockGpuClient(16));
TF_EXPECT_OK(FunctionalHloRunner::LoadAndRunAndDump(
*client, debug_options, preproc_options, raw_compile_options,
running_options, {GetHloPath("sharded_16_devices.hlo")},
InputFormat::kText));
}
}
}
int main(int argc, char* argv[]) {
xla::binary_name = argv[0];
int node_id = -1;
std::vector<tsl::Flag> flag_list = {
tsl::Flag("node_id", &node_id,
"Node ID for ShardedAutotuningWorks test."),
};
xla::AppendDebugOptionsFlags(&flag_list);
std::string usage = tsl::Flags::Usage(argv[0], flag_list);
tsl::Flags::Parse(&argc, argv, flag_list);
testing::InitGoogleTest(&argc, argv);
if (node_id >= 0) {
return !xla::ShardedAutotuningWorksTestBody(node_id).ok();
}
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/multihost_hlo_runner/functional_hlo_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/multihost_hlo_runner/functional_hlo_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a1fba7d6-3851-4dfe-a221-936f5f6a1256 | cpp | tensorflow/tensorflow | profiler | tensorflow/lite/profiling/telemetry/profiler.cc | tensorflow/lite/profiling/telemetry/profiler_test.cc | #include "tensorflow/lite/profiling/telemetry/profiler.h"
#include <cstdint>
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite::telemetry {
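// Routes generic TFLite profiler events to the telemetry interface:
// TELEMETRY_* events become telemetry (op) events, operator invoke events
// become op invoke reports, and all other event types are ignored.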
void TelemetryProfiler::AddEvent(const char* tag, EventType event_type,
uint64_t metric, int64_t event_metadata1,
int64_t event_metadata2) {
switch (event_type) {
case EventType::TELEMETRY_EVENT:
case EventType::TELEMETRY_DELEGATE_EVENT: {
if (event_metadata1 == -1) {
ReportTelemetryEvent(tag, TelemetryStatusCode(metric));
} else {
ReportTelemetryOpEvent(tag, event_metadata1, event_metadata2,
TelemetryStatusCode(metric));
}
break;
}
case EventType::OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_PROFILED_OPERATOR_INVOKE_EVENT: {
ReportOpInvokeEvent(tag, metric, event_metadata1, event_metadata2);
break;
}
default:
return;
}
}
void TelemetryProfiler::AddEventWithData(const char* tag, EventType event_type,
const void* data) {
switch (event_type) {
case EventType::TELEMETRY_REPORT_SETTINGS:
case EventType::TELEMETRY_DELEGATE_REPORT_SETTINGS: {
auto* settings = reinterpret_cast<const TfLiteTelemetrySettings*>(data);
if (settings) {
ReportSettings(tag, settings);
}
break;
}
default:
return;
}
}
uint32_t TelemetryProfiler::BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
switch (event_type) {
case EventType::OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_OPERATOR_INVOKE_EVENT:
case EventType::DELEGATE_PROFILED_OPERATOR_INVOKE_EVENT: {
return ReportBeginOpInvokeEvent(tag, event_metadata1, event_metadata2);
}
default:
return UINT32_MAX;
}
}
void TelemetryProfiler::EndEvent(uint32_t event_handle) {
if (event_handle == UINT32_MAX) return;
ReportEndOpInvokeEvent(event_handle);
}
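// Adapter that implements TelemetryProfiler by forwarding every call to a
// user-provided C-style TfLiteTelemetryProfilerStruct.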
class TfLiteTelemetryProfiler : public TelemetryProfiler {
public:
explicit TfLiteTelemetryProfiler(TfLiteTelemetryProfilerStruct* profiler)
: profiler_(profiler) {}
void ReportTelemetryEvent(const char* event_name,
TelemetryStatusCode status) override;
void ReportTelemetryOpEvent(const char* event_name, int64_t op_idx,
int64_t subgraph_idx,
TelemetryStatusCode status) override;
void ReportSettings(const char* setting_name,
const TfLiteTelemetrySettings* settings) override;
uint32_t ReportBeginOpInvokeEvent(const char* op_name, int64_t op_idx,
int64_t subgraph_idx) override;
void ReportEndOpInvokeEvent(uint32_t event_handle) override;
void ReportOpInvokeEvent(const char* op_name, uint64_t elapsed_time,
int64_t op_idx, int64_t subgraph_idx) override;
private:
TfLiteTelemetryProfilerStruct* profiler_ = nullptr;
};
void TfLiteTelemetryProfiler::ReportTelemetryEvent(const char* event_name,
TelemetryStatusCode status) {
profiler_->ReportTelemetryEvent(profiler_, event_name, status.code());
}
void TfLiteTelemetryProfiler::ReportTelemetryOpEvent(
const char* event_name, int64_t op_idx, int64_t subgraph_idx,
TelemetryStatusCode status) {
profiler_->ReportTelemetryOpEvent(profiler_, event_name, op_idx, subgraph_idx,
status.code());
}
void TfLiteTelemetryProfiler::ReportSettings(
const char* setting_name, const TfLiteTelemetrySettings* settings) {
profiler_->ReportSettings(profiler_, setting_name, settings);
}
uint32_t TfLiteTelemetryProfiler::ReportBeginOpInvokeEvent(
const char* op_name, int64_t op_idx, int64_t subgraph_idx) {
return profiler_->ReportBeginOpInvokeEvent(profiler_, op_name, op_idx,
subgraph_idx);
}
void TfLiteTelemetryProfiler::ReportEndOpInvokeEvent(uint32_t event_handle) {
profiler_->ReportEndOpInvokeEvent(profiler_, event_handle);
}
void TfLiteTelemetryProfiler::ReportOpInvokeEvent(const char* op_name,
uint64_t elapsed_time,
int64_t op_idx,
int64_t subgraph_idx) {
profiler_->ReportOpInvokeEvent(profiler_, op_name, elapsed_time, op_idx,
subgraph_idx);
}
TelemetryProfiler* MakeTfLiteTelemetryProfiler(
TfLiteTelemetryProfilerStruct* profiler) {
return new TfLiteTelemetryProfiler(profiler);
}
} | #include "tensorflow/lite/profiling/telemetry/profiler.h"
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/profiling/telemetry/c/telemetry_setting.h"
#include "tensorflow/lite/profiling/telemetry/telemetry_status.h"
namespace tflite::telemetry {
namespace {
constexpr char kEventName[] = "event_name";
constexpr char kSettingName[] = "setting_name";
class MockTelemetryProfiler : public TelemetryProfiler {
public:
MOCK_METHOD(void, ReportTelemetryEvent,
(const char* event_name, TelemetryStatusCode status), (override));
MOCK_METHOD(void, ReportTelemetryOpEvent,
(const char* event_name, int64_t op_idx, int64_t subgraph_idx,
TelemetryStatusCode status),
(override));
MOCK_METHOD(void, ReportSettings,
(const char* setting_name,
const TfLiteTelemetrySettings* settings),
(override));
MOCK_METHOD(uint32_t, ReportBeginOpInvokeEvent,
(const char* op_name, int64_t op_idx, int64_t subgraph_idx),
(override));
MOCK_METHOD(void, ReportEndOpInvokeEvent, (uint32_t event_handle),
(override));
MOCK_METHOD(void, ReportOpInvokeEvent,
(const char* op_name, uint64_t elapsed_time, int64_t op_idx,
int64_t subgraph_idx),
(override));
};
class TelemetryStructTest : public ::testing::Test {
protected:
TelemetryStructTest() {
context_.profiler = &profiler_;
profiler_struct_.data = &mock_profiler_;
profiler_struct_.ReportTelemetryEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* event_name, uint64_t status) {
          static_cast<MockTelemetryProfiler*>(profiler->data)
->ReportTelemetryEvent(
event_name, tflite::telemetry::TelemetryStatusCode(status));
};
profiler_struct_.ReportTelemetryOpEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* event_name, int64_t op_idx, int64_t subgraph_idx,
uint64_t status) {
          static_cast<MockTelemetryProfiler*>(profiler->data)
->ReportTelemetryOpEvent(
event_name, op_idx, subgraph_idx,
tflite::telemetry::TelemetryStatusCode(status));
};
profiler_struct_.ReportSettings =
[](struct TfLiteTelemetryProfilerStruct* profiler,
const char* setting_name, const TfLiteTelemetrySettings* settings) {
          static_cast<MockTelemetryProfiler*>(profiler->data)
->ReportSettings(setting_name, settings);
};
profiler_struct_.ReportBeginOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name,
int64_t op_idx, int64_t subgraph_idx) -> uint32_t {
      return static_cast<MockTelemetryProfiler*>(profiler->data)
->ReportBeginOpInvokeEvent(op_name, op_idx, subgraph_idx);
};
profiler_struct_.ReportEndOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler,
uint32_t event_handle) {
          return static_cast<MockTelemetryProfiler*>(profiler->data)
->ReportEndOpInvokeEvent(event_handle);
};
profiler_struct_.ReportOpInvokeEvent =
[](struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name,
uint64_t elapsed_time, int64_t op_idx, int64_t subgraph_idx) {
          return static_cast<MockTelemetryProfiler*>(profiler->data)
->ReportOpInvokeEvent(op_name, elapsed_time, op_idx,
subgraph_idx);
};
profiler_.reset(telemetry::MakeTfLiteTelemetryProfiler(&profiler_struct_));
}
  MockTelemetryProfiler mock_profiler_;
std::unique_ptr<TelemetryProfiler> profiler_;
TfLiteContext context_;
TfLiteTelemetryProfilerStruct profiler_struct_;
};
TEST_F(TelemetryStructTest, TelemetryReportEvent) {
EXPECT_CALL(mock_profiler_,
ReportTelemetryEvent(kEventName, TelemetryStatusCode(kTfLiteOk)));
profiler_->ReportTelemetryEvent(kEventName, TelemetryStatusCode(kTfLiteOk));
}
TEST_F(TelemetryStructTest, TelemetryReportOpEvent) {
EXPECT_CALL(
mock_profiler_,
ReportTelemetryOpEvent(kEventName, 1, 2, TelemetryStatusCode(kTfLiteOk)));
profiler_->ReportTelemetryOpEvent(kEventName, 1, 2,
TelemetryStatusCode(kTfLiteOk));
}
TEST_F(TelemetryStructTest, TelemetryReportSettings) {
EXPECT_CALL(mock_profiler_, ReportSettings(kSettingName, testing::_));
TfLiteTelemetrySettings settings{};
profiler_->ReportSettings(kSettingName, &settings);
}
TEST_F(TelemetryStructTest, TelemetryReportBeginOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportBeginOpInvokeEvent(kSettingName, 1, 2));
profiler_->ReportBeginOpInvokeEvent(kSettingName, 1, 2);
}
TEST_F(TelemetryStructTest, TelemetryReportEndOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportEndOpInvokeEvent(1));
profiler_->ReportEndOpInvokeEvent(1);
}
TEST_F(TelemetryStructTest, TelemetryReportOpInvokeEvent) {
EXPECT_CALL(mock_profiler_, ReportOpInvokeEvent(kSettingName, 1, 2, 3));
profiler_->ReportOpInvokeEvent(kSettingName, 1, 2, 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/telemetry/profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/telemetry/profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
665bfea1-89ee-49d0-99c1-042637e13beb | cpp | tensorflow/tensorflow | outfeed_receiver | third_party/xla/xla/python/outfeed_receiver.cc | third_party/xla/xla/python/outfeed_receiver_test.cc | #include "xla/python/outfeed_receiver.h"
#include <sys/types.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/sharding_builder.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
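// Every outfeed transfer is preceded by a two-word u32 header: a fixed start
// marker followed by the consumer id. Consumer id 0 is reserved for the
// shutdown message.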
int constexpr kOutfeedHeaderWords = 2;
uint32_t constexpr kOutfeedHeaderStart = 271828;
uint32_t constexpr kOutfeedCidShutdown = 0;
class OutfeedData {
public:
OutfeedData(ifrt::PjRtDevice* device, uint32_t consumer_id, Shape shape)
: device_(device),
consumer_id_(consumer_id),
shape_(shape),
literal_(nullptr),
literal_size_bytes_(0) {}
ifrt::PjRtDevice* device() { return device_; }
uint32_t consumer_id() const { return consumer_id_; }
Shape shape() const { return shape_; }
std::unique_ptr<Literal> literal() {
CHECK(literal_);
return std::move(literal_);
}
void SetLiteral(std::unique_ptr<Literal> literal);
ssize_t literal_size_bytes() const { return literal_size_bytes_; }
std::string DebugString() const;
private:
ifrt::PjRtDevice* device_;
uint32_t consumer_id_;
Shape shape_;
std::unique_ptr<Literal> literal_;
ssize_t literal_size_bytes_;
};
void OutfeedData::SetLiteral(std::unique_ptr<Literal> literal) {
literal_ = std::move(literal);
shape_ = literal_->shape();
int total_size_bytes = 0;
ShapeUtil::ForEachSubshape(
shape_, [&](const Shape& literal_subshape, const ShapeIndex& index) {
if (!literal_subshape.IsTuple()) {
total_size_bytes += ShapeUtil::ByteSizeOf(literal_subshape, 8);
}
});
literal_size_bytes_ = total_size_bytes;
}
std::string OutfeedData::DebugString() const {
return absl::StrFormat("dev=%s; cons=%d; shape=%s", device_->DebugString(),
consumer_id_, shape_.ToString());
}
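// Receives outfeed data from a set of devices. One listener thread and one
// callback thread run per device; received literals are queued per device and
// the total queued bytes are bounded by `max_callback_queue_size_bytes_`.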
class OutfeedReceiverImpl {
public:
OutfeedReceiverImpl(
OutfeedReceiver::Callback callback,
absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options);
OutfeedReceiverImpl(const OutfeedReceiverImpl&) = delete;
OutfeedReceiverImpl& operator=(const OutfeedReceiverImpl&) = delete;
~OutfeedReceiverImpl();
void Start();
absl::StatusOr<XlaOp> AddOutfeedToBuilder(XlaBuilder* builder, XlaOp token,
uint32_t consumer_id,
std::vector<XlaOp> arrays,
uint32_t device_idx);
absl::Status RegisterOutfeed(uint32_t consumer_id, const Shape& shape);
private:
bool CallbackQueueHasSpace() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return callback_queue_size_bytes_ < max_callback_queue_size_bytes_;
}
bool ShutdownDone() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (num_working_callback_threads_ == 0 && num_listening_threads_ == 0);
}
void CallbackThreadLoop(int device_idx);
void DeviceListenerThreadLoop(int device_idx);
absl::Status SendShutdownOutfeedHeader(int device_idx);
absl::StatusOr<std::unique_ptr<Literal>> ReceiveRawFromOutfeed(
ifrt::PjRtDevice* device, const Shape& shape);
void EnqueueReceivedData(uint32_t device_idx,
std::unique_ptr<OutfeedData> received)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Shutdown();
OutfeedReceiver::Callback callback_;
std::vector<ifrt::PjRtDevice*> devices_;
uint64_t max_callback_queue_size_bytes_;
std::optional<ExecutableBuildOptions> executable_build_options_;
absl::Mutex mu_;
absl::flat_hash_map<uint32_t, Shape> shape_registry_ ABSL_GUARDED_BY(mu_);
uint64_t callback_queue_size_bytes_ ABSL_GUARDED_BY(mu_);
int num_listening_threads_ ABSL_GUARDED_BY(mu_);
bool shutdown_started_ ABSL_GUARDED_BY(mu_);
int num_working_callback_threads_ ABSL_GUARDED_BY(mu_);
std::vector<std::queue<std::unique_ptr<OutfeedData>>> callback_queues_
ABSL_GUARDED_BY(mu_);
std::unique_ptr<tsl::thread::ThreadPool> threads_;
};
OutfeedReceiverImpl::OutfeedReceiverImpl(
OutfeedReceiver::Callback callback,
absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options)
    : callback_(std::move(callback)),
      max_callback_queue_size_bytes_(max_callback_queue_size_bytes),
      executable_build_options_(executable_build_options) {
for (const auto& client : clients) {
for (auto device : client->addressable_devices()) {
devices_.push_back(tensorflow::down_cast<ifrt::PjRtDevice*>(device));
}
}
CHECK_GT(devices_.size(), 0);
callback_queues_ =
std::vector<std::queue<std::unique_ptr<OutfeedData>>>(devices_.size());
callback_queue_size_bytes_ = 0;
num_listening_threads_ = 0;
num_working_callback_threads_ = 0;
shutdown_started_ = false;
}
void OutfeedReceiverImpl::Start() {
{
absl::MutexLock lock(&mu_);
CHECK(!shutdown_started_);
}
int num_threads = 2 * devices_.size();
threads_ = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), "outfeed_receiver", num_threads);
for (int device_idx = 0; device_idx < devices_.size(); ++device_idx) {
threads_->Schedule(
[this, device_idx]() { DeviceListenerThreadLoop(device_idx); });
threads_->Schedule(
[this, device_idx]() { CallbackThreadLoop(device_idx); });
}
}
void OutfeedReceiverImpl::Shutdown() {
VLOG(2) << "Shutdown start";
{
absl::MutexLock lock(&mu_);
CHECK(!shutdown_started_);
shutdown_started_ = true;
}
for (int device_idx = 0; device_idx < devices_.size(); ++device_idx) {
TF_CHECK_OK(SendShutdownOutfeedHeader(device_idx));
}
VLOG(2) << "Shutdown waiting for listening and callback threads to stop";
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::ShutdownDone));
VLOG(2) << "Shutdown done";
}
OutfeedReceiverImpl::~OutfeedReceiverImpl() {
VLOG(2) << "~OutfeedReceiverImpl";
Shutdown();
}
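// Listener loop for a single device: read a header, look up the shape
// registered for its consumer id, transfer the payload, and hand it to the
// callback thread. Exits when the reserved shutdown consumer id is seen.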
void OutfeedReceiverImpl::DeviceListenerThreadLoop(int device_idx) {
{
absl::MutexLock lock(&mu_);
++num_listening_threads_;
}
ifrt::PjRtDevice* device = devices_[device_idx];
while (true) {
Shape header_shape = ShapeUtil::MakeShape(U32, {kOutfeedHeaderWords});
std::unique_ptr<Literal> header =
ReceiveRawFromOutfeed(device, header_shape).value();
absl::Span<uint32_t> header_data = header->data<uint32_t>();
CHECK_EQ(header_data.size(), kOutfeedHeaderWords);
CHECK_EQ(header_data[0], kOutfeedHeaderStart);
uint32_t consumer_id = header_data[1];
Shape shape;
{
absl::MutexLock lock(&mu_);
auto registered_shape = shape_registry_.find(consumer_id);
if (registered_shape == shape_registry_.end()) {
LOG(FATAL)
<< "[" << device->DebugString()
<< "] Cannot find registered shape for consumer ID " << consumer_id
<< ". Perhaps the code was compiled with a different instance "
<< "of OutfeedReceiver.";
}
shape = registered_shape->second;
}
auto received = std::make_unique<OutfeedData>(device, consumer_id, shape);
VLOG(2) << "Listener received header " << received->DebugString();
if (consumer_id == kOutfeedCidShutdown) {
VLOG(2) << "[" << device->DebugString()
<< "] Listener received shutdown header";
absl::MutexLock lock(&mu_);
--num_listening_threads_;
VLOG(2) << "[" << device->DebugString() << "] Enqueue shutdown callback";
EnqueueReceivedData(device_idx, std::move(received));
return;
}
std::unique_ptr<Literal> data =
ReceiveRawFromOutfeed(device, shape).value();
received->SetLiteral(std::move(data));
absl::MutexLock lock(&mu_);
EnqueueReceivedData(device_idx, std::move(received));
}
}
void OutfeedReceiverImpl::EnqueueReceivedData(
uint32_t device_idx, std::unique_ptr<OutfeedData> received)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::CallbackQueueHasSpace));
ssize_t literal_size_bytes = received->literal_size_bytes();
callback_queue_size_bytes_ += literal_size_bytes;
VLOG(2) << "Listener enqueues data " << received->DebugString() << " of size "
<< literal_size_bytes << " bytes; "
<< (1 + callback_queues_[device_idx].size())
<< " callbacks in queue of total size " << callback_queue_size_bytes_
<< " bytes.\n";
callback_queues_[device_idx].push(std::move(received));
}
absl::StatusOr<std::unique_ptr<Literal>>
OutfeedReceiverImpl::ReceiveRawFromOutfeed(ifrt::PjRtDevice* device,
const Shape& shape) {
auto literal = std::make_unique<Literal>(shape);
TF_RETURN_IF_ERROR(
device->client()->TransferFromOutfeed(device, literal.get()));
return literal;
}
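// Callback loop for a single device: waits for queued payloads and invokes the
// user callback outside of the queue lock; terminates on the shutdown marker.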
void OutfeedReceiverImpl::CallbackThreadLoop(int device_idx) {
const ifrt::PjRtDevice* device = devices_[device_idx];
{
absl::MutexLock lock(&mu_);
num_working_callback_threads_++;
}
while (true) {
std::unique_ptr<OutfeedData> received;
{
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(
+[](std::queue<std::unique_ptr<OutfeedData>>* queue) {
return !queue->empty();
},
&callback_queues_[device_idx]));
received = std::move(callback_queues_[device_idx].front());
callback_queues_[device_idx].pop();
callback_queue_size_bytes_ -= received->literal_size_bytes();
VLOG(2) << "[" << device->DebugString() << "] Dequeued callback for "
<< received->DebugString() << "; "
<< callback_queues_[device_idx].size()
<< " callbacks in queue of total size "
<< callback_queue_size_bytes_ << " bytes.\n";
}
if (received->consumer_id() == kOutfeedCidShutdown) {
VLOG(2) << "[" << device->DebugString()
<< "] Callback loop received shutdown signal";
{
absl::MutexLock lock(&mu_);
CHECK(callback_queues_[device_idx].empty());
--num_working_callback_threads_;
}
VLOG(2) << "[" << device->DebugString() << "] Callback loop done";
return;
}
{
tsl::profiler::TraceMe traceme("OutfeedReceiver::Callback");
callback_(received->device(), received->consumer_id(),
received->literal());
}
}
}
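// Compiles and runs a tiny computation on the device that outfeeds only the
// reserved shutdown header, unblocking that device's listener thread.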
absl::Status OutfeedReceiverImpl::SendShutdownOutfeedHeader(int device_idx) {
const ifrt::PjRtDevice* device = devices_[device_idx];
constexpr int consumer_id = kOutfeedCidShutdown;
VLOG(2) << "[" << device->DebugString()
<< "] SendSpecialHeader cons=" << consumer_id;
XlaBuilder builder(
absl::StrFormat("special_outfeed_header_%d_%d", consumer_id, device_idx));
XlaOp cst_operand = xla::ConstantR0<int32_t>(&builder, 0);
XlaOp outfeed =
AddOutfeedToBuilder(&builder, CreateToken(&builder), consumer_id, {}, 0)
.value();
XlaOp add_dep = xla::internal::XlaBuilderFriend::BuildAddDependency(
&builder, cst_operand, outfeed, ShapeUtil::MakeScalarShape(S32));
XlaComputation computation = builder.Build(add_dep).value();
CompileOptions compile_options;
if (executable_build_options_) {
compile_options.executable_build_options = *executable_build_options_;
}
compile_options.executable_build_options.set_num_replicas(1);
compile_options.executable_build_options.set_num_partitions(1);
DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = device->Id().value();
compile_options.executable_build_options.set_device_assignment(
device_assignment);
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
devices_[device_idx]->client()->pjrt_client()->Compile(
computation, std::move(compile_options)));
ExecuteOptions execute_options;
TF_ASSIGN_OR_RETURN(
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
executable->Execute({{}}, execute_options));
return absl::OkStatus();
}
absl::Status OutfeedReceiverImpl::RegisterOutfeed(uint32_t consumer_id,
const Shape& shape) {
VLOG(2) << "RegisterShape cons=" << consumer_id
<< "; shape=" << shape.ToString();
{
absl::MutexLock lock(&mu_);
auto found = shape_registry_.find(consumer_id);
if (found != shape_registry_.end()) {
if (!ShapeUtil::Equal(shape, found->second)) {
return InvalidArgument(
"Shape %s does not match previous shape %s used "
"for consumer id %d",
shape.DebugString(), found->second.DebugString(), consumer_id);
}
} else {
shape_registry_.insert({consumer_id, shape});
}
}
return absl::OkStatus();
}
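// Emits the outfeed ops into the caller's computation: the header (start
// marker + consumer id) followed by the tuple of arrays, both assigned to the
// given device via sharding, and registers the tuple shape for the consumer.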
absl::StatusOr<XlaOp> OutfeedReceiverImpl::AddOutfeedToBuilder(
XlaBuilder* builder, XlaOp token, uint32_t consumer_id,
std::vector<XlaOp> arrays, uint32_t device_idx) {
XlaOp data = Tuple(builder, std::move(arrays));
Shape shape_with_layout = builder->GetShape(data).value();
ShapeUtil::ForEachMutableSubshape(
&shape_with_layout, [](Shape* subshape, const ShapeIndex&) {
if (!subshape->has_layout()) {
LayoutUtil::SetToDefaultLayout(subshape);
}
});
TF_RETURN_IF_ERROR(RegisterOutfeed(consumer_id, shape_with_layout));
std::vector<uint32_t> header{kOutfeedHeaderStart, consumer_id};
XlaOp header_op = ConstantR1<uint32_t>(builder, header);
builder->SetSharding(sharding_builder::AssignDevice(device_idx));
token = OutfeedWithToken(
header_op, token, ShapeUtil::MakeShape(U32, {kOutfeedHeaderWords}), "");
if (consumer_id != kOutfeedCidShutdown) {
token = OutfeedWithToken(data, token, shape_with_layout, "");
}
builder->ClearSharding();
return token;
}
OutfeedReceiver::OutfeedReceiver(
Callback callback, absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options) {
p_impl_ = std::make_unique<OutfeedReceiverImpl>(callback, clients,
max_callback_queue_size_bytes,
executable_build_options);
}
OutfeedReceiver::~OutfeedReceiver() = default;
void OutfeedReceiver::Start() { p_impl_->Start(); }
absl::StatusOr<XlaOp> OutfeedReceiver::AddOutfeedToBuilder(
XlaBuilder* builder, XlaOp token, uint32_t consumer_id,
std::vector<XlaOp> arrays, uint32_t device_idx) {
if (consumer_id == kOutfeedCidShutdown) {
return InvalidArgument("Consumer ID cannot be a reserved value: %d",
consumer_id);
}
return p_impl_->AddOutfeedToBuilder(builder, token, consumer_id, arrays,
device_idx);
}
absl::Status OutfeedReceiver::RegisterOutfeed(uint32_t consumer_id,
const Shape& shape) {
if (consumer_id == kOutfeedCidShutdown) {
return InvalidArgument("Consumer ID cannot be a reserved value: %d",
consumer_id);
}
return p_impl_->RegisterOutfeed(consumer_id, shape);
}
} | #include "xla/python/outfeed_receiver.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/client/client_library.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_builder.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/service/platform_util.h"
#include "xla/test.h"
namespace xla {
namespace {
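// Test helper: compiles `root` for a single replica/partition pinned to
// `device_id` and executes it with no arguments.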
absl::Status CompileAndExecute(XlaBuilder* builder, XlaOp root, int device_id,
PjRtClient* client) {
XlaComputation computation = builder->Build(root).value();
CompileOptions compile_options;
compile_options.executable_build_options.set_num_replicas(1);
compile_options.executable_build_options.set_num_partitions(1);
DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = device_id;
compile_options.executable_build_options.set_device_assignment(
device_assignment);
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
client->Compile(computation, std::move(compile_options)));
ExecuteOptions execute_options;
TF_ASSIGN_OR_RETURN(
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
executable->Execute({{}}, execute_options));
return absl::OkStatus();
}
class Accumulator {
public:
struct Data {
uint32_t consumer_id;
std::shared_ptr<Literal> data;
};
void Receive(uint32_t consumer_id, std::shared_ptr<Literal> data) {
absl::MutexLock lock(&mutex_);
received_.push_back(Data{consumer_id, data});
}
std::vector<Data> received() {
absl::MutexLock lock(&mutex_);
return received_;
}
private:
absl::Mutex mutex_;
std::vector<Data> received_ ABSL_GUARDED_BY(mutex_);
};
TEST(OutfeedReceiverTest, ReceiveOutfeedSimple) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data = Iota(&builder, shape0, 0);
XlaOp send = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder, send, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(1, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
}
TEST(OutfeedReceiverTest, ReceiveOutfeedTwoComputations) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder0("execute_test_outfeed_0");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder0, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder0, CreateToken(&builder0),
consumer_id0, {data0}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder0, send0, 0, cpu_client.get()).ok());
XlaBuilder builder1("execute_test_outfeed_1");
constexpr int consumer_id1 = 6;
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder1, shape1, 0);
XlaOp send1 = outfeed_receiver
->AddOutfeedToBuilder(&builder1, CreateToken(&builder1),
consumer_id1, {data1}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder1, send1, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(2, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
EXPECT_EQ(consumer_id1, received[1].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape1}), received[1].data->shape());
}
TEST(OutfeedReceiverTest, ReceiveOutfeedTwoOutfeed) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data0}, 0)
.value();
constexpr int consumer_id1 = 6;
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder, shape1, 0);
XlaOp send1 =
outfeed_receiver
->AddOutfeedToBuilder(&builder, send0, consumer_id1, {data1}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder, send1, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(2, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
EXPECT_EQ(consumer_id1, received[1].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape1}), received[1].data->shape());
}
TEST(OutfeedReceiverTest, DifferentShapeForConsumerIdError) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data0}, 0)
.value();
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder, shape1, 0);
absl::StatusOr<XlaOp> send1 = outfeed_receiver->AddOutfeedToBuilder(
&builder, send0, consumer_id0, {data1}, 0);
EXPECT_FALSE(send1.ok());
EXPECT_THAT(
send1.status().ToString(),
testing::ContainsRegex(
#if defined(PLATFORM_WINDOWS)
"does not match previous shape \\w*/*\\w* *\\n?element_type"));
#else
"does not match previous shape (go/\\w+[ "
"]+\\n)?element_type"));
#endif
}
TEST(OutfeedReceiverTest, InvalidConsumerIdError) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
absl::StatusOr<XlaOp> send0 = outfeed_receiver->AddOutfeedToBuilder(
&builder, CreateToken(&builder), 0, {data0}, 0);
EXPECT_FALSE(send0.ok());
EXPECT_THAT(send0.status().ToString(),
testing::HasSubstr("Consumer ID cannot be a reserved value"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/outfeed_receiver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/outfeed_receiver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
92843351-d9ed-433d-8b94-3590ca2b505e | cpp | tensorflow/tensorflow | ops | tensorflow/compiler/mlir/python/mlir_wrapper/ops.cc | tensorflow/c/ops_test.cc | #include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/compiler/mlir/python/mlir_wrapper/mlir_wrapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
void init_ops(py::module& m) {
py::class_<mlir::Operation, std::unique_ptr<mlir::Operation, py::nodelete>>(
m, "Operation")
.def("getRegion", &mlir::Operation::getRegion,
py::return_value_policy::reference)
.def("getResult", &mlir::Operation::getResult)
.def("dump", &mlir::Operation::dump)
.def("getNumResults", &mlir::Operation::getNumResults);
py::class_<mlir::OperationState>(m, "OperationState")
.def(py::init([](mlir::Location loc, std::string name) {
return mlir::OperationState(loc, llvm::StringRef(name));
}))
.def("addTypes",
[](mlir::OperationState& state, std::vector<mlir::Type> tys) {
state.addTypes(mlir::ArrayRef<mlir::Type>(tys));
})
.def("addOperands",
[](mlir::OperationState& os, std::vector<mlir::Value> ops) {
os.addOperands(mlir::ArrayRef<mlir::Value>(ops));
})
.def("addRegion", py::overload_cast<>(&mlir::OperationState::addRegion),
py::return_value_policy::reference);
py::class_<mlir::ModuleOp>(m, "ModuleOp")
.def("create",
[](mlir::Location loc) { return mlir::ModuleOp::create(loc); })
.def("push_back",
[](mlir::ModuleOp& m, mlir::func::FuncOp f) { m.push_back(f); })
.def("dump", &mlir::ModuleOp::dump)
.def("getAsStr", [](mlir::ModuleOp& m) {
std::string str;
llvm::raw_string_ostream os(str);
m.print(os);
return os.str();
});
py::class_<mlir::func::FuncOp>(m, "FuncOp")
.def("create",
[](mlir::Location location, std::string name,
mlir::FunctionType type) {
auto func = mlir::func::FuncOp::create(location, name, type);
func.addEntryBlock();
return func;
})
.def(
"getBody",
[](mlir::func::FuncOp& f) -> mlir::Region& { return f.getBody(); },
py::return_value_policy::reference)
.def("getArguments",
[](mlir::func::FuncOp& f) { return f.getArguments().vec(); })
.def("getName", [](mlir::func::FuncOp& f) { return f.getName().str(); })
.def("getType", &mlir::func::FuncOp::getFunctionType);
py::class_<mlir::func::ReturnOp>(m, "ReturnOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
std::vector<mlir::Value> values) -> mlir::Operation* {
return opb
.create<mlir::func::ReturnOp>(
loc, mlir::ArrayRef<mlir::Value>(values))
.getOperation();
});
py::class_<mlir::TF::AddV2Op>(m, "Tf_AddV2Op")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::AddV2Op>(loc, x, y).getOperation();
});
py::class_<mlir::TF::AnyOp>(m, "Tf_AnyOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value input,
mlir::Value reduction_indices,
bool keep_dims = false) -> mlir::Operation* {
return opb
.create<mlir::TF::AnyOp>(loc, opb.getI1Type(), input,
reduction_indices, keep_dims)
.getOperation();
});
py::class_<mlir::TF::ConstOp>(m, "Tf_ConstOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
mlir::Attribute value) -> mlir::Operation* {
return opb.create<mlir::TF::ConstOp>(loc, value).getOperation();
});
py::class_<mlir::TF::EqualOp>(m, "Tf_EqualOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb
.create<mlir::TF::EqualOp>(loc, x, y, opb.getBoolAttr(true))
.getOperation();
});
py::class_<mlir::TF::GreaterEqualOp>(m, "Tf_GreaterEqualOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::GreaterEqualOp>(loc, x, y)
.getOperation();
});
py::class_<mlir::TF::GreaterOp>(m, "Tf_GreaterOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::GreaterOp>(loc, x, y).getOperation();
});
py::class_<mlir::TF::LegacyCallOp>(m, "Tf_LegacyCallOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
std::vector<mlir::Type> output, std::vector<mlir::Value> args,
std::string f) -> mlir::Operation* {
return opb
.create<mlir::TF::LegacyCallOp>(
loc, mlir::ArrayRef<mlir::Type>(output),
mlir::ArrayRef<mlir::Value>(args), mlir::StringRef(f))
.getOperation();
});
py::class_<mlir::TF::LessEqualOp>(m, "Tf_LessEqualOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::LessEqualOp>(loc, x, y).getOperation();
});
py::class_<mlir::TF::LessOp>(m, "Tf_LessOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::LessOp>(loc, x, y).getOperation();
});
py::class_<mlir::TF::NegOp>(m, "Tf_NegOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc,
mlir::Value x) -> mlir::Operation* {
return opb.create<mlir::TF::NegOp>(loc, x).getOperation();
});
py::class_<mlir::TF::NotEqualOp>(m, "Tf_NotEqualOp")
.def("create", [](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) {
return opb
.create<mlir::TF::NotEqualOp>(
loc, x, y, mlir::BoolAttr::get(opb.getContext(), true))
.getOperation();
});
py::class_<mlir::TF::SubOp>(m, "Tf_SubOp")
.def("create",
[](mlir::OpBuilder& opb, mlir::Location loc, mlir::Value x,
mlir::Value y) -> mlir::Operation* {
return opb.create<mlir::TF::SubOp>(loc, x, y).getOperation();
});
} | #include "tensorflow/c/ops.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(OpsTest, TestBasicOpRegistration) {
TF_OpDefinitionBuilder* builder = TF_NewOpDefinitionBuilder("SomeOp");
TF_OpDefinitionBuilderAddAttr(builder, "attr1: string");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddInput(builder, "input2: uint16");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint32");
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_Buffer* op_list_buffer = TF_GetAllOpList();
::tensorflow::OpList op_list;
op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length);
bool found = false;
for (const auto& op : op_list.op()) {
if (op.name() == "SomeOp") {
ASSERT_EQ(2, op.input_arg_size());
ASSERT_EQ("input1", op.input_arg(0).name());
ASSERT_EQ(::tensorflow::DT_UINT8, op.input_arg(0).type());
ASSERT_EQ(1, op.attr_size());
ASSERT_EQ("string", op.attr(0).type());
found = true;
}
}
EXPECT_TRUE(found);
TF_DeleteStatus(status);
TF_DeleteBuffer(op_list_buffer);
}
void identity_shape_fn(TF_ShapeInferenceContext* ctx, TF_Status* status) {
TF_ShapeHandle* handle = TF_NewShapeHandle();
TF_ShapeInferenceContextGetInput(ctx, 0, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextSetOutput(ctx, 0, handle, status);
TF_DeleteShapeHandle(handle);
}
TEST(OpsTest, TestShapeInference_IdentityFunction) {
ShapeInferenceTestOp op("SomeTestOp");
TF_OpDefinitionBuilder* builder = TF_NewOpDefinitionBuilder("SomeTestOp");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint8");
TF_OpDefinitionBuilderSetShapeInferenceFunction(builder, &identity_shape_fn);
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_ASSERT_OK(
shape_inference::ShapeInferenceTestutil::InferShapes(op, "[1,2]", "in0"));
TF_DeleteStatus(status);
}
TEST(OpsTest, TestShapeInference_UnknownShape) {
ShapeInferenceTestOp op("UnknownShapeOp");
TF_OpDefinitionBuilder* builder = TF_NewOpDefinitionBuilder("UnknownShapeOp");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddInput(builder, "input2: uint32");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint8");
TF_OpDefinitionBuilderAddOutput(builder, "output2: uint8");
TF_OpDefinitionBuilderSetShapeInferenceFunction(
builder, &TF_ShapeInferenceContextSetUnknownShape);
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_ASSERT_OK(shape_inference::ShapeInferenceTestutil::InferShapes(
op, "[1,2];[3,4]", "?;?"));
TF_DeleteStatus(status);
}
void vectorize_shape_fn(TF_ShapeInferenceContext* ctx, TF_Status* status) {
TF_ShapeHandle* handle = TF_NewShapeHandle();
TF_ShapeInferenceContextGetInput(ctx, 0, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeHandle* new_shape = TF_ShapeInferenceContextVectorFromSize(
ctx, TF_ShapeInferenceContextRank(ctx, handle));
TF_ShapeInferenceContextSetOutput(ctx, 0, new_shape, status);
TF_DeleteShapeHandle(handle);
TF_DeleteShapeHandle(new_shape);
}
TEST(OpsTest, TestShapeInference_VectorizeFunction) {
ShapeInferenceTestOp op("VectorizeTestOp");
TF_OpDefinitionBuilder* builder =
TF_NewOpDefinitionBuilder("VectorizeTestOp");
TF_OpDefinitionBuilderAddInput(builder, "input1: uint8");
TF_OpDefinitionBuilderAddOutput(builder, "output1: uint8");
TF_OpDefinitionBuilderSetShapeInferenceFunction(builder, &vectorize_shape_fn);
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_ASSERT_OK(shape_inference::ShapeInferenceTestutil::InferShapes(
op, "[4,5,9]", "[3]"));
TF_DeleteStatus(status);
}
TEST(OpsTest, AttributeAccessors) {
TF_OpDefinitionBuilder* builder =
TF_NewOpDefinitionBuilder("AttributeAccessorsOp");
TF_OpDefinitionBuilderAddAttr(builder, "foo1: int >= 2");
TF_OpDefinitionBuilderAddAttr(builder, "foo2: string=\"my string\"");
TF_OpDefinitionBuilderSetIsCommutative(builder, true);
TF_OpDefinitionBuilderSetIsAggregate(builder, true);
TF_OpDefinitionBuilderSetAllowsUninitializedInput(builder, true);
std::string deprecation_msg = "use something else instead";
TF_OpDefinitionBuilderDeprecated(builder, 4, deprecation_msg.c_str());
TF_Status* status = TF_NewStatus();
TF_RegisterOpDefinition(builder, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_Buffer* op_list_buffer = TF_GetAllOpList();
::tensorflow::OpList op_list;
op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length);
bool found = false;
for (const auto& op : op_list.op()) {
if (op.name() == "AttributeAccessorsOp") {
ASSERT_TRUE(op.is_commutative());
ASSERT_TRUE(op.is_aggregate());
ASSERT_TRUE(op.allows_uninitialized_input());
ASSERT_EQ(4, op.deprecation().version());
ASSERT_EQ(deprecation_msg, op.deprecation().explanation());
ASSERT_EQ(2, op.attr_size());
ASSERT_EQ("int", op.attr(0).type());
ASSERT_EQ(2, op.attr(0).minimum());
ASSERT_EQ("string", op.attr(1).type());
ASSERT_EQ("my string", op.attr(1).default_value().s());
found = true;
}
}
ASSERT_TRUE(found);
TF_DeleteStatus(status);
TF_DeleteBuffer(op_list_buffer);
}
#define C_CTX(x) reinterpret_cast<TF_ShapeInferenceContext*>(x)
#define C_SHP(x) reinterpret_cast<TF_ShapeHandle*>(x)
static OpDef MakeOpDef(int num_inputs, int num_outputs) {
OpRegistrationData op_reg_data;
OpDefBuilder b("dummy");
for (int i = 0; i < num_inputs; ++i) {
b.Input(strings::StrCat("i", i, ": float"));
}
for (int i = 0; i < num_outputs; ++i) {
b.Output(strings::StrCat("o", i, ": float"));
}
CHECK(b.Attr("foo:string").Finalize(&op_reg_data).ok());
return op_reg_data.op_def;
}
PartialTensorShape S(std::initializer_list<int64_t> dims) {
return PartialTensorShape(dims);
}
PartialTensorShape Unknown() { return PartialTensorShape(); }
TEST(OpsTest, ShapeInferenceWithRank) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(1, 0),
{S({10, 20, 30})}, {}, {}, {});
shape_inference::ShapeHandle in0 = c.input(0);
shape_inference::ShapeHandle s1;
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextWithRankAtMost(C_CTX(&c), C_SHP(&in0), 3, C_SHP(&s1),
status);
EXPECT_EQ("[10,20,30]", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRankAtLeast(C_CTX(&c), C_SHP(&in0), 3, C_SHP(&s1),
status);
EXPECT_EQ("[10,20,30]", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRankAtLeast(C_CTX(&c), C_SHP(&in0), 6, C_SHP(&s1),
status);
ASSERT_NE(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_ShapeInferenceContextWithRankAtMost(C_CTX(&c), C_SHP(&in0), 1, C_SHP(&s1),
status);
ASSERT_NE(TF_OK, TF_GetCode(status));
TF_SetStatus(status, TF_OK, "");
TF_ShapeInferenceContextWithRank(C_CTX(&c), C_SHP(&in0), 3, C_SHP(&s1),
status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRank(C_CTX(&c), C_SHP(&in0), 4, C_SHP(&s1),
status);
ASSERT_NE(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
TEST(OpsTest, ShapeInferenceWithRank_UnknownRank) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(2, 2),
{Unknown(), S({1, -1, 3})}, {}, {}, {});
shape_inference::ShapeHandle in0 = c.input(0);
shape_inference::ShapeHandle s1;
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextWithRankAtMost(C_CTX(&c), C_SHP(&in0), 1, C_SHP(&s1),
status);
EXPECT_EQ("?", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextWithRankAtLeast(C_CTX(&c), C_SHP(&in0), 1, C_SHP(&s1),
status);
EXPECT_EQ("?", c.DebugString(s1));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteStatus(status);
}
TEST(OpsTest, ShapeInferenceConcatenateShapes) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(2, 0),
{S({1, 2}), S({3, 4})}, {}, {}, {});
ASSERT_EQ(2, TF_ShapeInferenceContextNumInputs(C_CTX(&c)));
shape_inference::ShapeHandle a = c.input(0);
shape_inference::ShapeHandle b = c.input(1);
TF_ShapeHandle* result = TF_NewShapeHandle();
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextConcatenateShapes(C_CTX(&c), C_SHP(&a), C_SHP(&b),
result, status);
EXPECT_EQ(
"[1,2,3,4]",
c.DebugString(*reinterpret_cast<shape_inference::ShapeHandle*>(result)));
EXPECT_EQ(TF_OK, TF_GetCode(status));
TF_DeleteShapeHandle(result);
TF_DeleteStatus(status);
}
TEST(OpsTest, DimensionHandleValueKnown) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(2, 0),
{S({1, 2}), S({3, 4})}, {}, {}, {});
TF_ShapeHandle* handle =
TF_ShapeInferenceContextVectorFromSize(C_CTX(&c), 43);
ASSERT_EQ(
"[43]",
c.DebugString(*reinterpret_cast<shape_inference::ShapeHandle*>(handle)));
ASSERT_EQ(1, TF_ShapeInferenceContextRankKnown(C_CTX(&c), handle));
ASSERT_EQ(1, TF_ShapeInferenceContextRank(C_CTX(&c), handle));
TF_DimensionHandle* dim_handle = TF_NewDimensionHandle();
TF_ShapeInferenceContextDim(C_CTX(&c), handle, 0, dim_handle);
ASSERT_EQ(1, TF_DimensionHandleValueKnown(dim_handle));
ASSERT_EQ(43, TF_DimensionHandleValue(dim_handle));
TF_DeleteShapeHandle(handle);
TF_DeleteDimensionHandle(dim_handle);
}
TEST(OpsTest, ShapeInferenceSubshape) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(1, 0),
{S({10, 20, 30, 40, 50})}, {}, {}, {});
ASSERT_EQ("[10,20,30,40,50]", c.DebugString(c.input(0)));
TF_ShapeHandle* handle = TF_NewShapeHandle();
TF_Status* status = TF_NewStatus();
TF_ShapeInferenceContextGetInput(C_CTX(&c), 0, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
TF_ShapeInferenceContextSubshape(C_CTX(&c), handle, 1, -1, handle, status);
ASSERT_EQ(TF_OK, TF_GetCode(status));
ASSERT_EQ(
"[20,30,40]",
c.DebugString(*reinterpret_cast<shape_inference::ShapeHandle*>(handle)));
TF_DeleteStatus(status);
TF_DeleteShapeHandle(handle);
}
TEST(OpsTest, ShapeInferenceScalarShape) {
NodeDef def;
shape_inference::InferenceContext c(0, def, MakeOpDef(0, 0), {S({})}, {}, {},
{});
TF_ShapeHandle* TF_scalar_shape = TF_ShapeInferenceContextScalar(C_CTX(&c));
shape_inference::ShapeHandle* scalar_shape =
reinterpret_cast<shape_inference::ShapeHandle*>(TF_scalar_shape);
ASSERT_EQ("[]", c.DebugString(*scalar_shape));
TF_DeleteShapeHandle(TF_scalar_shape);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/python/mlir_wrapper/ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa9eb069-6e9e-4c5a-9df4-5d8c7ab16540 | cpp | tensorflow/tensorflow | xplane_to_profile_instructions | third_party/xla/xla/python/xplane_to_profile_instructions.cc | third_party/xla/xla/python/xplane_to_profile_instructions_test.cc | #include "xla/python/xplane_to_profile_instructions.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "xla/tsl/profiler/utils/file_system_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace {
constexpr char kXPlanePb[] = "xplane.pb";
constexpr char kCostNameSep[] = "::";
using tensorflow::profiler::XPlane;
using tensorflow::profiler::XSpace;
using tsl::profiler::CreateTfXPlaneVisitor;
using tsl::profiler::FindPlanesWithPrefix;
using tsl::profiler::FindPlaneWithName;
using tsl::profiler::GetStatTypeStr;
using tsl::profiler::HostEventType;
using tsl::profiler::IsInternalEvent;
using tsl::profiler::ProfilerJoinPath;
using tsl::profiler::StatType;
using tsl::profiler::XEventMetadataVisitor;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XLineVisitor;
using tsl::profiler::XPlaneVisitor;
using tsl::profiler::XStatVisitor;
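// Walks one device plane and records, per HLO op, the duration in microseconds
// of every non-internal event that carries an HloOp stat; entries are keyed as
// "<fingerprint>::<hlo_op>" when the module fingerprint is known.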
void GetXPlaneLatencyInfo(
const XPlaneVisitor& xplane,
const absl::flat_hash_map<std::string, std::string>& hlo_module_info,
absl::flat_hash_map<std::string, HloLatencyInfo>* hlo_latency_info) {
xplane.ForEachLine([hlo_latency_info,
hlo_module_info](const XLineVisitor& xline) {
if (xline.DisplayName() == tsl::profiler::kXlaAsyncOpLineName) {
return;
}
xline.ForEachEvent([hlo_latency_info,
hlo_module_info](const XEventVisitor& xevent) {
int64_t event_type =
xevent.Type().value_or(HostEventType::kUnknownHostEventType);
if (IsInternalEvent(event_type)) return;
std::optional<std::string> hlo_name = std::nullopt;
std::optional<std::string> hlo_module_name = std::nullopt;
std::optional<std::string> fingerprint = std::nullopt;
std::optional<int64_t> program_id = std::nullopt;
auto for_each_stat = [&](const XStatVisitor& stat) {
if (stat.ValueCase() == tsl::profiler::XStat::VALUE_NOT_SET) return;
if (stat.Name() == GetStatTypeStr(StatType::kHloOp)) {
hlo_name = stat.ToString();
}
if (stat.Name() == GetStatTypeStr(StatType::kProgramId)) {
program_id = stat.IntValue();
}
if (stat.Name() == GetStatTypeStr(StatType::kHloModule)) {
hlo_module_name = stat.ToString();
}
};
xevent.Metadata().ForEachStat(for_each_stat);
xevent.ForEachStat(for_each_stat);
if (!hlo_name.has_value() || !hlo_module_name.has_value()) {
return;
}
if (hlo_module_name.has_value()) {
std::string fingerprint_key = hlo_module_name.value();
if (program_id.has_value()) {
fingerprint_key = tsl::profiler::HloModuleNameWithProgramId(
hlo_module_name.value(), program_id.value());
}
if (hlo_module_info.contains(fingerprint_key)) {
fingerprint = hlo_module_info.at(fingerprint_key);
}
}
double latency = static_cast<double>(xevent.DurationNs()) / 1e3;
std::string key = hlo_name.value();
if (fingerprint.has_value()) {
key = absl::StrCat(fingerprint.value(), kCostNameSep, hlo_name.value());
}
(*hlo_latency_info)[key].durations.emplace_back(latency);
});
});
}
std::unique_ptr<xla::HloModule> CreateModuleFromProto(
const xla::HloModuleProto& proto) {
auto config = xla::HloModule::CreateModuleConfigFromProto(proto, {});
if (config.ok()) {
auto module = xla::HloModule::CreateFromProto(proto, config.value());
if (module.ok()) {
return std::move(*module);
}
}
return nullptr;
}
std::optional<std::string> GetHloModuleFingerprint(
const xla::HloModuleProto& hlo_module_proto) {
std::unique_ptr<xla::HloModule> hlo_module =
CreateModuleFromProto(hlo_module_proto);
if (hlo_module == nullptr) {
return std::nullopt;
}
const auto& map = hlo_module->entry_computation()
->root_instruction()
->frontend_attributes()
.map();
auto it = map.find("fingerprint_before_lhs");
if (it != map.end()) {
return it->second;
}
return std::nullopt;
}
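// Scans the metadata plane for serialized HloProtos and records, per module
// (keyed by module name and program id), the fingerprint read from the entry
// root's frontend attributes.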
void GetXPlaneHloModuleInfo(
const XPlaneVisitor& xplane,
absl::flat_hash_map<std::string, std::string>* hlo_module_info) {
xplane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) {
event_metadata.ForEachStat([&](const XStatVisitor& stat) {
xla::HloProto hlo_proto;
if (tsl::ParseProtoUnlimited(&hlo_proto, stat.BytesValue().data(),
stat.BytesValue().size())) {
const xla::HloModuleProto& hlo_module_proto = hlo_proto.hlo_module();
std::optional<std::string> fingerprint =
GetHloModuleFingerprint(hlo_module_proto);
if (fingerprint.has_value()) {
std::string key_with_id = tsl::profiler::HloModuleNameWithProgramId(
hlo_module_proto.name(), hlo_module_proto.id());
(*hlo_module_info)[key_with_id] = fingerprint.value();
}
}
});
});
}
}
absl::Status ConvertXplaneUnderLogdirToProfiledInstructionsProto(
const std::string& logdir, tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto) {
std::vector<std::string> children_path;
TF_RETURN_IF_ERROR(tsl::Env::Default()->GetChildren(logdir, &children_path));
if (children_path.empty()) {
return absl::NotFoundError(
absl::StrCat("Could not find file under: ", logdir));
}
std::vector<tensorflow::profiler::XSpace> xspaces;
for (const std::string& child_path : children_path) {
if (absl::StrContains(child_path, kXPlanePb)) {
std::string xspace_path = ProfilerJoinPath(logdir, child_path);
tensorflow::profiler::XSpace xspace;
TF_RETURN_IF_ERROR(
ReadBinaryProto(tsl::Env::Default(), xspace_path, &xspace));
xspaces.emplace_back(xspace);
}
}
return ConvertXplaneToProfiledInstructionsProto(xspaces,
profiled_instructions_proto);
}
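// Aggregates HLO op latencies across the given XSpaces (GPU, TPU, or custom
// device planes); each op's reported cost is the mean of its observed
// durations, in microseconds.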
absl::Status ConvertXplaneToProfiledInstructionsProto(
std::vector<tensorflow::profiler::XSpace> xspaces,
tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto) {
absl::flat_hash_map<std::string, HloLatencyInfo> hlo_latency_info;
absl::flat_hash_map<std::string, std::string> hlo_module_info;
for (const XSpace& xspace : xspaces) {
const XPlane* metadata_plane =
FindPlaneWithName(xspace, tsl::profiler::kMetadataPlaneName);
if (metadata_plane != nullptr) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(metadata_plane);
GetXPlaneHloModuleInfo(xplane, &hlo_module_info);
}
std::vector<const XPlane*> device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kGpuPlanePrefix);
if (device_planes.empty()) {
device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kTpuPlanePrefix);
}
if (device_planes.empty()) {
device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kCustomPlanePrefix);
}
for (const XPlane* device_plane : device_planes) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
GetXPlaneLatencyInfo(xplane, hlo_module_info, &hlo_latency_info);
}
}
for (const auto& iter : hlo_latency_info) {
auto* cost = profiled_instructions_proto->add_costs();
std::vector<double> durations = iter.second.durations;
double sum = std::accumulate(durations.begin(), durations.end(), 0.0);
cost->set_cost_us(sum / durations.size());
cost->set_name(iter.first);
}
return absl::OkStatus();
}
} | #include "xla/python/xplane_to_profile_instructions.h"
#include <cstdint>
#include <memory>
#include <string>
#include "xla/service/hlo.pb.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "xla/tsl/profiler/rpc/client/save_profile.h"
#include "xla/tsl/profiler/utils/file_system_utils.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace {
using tensorflow::profiler::XSpace;
using tsl::profiler::GetStatTypeStr;
using tsl::profiler::GpuPlaneName;
using tsl::profiler::kHostThreadsPlaneName;
using tsl::profiler::kMetadataPlaneName;
using tsl::profiler::StatType;
using tsl::profiler::XEventBuilder;
using tsl::profiler::XLineBuilder;
using tsl::profiler::XPlaneBuilder;
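// Builds a synthetic XSpace with a host plane and GPU device planes whose
// "custom-call" events get the two given durations (in nanoseconds).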
void CreateXSpace(XSpace* space, int first_device_latency,
int second_device_latency) {
XPlaneBuilder host_plane(space->add_planes());
host_plane.SetName(kHostThreadsPlaneName);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
thread1.SetName("thread1");
XEventBuilder event1 =
thread1.AddEvent(*host_plane.GetOrCreateEventMetadata("event1"));
event1.SetTimestampNs(150000);
event1.SetDurationNs(10000);
event1.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Relu"));
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
thread2.SetName("thread2");
XEventBuilder event2 =
thread2.AddEvent(*host_plane.GetOrCreateEventMetadata("event2"));
event2.SetTimestampNs(160000);
event2.SetDurationNs(10000);
event2.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Conv2D"));
int64_t program_id = 1;
XPlaneBuilder device_plane(space->add_planes());
device_plane.SetName(GpuPlaneName(0));
device_plane.SetId(0);
XLineBuilder stream1 = device_plane.GetOrCreateLine(30);
stream1.SetName("gpu stream 1");
XEventBuilder event3 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event3.SetTimestampNs(180000);
event3.SetDurationNs(first_device_latency);
event3.AddStatValue(
*device_plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloOp)),
*device_plane.GetOrCreateStatMetadata("custom-call"));
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloModule)),
*device_plane.GetOrCreateStatMetadata("test_module"));
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
program_id);
XPlaneBuilder device_plane_2(space->add_planes());
device_plane_2.SetName(GpuPlaneName(1));
device_plane_2.SetId(0);
XLineBuilder stream2 = device_plane.GetOrCreateLine(30);
stream2.SetName("gpu stream 1");
XEventBuilder event5 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event5.SetTimestampNs(180000);
event5.SetDurationNs(second_device_latency);
event5.AddStatValue(
*device_plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloOp)),
*device_plane.GetOrCreateStatMetadata("custom-call"));
event5.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloModule)),
*device_plane.GetOrCreateStatMetadata("test_module"));
event5.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
program_id);
}
void CreateXSpaceWithFingerprint(XSpace* space, int first_device_latency,
int second_device_latency) {
XPlaneBuilder metadata_plane(space->add_planes());
metadata_plane.SetName(kMetadataPlaneName);
const char* hlo_text = R"(
HloModule test_module
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
%ag-start = (f32[32], f32[64]) all-gather-start(p3), dimensions={0}
%ag-done = f32[64] all-gather-done(%ag-start)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[64], f32[32,32]) tuple(ar-done, %ag-done, add5)
})";
xla::HloModuleConfig config;
  auto module = std::make_unique<VerifiedHloModule>(
      "test_module", config, /*verifier_layout_sensitive=*/false,
      /*allow_mixed_precision_in_hlo_verifier=*/true,
      ShapeUtil::ByteSizeOfElements);
if (module->ParseHloStringAndVerifyModule(hlo_text).ok()) {
HloInstruction* root = module->entry_computation()->root_instruction();
FrontendAttributes attributes;
(*attributes.mutable_map())["fingerprint_before_lhs"] = "08a5";
root->add_frontend_attributes(attributes);
xla::HloModuleProto hlo_module_proto = module->ToProto();
hlo_module_proto.set_id(1);
xla::HloProto hlo_proto;
*hlo_proto.mutable_hlo_module() = hlo_module_proto;
int64_t program_id = 1;
tsl::profiler::XEventMetadata* event_metadata =
metadata_plane.GetOrCreateEventMetadata(program_id);
event_metadata->set_name(tsl::profiler::HloModuleNameWithProgramId(
hlo_proto.hlo_module().name(), program_id));
tsl::profiler::XStatsBuilder<tsl::profiler::XEventMetadata> event_stats(
event_metadata, &metadata_plane);
auto* hlo_proto_stat = metadata_plane.GetOrCreateStatMetadata(
GetStatTypeStr(tsl::profiler::StatType::kHloProto));
event_stats.AddStatValue(*hlo_proto_stat, hlo_proto);
}
return CreateXSpace(space, first_device_latency, second_device_latency);
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneUnderLogdirToProfiledInstructionsProto) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
std::string logdir = testing::TempDir() + "/logdir";
std::string run = tsl::profiler::GetCurrentTimeStampAsString();
const std::string path = tsl::profiler::ProfilerJoinPath(logdir, run);
XSpace xspace_first_host;
CreateXSpace(&xspace_first_host, 10000, 10000);
auto status =
tsl::profiler::SaveXSpace(logdir, run, "host_0", xspace_first_host);
EXPECT_TRUE(status.ok());
XSpace xspace_2nd_host;
CreateXSpace(&xspace_2nd_host, 15000, 5000);
status = tsl::profiler::SaveXSpace(logdir, run, "host_1", xspace_2nd_host);
EXPECT_TRUE(status.ok());
EXPECT_TRUE(
ConvertXplaneUnderLogdirToProfiledInstructionsProto(path, &profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneUnderLogdirToProfiledInstructionsProtoWithFingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
std::string logdir = testing::TempDir() + "/logdir";
std::string run = tsl::profiler::GetCurrentTimeStampAsString();
const std::string path = tsl::profiler::ProfilerJoinPath(logdir, run);
XSpace xspace_first_host;
CreateXSpaceWithFingerprint(&xspace_first_host, 10000, 10000);
auto status =
tsl::profiler::SaveXSpace(logdir, run, "host_0", xspace_first_host);
EXPECT_TRUE(status.ok());
XSpace xspace_2nd_host;
CreateXSpaceWithFingerprint(&xspace_2nd_host, 15000, 5000);
status = tsl::profiler::SaveXSpace(logdir, run, "host_1", xspace_2nd_host);
EXPECT_TRUE(status.ok());
EXPECT_TRUE(
ConvertXplaneUnderLogdirToProfiledInstructionsProto(path, &profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "08a5::custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneToProfiledInstructionsProto) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
XSpace xspace_a;
CreateXSpace(&xspace_a, 10000, 10000);
XSpace xspace_b;
CreateXSpace(&xspace_b, 15000, 5000);
EXPECT_TRUE(ConvertXplaneToProfiledInstructionsProto({xspace_a, xspace_b},
&profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneToProfiledInstructionsProtoWithFingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
XSpace xspace_a;
CreateXSpaceWithFingerprint(&xspace_a, 10000, 10000);
XSpace xspace_b;
CreateXSpaceWithFingerprint(&xspace_b, 15000, 5000);
EXPECT_TRUE(ConvertXplaneToProfiledInstructionsProto({xspace_a, xspace_b},
&profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "08a5::custom-call");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/xplane_to_profile_instructions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/xplane_to_profile_instructions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c14d0579-faed-4c33-bc1a-ab53d3937844 | cpp | tensorflow/tensorflow | aggregate_profile | third_party/xla/xla/python/aggregate_profile.cc | third_party/xla/xla/python/aggregate_profile_test.cc | #include "xla/python/aggregate_profile.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/python/xplane_to_profile_instructions.h"
namespace xla {
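// Merges per-op costs from the given profiles; for each op the result keeps
// the duration at index percentile / 100 * (n - 1) (truncated) of its sorted
// observations. Percentiles outside [0, 100] leave the result untouched.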
void AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto> profiles,
int percentile,
tensorflow::profiler::ProfiledInstructionsProto *result_profile) {
if (percentile < 0 || percentile > 100) return;
absl::flat_hash_map<std::string, HloLatencyInfo> hlo_latency_info;
for (const auto &profile : profiles) {
for (const auto &cost : profile.costs()) {
hlo_latency_info[cost.name()].durations.emplace_back(cost.cost_us());
}
}
for (const auto &iter : hlo_latency_info) {
auto *cost = result_profile->add_costs();
std::vector<double> durations = iter.second.durations;
int index = 0;
if (durations.size() > 1) {
std::sort(durations.begin(), durations.end());
index = percentile / 100.0 * (durations.size() - 1);
}
cost->set_cost_us(durations[index]);
cost->set_name(iter.first);
}
}
} | #include "xla/python/aggregate_profile.h"
#include <map>
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace {
using tensorflow::profiler::ProfiledInstructionsProto;
TEST(AggregateProfiledInstructionsProtoTest, aggregateAndGetPercentile) {
tensorflow::profiler::ProfiledInstructionsProto profile_a;
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(10);
cost_a->set_name("reduce");
}
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(30);
cost_a->set_name("copy");
}
tensorflow::profiler::ProfiledInstructionsProto profile_c;
{
auto *cost_c = profile_c.add_costs();
cost_c->set_cost_us(30);
cost_c->set_name("reduce");
}
std::vector<tensorflow::profiler::ProfiledInstructionsProto> profiles = {
profile_a, profile_c};
std::vector<int> custom_call_costs = {0, 10, 20, 30, 40, 50,
60, 70, 80, 90, 100};
for (int cost : custom_call_costs) {
tensorflow::profiler::ProfiledInstructionsProto profile_custom_call;
{
auto *cost_c = profile_custom_call.add_costs();
cost_c->set_cost_us(cost);
cost_c->set_name("custom-call");
}
profiles.push_back(profile_custom_call);
}
tensorflow::profiler::ProfiledInstructionsProto result_90th;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
90, &result_90th);
EXPECT_EQ(result_90th.costs_size(), 3);
std::map<std::string, float> costs;
for (const auto &cost : result_90th.costs()) {
costs[cost.name()] = cost.cost_us();
}
EXPECT_EQ(costs["copy"], 30);
EXPECT_EQ(costs["custom-call"], 90);
EXPECT_EQ(costs["reduce"], 10);
tensorflow::profiler::ProfiledInstructionsProto result_10th;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
10, &result_10th);
EXPECT_EQ(result_10th.costs_size(), 3);
for (const auto &cost : result_10th.costs()) {
costs[cost.name()] = cost.cost_us();
}
EXPECT_EQ(costs["copy"], 30);
EXPECT_EQ(costs["custom-call"], 10);
EXPECT_EQ(costs["reduce"], 10);
}
TEST(AggregateProfiledInstructionsProtoTest, getIncorrectPercentile) {
tensorflow::profiler::ProfiledInstructionsProto profile_a;
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(10);
cost_a->set_name("reduce");
}
std::vector<tensorflow::profiler::ProfiledInstructionsProto> profiles = {
profile_a};
tensorflow::profiler::ProfiledInstructionsProto result;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
-1, &result);
EXPECT_EQ(result.costs_size(), 0);
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
101, &result);
EXPECT_EQ(result.costs_size(), 0);
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
100, &result);
EXPECT_EQ(result.costs_size(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/aggregate_profile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/aggregate_profile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c1927ee1-1ede-4759-a812-191e2251d57c | cpp | tensorflow/tensorflow | dlpack | tensorflow/c/eager/dlpack.cc | tensorflow/c/eager/dlpack_test.cc | #include "tensorflow/c/eager/dlpack.h"
#include <string>
#include "include/dlpack/dlpack.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
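// Keeps the exported tensorflow::Tensor alive via a TensorReference and owns
// the shape/stride storage that the embedded DLManagedTensor points into.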
struct TfDlManagedTensorCtx {
TensorReference reference;
std::vector<int64_t> shape;
std::vector<int64_t> strides;
DLManagedTensor tensor;
explicit TfDlManagedTensorCtx(const TensorReference& ref) : reference(ref) {}
};
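// Returns the tensorflow::Tensor behind a local TFE_TensorHandle, or sets an
// InvalidArgument status for null or non-local handles.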
const Tensor* GetTensorFromHandle(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::TensorHandle* handle =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(h));
if (handle->Type() != TensorHandle::LOCAL) {
status->status = tensorflow::errors::InvalidArgument(
"DLPack doesn't support ", handle->TypeString(), " tensor");
return nullptr;
}
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return nullptr;
}
return tensor;
}
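// Deleter installed on exported DLManagedTensors: drops the tensor reference
// and frees the owning TfDlManagedTensorCtx.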
void DLManagedTensorDeleter(DLManagedTensor* arg) {
TfDlManagedTensorCtx* owner =
static_cast<TfDlManagedTensorCtx*>(arg->manager_ctx);
owner->reference.Unref();
delete owner;
}
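// Maps a TF_DataType onto the corresponding DLPack DLDataType; unsupported
// types produce an InvalidArgument status.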
DLDataType GetDlDataType(TF_DataType data_type, TF_Status* status) {
DLDataType dtype;
dtype.lanes = 1;
dtype.bits = TF_DataTypeSize(data_type) * 8;
switch (data_type) {
case TF_DataType::TF_BOOL:
dtype.code = DLDataTypeCode::kDLBool;
break;
case TF_DataType::TF_HALF:
case TF_DataType::TF_FLOAT:
case TF_DataType::TF_DOUBLE:
dtype.code = DLDataTypeCode::kDLFloat;
break;
case TF_DataType::TF_INT8:
case TF_DataType::TF_INT16:
case TF_DataType::TF_INT32:
case TF_DataType::TF_INT64:
dtype.code = DLDataTypeCode::kDLInt;
break;
case TF_DataType::TF_UINT8:
case TF_DataType::TF_UINT16:
case TF_DataType::TF_UINT32:
case TF_DataType::TF_UINT64:
dtype.code = DLDataTypeCode::kDLUInt;
break;
case TF_DataType::TF_BFLOAT16:
dtype.code = DLDataTypeCode::kDLBfloat;
break;
case TF_DataType::TF_COMPLEX64:
case TF_DataType::TF_COMPLEX128:
dtype.code = DLDataTypeCode::kDLComplex;
break;
default:
status->status = tensorflow::errors::InvalidArgument(
DataType_Name(static_cast<DataType>(data_type)),
" is not supported by dlpack");
break;
}
return dtype;
}
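// Derives the DLDevice (CPU, CUDA or ROCm plus device ordinal) from the
// handle's backing device name.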
DLDevice GetDlContext(TFE_TensorHandle* h, TF_Status* status) {
DLDevice ctx;
const char* device_name =
tensorflow::unwrap(h)->BackingDeviceName(&status->status);
DeviceNameUtils::ParsedName parsed_name;
tensorflow::DeviceNameUtils::ParseFullName(device_name, &parsed_name);
std::string device_type = parsed_name.type;
int device_id = 0;
if (parsed_name.has_id) {
device_id = parsed_name.id;
}
ctx.device_id = device_id;
if (device_type == "CPU") {
ctx.device_type = DLDeviceType::kDLCPU;
} else if (device_type == "GPU") {
#if TENSORFLOW_USE_ROCM
ctx.device_type = DLDeviceType::kDLROCM;
#else
ctx.device_type = DLDeviceType::kDLCUDA;
#endif
} else {
status->status = tensorflow::errors::InvalidArgument(
"Unsupported Device Type for dlpack");
}
return ctx;
}
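// Maps a DLDevice back to a TensorFlow device name, or returns nullopt for
// device types that are not supported.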
absl::optional<std::string> DeviceNameFromDlContext(const DLDevice& ctx,
TF_Status* status) {
switch (ctx.device_type) {
case DLDeviceType::kDLCPU:
return "CPU:0";
case DLDeviceType::kDLCUDA:
return absl::StrCat("GPU:", ctx.device_id);
case DLDeviceType::kDLROCM:
return absl::StrCat("GPU:", ctx.device_id);
default:
return absl::nullopt;
}
}
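// Converts a DLPack DLDataType into a TF_DataType, validating the bit width
// for each type code.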
Status TfDataTypeFormDlDataType(const DLDataType& dtype,
TF_DataType* tf_dtype) {
switch (dtype.code) {
case DLDataTypeCode::kDLBool:
if (dtype.bits != 8) {
return tensorflow::errors::InvalidArgument(
"Only DLPack bools of bitwidth 8 are supported, got: ", dtype.bits);
}
*tf_dtype = TF_DataType::TF_BOOL;
return absl::OkStatus();
case DLDataTypeCode::kDLUInt:
switch (dtype.bits) {
case 8:
*tf_dtype = TF_DataType::TF_UINT8;
return absl::OkStatus();
case 16:
*tf_dtype = TF_DataType::TF_UINT16;
return absl::OkStatus();
case 32:
*tf_dtype = TF_DataType::TF_UINT32;
return absl::OkStatus();
case 64:
*tf_dtype = TF_DataType::TF_UINT64;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument("Unsupported UInt bits: ",
dtype.bits);
}
return absl::OkStatus();
case DLDataTypeCode::kDLInt:
switch (dtype.bits) {
case 8:
*tf_dtype = TF_DataType::TF_INT8;
return absl::OkStatus();
case 16:
*tf_dtype = TF_DataType::TF_INT16;
return absl::OkStatus();
case 32:
*tf_dtype = TF_DataType::TF_INT32;
return absl::OkStatus();
case 64:
*tf_dtype = TF_DataType::TF_INT64;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument("Unsupported Int bits: ",
dtype.bits);
}
return absl::OkStatus();
case DLDataTypeCode::kDLFloat:
switch (dtype.bits) {
case 16:
*tf_dtype = TF_DataType::TF_HALF;
return absl::OkStatus();
case 32:
*tf_dtype = TF_DataType::TF_FLOAT;
return absl::OkStatus();
case 64:
*tf_dtype = TF_DataType::TF_DOUBLE;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument("Unsupported Float bits: ",
dtype.bits);
}
break;
case DLDataTypeCode::kDLBfloat:
switch (dtype.bits) {
case 16:
*tf_dtype = TF_DataType::TF_BFLOAT16;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument(
"Unsupported BFloat bits: ", dtype.bits);
}
break;
case DLDataTypeCode::kDLComplex:
switch (dtype.bits) {
case 64:
*tf_dtype = TF_DataType::TF_COMPLEX64;
return absl::OkStatus();
case 128:
*tf_dtype = TF_DataType::TF_COMPLEX128;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument(
"Unsupported Complex bits: ", dtype.bits);
}
break;
default:
return tensorflow::errors::InvalidArgument("Unsupported Type Codes: ",
dtype.code);
}
}
void DeallocatorWrapperFunc(void* data, size_t len, void* dlmt_vptr) {
TFE_CallDLManagedTensorDeleter(dlmt_vptr);
}
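// Returns true iff `stride_arr` describes a compact row-major layout of
// `shape_arr`; strides of size-1 dimensions are ignored, and any zero-sized
// dimension makes the layout trivially valid.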
bool IsValidStrideCompactRowMajorData(int64_t* shape_arr, int64_t* stride_arr,
int ndim) {
bool valid = true;
int64_t expected_stride = 1;
for (int i = ndim - 1; i >= 0; --i) {
if (shape_arr[i] == 0) return true;
if (shape_arr[i] != 1 && stride_arr[i] != expected_stride) {
valid = false;
}
expected_stride *= shape_arr[i];
}
return valid;
}
}
void TFE_CallDLManagedTensorDeleter(void* dlm_ptr) {
DLManagedTensor* dlMTensor = static_cast<DLManagedTensor*>(dlm_ptr);
if (dlMTensor->deleter != nullptr) {
dlMTensor->deleter(dlMTensor);
}
}
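// Exports a TFE_TensorHandle as a DLManagedTensor, filling in the device,
// dtype, shape and compact row-major strides; the returned pointer is released
// through the embedded deleter.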
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
auto tf_dlm_context = GetDlContext(h, status);
if (!status->status.ok()) {
return nullptr;
}
auto* tf_dlm_data = TFE_TensorHandleDevicePointer(h, status);
if (!status->status.ok()) {
return nullptr;
}
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
auto tf_dlm_type = GetDlDataType(data_type, status);
if (!status->status.ok()) {
return nullptr;
}
TensorReference tensor_ref(*tensor);
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
dlm_tensor->dl_tensor.device = tf_dlm_context;
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
dlm_tensor->dl_tensor.data = tf_dlm_data;
dlm_tensor->dl_tensor.dtype = tf_dlm_type;
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
shape_arr->resize(ndim);
stride_arr->resize(ndim, 1);
for (int i = 0; i < ndim; i++) {
(*shape_arr)[i] = tensor->dim_size(i);
}
for (int i = ndim - 2; i >= 0; --i) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
dlm_tensor->dl_tensor.shape = shape_arr->data();
dlm_tensor->dl_tensor.strides = stride_arr->data();
  dlm_tensor->dl_tensor.byte_offset = 0;
return static_cast<void*>(dlm_tensor);
}
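// Builds a TFE_TensorHandle backed by the memory of a DLManagedTensor,
// validating the device type, dtype, zero byte_offset and (when present)
// compact row-major strides.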
TFE_TensorHandle* TFE_HandleFromDLPack(void* dlm, TF_Status* status,
TFE_Context* ctx) {
DLManagedTensor* dlmt = static_cast<DLManagedTensor*>(dlm);
DLTensor* dl_tensor = &dlmt->dl_tensor;
absl::optional<std::string> device_name =
DeviceNameFromDlContext(dl_tensor->device, status);
if (!device_name.has_value()) {
status->status =
tensorflow::errors::InvalidArgument("Unsupported Device Type");
return nullptr;
}
TF_DataType dtype;
Status s = TfDataTypeFormDlDataType(dl_tensor->dtype, &dtype);
if (!s.ok()) {
status->status = std::move(s);
return nullptr;
}
int num_dims = dl_tensor->ndim;
const int64_t* dims = dl_tensor->shape;
void* data = dl_tensor->data;
if (dl_tensor->byte_offset != 0) {
status->status = tensorflow::errors::InvalidArgument(
"Unsupported byte_offset (", dl_tensor->byte_offset,
") from DLPack, must be zero");
return nullptr;
}
size_t total_bytes = dl_tensor->dtype.bits / 8;
for (int i = 0; i < num_dims; i++) {
total_bytes *= dims[i];
}
if (dl_tensor->strides != nullptr &&
!IsValidStrideCompactRowMajorData(dl_tensor->shape, dl_tensor->strides,
num_dims)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid strides array from DLPack");
return nullptr;
}
TFE_TensorHandle* handle = TFE_NewTensorHandleFromDeviceMemory(
ctx, device_name.value().c_str(), dtype, dims, num_dims, data,
total_bytes, &DeallocatorWrapperFunc, dlmt, status);
return handle;
}
} | #include "tensorflow/c/eager/dlpack.h"
#include <vector>
#include "absl/strings/str_join.h"
#include "include/dlpack/dlpack.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
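// Wraps locally owned float data in a DLManagedTensor with the given shape
// and strides, imports it as a handle, re-exports it via DLPack, and checks
// that metadata and contents survive the round trip.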
void TestHandleFromDLPack(TF_Status* status, TFE_Context* ctx,
std::vector<int64_t> shape,
std::vector<int64_t> strides) {
size_t num_elements = 1;
for (int i = 0; i < static_cast<int32_t>(shape.size()); ++i) {
num_elements *= shape[i];
}
std::vector<float> data(num_elements);
for (size_t j = 0; j < num_elements; ++j) {
data[j] = j;
}
DLManagedTensor dlm_in = {};
DLTensor* dltensor_in = &dlm_in.dl_tensor;
dltensor_in->data = data.data();
dltensor_in->device = {kDLCPU, 0};
dltensor_in->ndim = static_cast<int32_t>(shape.size());
dltensor_in->dtype = {kDLFloat, 32, 1};
dltensor_in->shape = shape.data();
dltensor_in->strides = strides.data();
TFE_TensorHandle* handle = TFE_HandleFromDLPack(&dlm_in, status, ctx);
ASSERT_NE(handle, nullptr)
<< TF_Message(status) << " (shape=[" << absl::StrJoin(shape, ",")
<< "], strides=[" << absl::StrJoin(strides, ",") << "])";
auto* dlm_out =
static_cast<DLManagedTensor*>(TFE_HandleToDLPack(handle, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const DLTensor* dltensor_out = &dlm_out->dl_tensor;
EXPECT_EQ(dltensor_out->device.device_type, dltensor_in->device.device_type);
EXPECT_EQ(dltensor_out->device.device_id, dltensor_in->device.device_id);
EXPECT_EQ(dltensor_out->ndim, dltensor_in->ndim);
EXPECT_EQ(dltensor_out->dtype.code, dltensor_in->dtype.code);
EXPECT_EQ(dltensor_out->dtype.bits, dltensor_in->dtype.bits);
EXPECT_EQ(dltensor_out->dtype.lanes, dltensor_in->dtype.lanes);
for (int i = 0; i < dltensor_in->ndim; ++i) {
EXPECT_EQ(dltensor_out->shape[i], dltensor_in->shape[i]);
if (dltensor_out->strides) {
if (i == dltensor_in->ndim - 1) {
EXPECT_EQ(dltensor_out->strides[i], 1);
} else {
EXPECT_EQ(dltensor_out->strides[i],
dltensor_out->shape[i + 1] * dltensor_out->strides[i + 1]);
}
}
}
const float* data_in = static_cast<const float*>(dltensor_in->data);
const float* data_out = static_cast<const float*>(dltensor_out->data);
for (size_t j = 0; j < num_elements; ++j) {
EXPECT_EQ(data_out[j], data_in[j]);
}
TFE_CallDLManagedTensorDeleter(dlm_out);
TFE_DeleteTensorHandle(handle);
}
TEST(DLPack, HandleFromDLPackStrides) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TestHandleFromDLPack(status, ctx, {}, {});
TestHandleFromDLPack(status, ctx, {4}, {});
TestHandleFromDLPack(status, ctx, {4}, {1});
TestHandleFromDLPack(status, ctx, {4, 3, 2}, {});
TestHandleFromDLPack(status, ctx, {4, 3, 2}, {6, 2, 1});
TestHandleFromDLPack(status, ctx, {1}, {1});
TestHandleFromDLPack(status, ctx, {1}, {0});
TestHandleFromDLPack(status, ctx, {4, 1, 2}, {2, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 1, 2}, {2, 0, 1});
TestHandleFromDLPack(status, ctx, {4, 3, 1}, {3, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 3, 1}, {3, 1, 0});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 2, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 0, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 2, 0});
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/dlpack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/dlpack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d8a30af-9d88-44c6-8cd3-014ba40aa72e | cpp | tensorflow/tensorflow | types | tensorflow/compiler/mlir/python/mlir_wrapper/types.cc | tensorflow/lite/toco/tflite/types_test.cc | #include "mlir/IR/BuiltinTypes.h"
#include "tensorflow/compiler/mlir/python/mlir_wrapper/mlir_wrapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
void init_types(py::module& m) {
py::class_<mlir::Type> Type(m, "Type");
py::class_<mlir::FunctionType, mlir::Type>(m, "FunctionType")
.def("getResults",
[](mlir::FunctionType& ft) { return ft.getResults().vec(); });
py::class_<mlir::FloatType, mlir::Type>(m, "FloatType")
.def("getBF16", &mlir::FloatType::getBF16)
.def("getF16", &mlir::FloatType::getF16)
.def("getF32", &mlir::FloatType::getF32)
.def("getF64", &mlir::FloatType::getF64);
py::class_<mlir::IntegerType, mlir::Type>(m, "IntegerType")
.def("get", [](mlir::MLIRContext* context, unsigned width) {
return mlir::IntegerType::get(context, width,
mlir::IntegerType::Signless);
});
py::class_<mlir::UnrankedTensorType, mlir::Type>(m, "UnrankedTensorType")
.def("get", &mlir::UnrankedTensorType::get);
py::class_<mlir::RankedTensorType, mlir::Type>(m, "RankedTensorType")
.def("get", [](std::vector<int64_t> shape, mlir::Type ty) {
return mlir::RankedTensorType::get(mlir::ArrayRef<int64_t>(shape), ty);
});
} | #include "tensorflow/lite/toco/tflite/types.h"
#include <complex>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/runtime/types.h"
namespace toco {
namespace tflite {
namespace {
using flatbuffers::Offset;
using flatbuffers::Vector;
static const ArrayDataType kUnsupportedTocoTypes[] = {ArrayDataType::kNone};
static const ::tflite::TensorType kUnsupportedTfLiteTypes[] = {
::tflite::TensorType_FLOAT16};
MATCHER_P(HasOffset, value, "") { return arg.o == value; }
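// Serializes `items` of array type T into a tflite Tensor/Buffer pair and
// deserializes the result back into an Array.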
template <ArrayDataType T>
Array ToFlatBufferAndBack(std::initializer_list<::toco::DataType<T>> items) {
Array src;
src.data_type = T;
src.GetMutableBuffer<T>().data = items;
Array result;
flatbuffers::FlatBufferBuilder builder;
  builder.Finish(CreateTensor(builder, 0, DataType::Serialize(T), 1));
flatbuffers::FlatBufferBuilder buffer_builder;
Offset<Vector<uint8_t>> data_buffer =
DataBuffer::Serialize(src, &buffer_builder);
buffer_builder.Finish(::tflite::CreateBuffer(buffer_builder, data_buffer));
auto* tensor =
flatbuffers::GetRoot<::tflite::Tensor>(builder.GetBufferPointer());
auto* buffer =
flatbuffers::GetRoot<::tflite::Buffer>(buffer_builder.GetBufferPointer());
DataBuffer::Deserialize(*tensor, *buffer, &result);
return result;
}
TEST(DataType, SupportedTypes) {
std::vector<std::pair<ArrayDataType, ::tflite::TensorType>> testdata = {
{ArrayDataType::kUint8, ::tflite::TensorType_UINT8},
{ArrayDataType::kInt32, ::tflite::TensorType_INT32},
{ArrayDataType::kUint32, ::tflite::TensorType_UINT32},
{ArrayDataType::kInt64, ::tflite::TensorType_INT64},
{ArrayDataType::kFloat, ::tflite::TensorType_FLOAT32},
{ArrayDataType::kBool, ::tflite::TensorType_BOOL},
{ArrayDataType::kComplex64, ::tflite::TensorType_COMPLEX64}};
for (auto x : testdata) {
EXPECT_EQ(x.second, DataType::Serialize(x.first));
EXPECT_EQ(x.first, DataType::Deserialize(x.second));
}
}
TEST(DataType, UnsupportedTypes) {
for (::tflite::TensorType t : kUnsupportedTfLiteTypes) {
EXPECT_DEATH(DataType::Deserialize(t), "Unhandled tensor type.");
}
for (ArrayDataType t : kUnsupportedTocoTypes) {
EXPECT_EQ(::tflite::TensorType_FLOAT32, DataType::Serialize(t));
}
}
TEST(DataBuffer, EmptyBuffers) {
flatbuffers::FlatBufferBuilder builder;
Array array;
EXPECT_THAT(DataBuffer::Serialize(array, &builder), HasOffset(0));
builder.Finish(::tflite::CreateTensor(builder));
auto* tensor =
flatbuffers::GetRoot<::tflite::Tensor>(builder.GetBufferPointer());
flatbuffers::FlatBufferBuilder buffer_builder;
Offset<Vector<uint8_t>> v = buffer_builder.CreateVector<uint8_t>({});
buffer_builder.Finish(::tflite::CreateBuffer(buffer_builder, v));
auto* buffer =
flatbuffers::GetRoot<::tflite::Buffer>(buffer_builder.GetBufferPointer());
DataBuffer::Deserialize(*tensor, *buffer, &array);
EXPECT_EQ(nullptr, array.buffer);
}
TEST(DataBuffer, UnsupportedTypes) {
for (ArrayDataType t : kUnsupportedTocoTypes) {
flatbuffers::FlatBufferBuilder builder;
Array array;
array.data_type = t;
array.GetMutableBuffer<ArrayDataType::kFloat>();
EXPECT_DEATH(DataBuffer::Serialize(array, &builder),
"Unhandled array data type.");
}
for (::tflite::TensorType t : kUnsupportedTfLiteTypes) {
flatbuffers::FlatBufferBuilder builder;
builder.Finish(::tflite::CreateTensor(builder, 0, t, 1));
flatbuffers::FlatBufferBuilder buffer_builder;
Offset<Vector<uint8_t>> v = buffer_builder.CreateVector<uint8_t>({1});
buffer_builder.Finish(::tflite::CreateBuffer(buffer_builder, v));
auto* buffer = flatbuffers::GetRoot<::tflite::Buffer>(
buffer_builder.GetBufferPointer());
auto* tensor =
flatbuffers::GetRoot<::tflite::Tensor>(builder.GetBufferPointer());
Array array;
EXPECT_DEATH(DataBuffer::Deserialize(*tensor, *buffer, &array),
"Unhandled tensor type.");
}
}
TEST(DataBuffer, Float) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kFloat>({1.0f, 2.0f});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kFloat>().data,
::testing::ElementsAre(1.0f, 2.0f));
}
TEST(DataBuffer, Uint8) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kUint8>({127, 244});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kUint8>().data,
::testing::ElementsAre(127, 244));
}
TEST(DataBuffer, Int32) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kInt32>({1, 1 << 30});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kInt32>().data,
::testing::ElementsAre(1, 1 << 30));
}
TEST(DataBuffer, Uint32) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kUint32>({1, 1U << 31});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kUint32>().data,
::testing::ElementsAre(1, 1U << 31));
}
TEST(DataBuffer, Int16) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kInt16>({1, 1 << 14});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kInt16>().data,
::testing::ElementsAre(1, 1 << 14));
}
TEST(DataBuffer, String) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kString>(
{"AA", "BBB", "Best. String. Ever."});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kString>().data,
::testing::ElementsAre("AA", "BBB", "Best. String. Ever."));
}
TEST(DataBuffer, Bool) {
Array recovered =
ToFlatBufferAndBack<ArrayDataType::kBool>({true, false, true});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kBool>().data,
::testing::ElementsAre(true, false, true));
}
TEST(DataBuffer, Complex64) {
Array recovered = ToFlatBufferAndBack<ArrayDataType::kComplex64>(
{std::complex<float>(1.0f, 2.0f), std::complex<float>(3.0f, 4.0f)});
EXPECT_THAT(recovered.GetBuffer<ArrayDataType::kComplex64>().data,
::testing::ElementsAre(std::complex<float>(1.0f, 2.0f),
std::complex<float>(3.0f, 4.0f)));
}
TEST(Padding, All) {
EXPECT_EQ(::tflite::Padding_SAME, Padding::Serialize(PaddingType::kSame));
EXPECT_EQ(PaddingType::kSame, Padding::Deserialize(::tflite::Padding_SAME));
EXPECT_EQ(::tflite::Padding_VALID, Padding::Serialize(PaddingType::kValid));
EXPECT_EQ(PaddingType::kValid, Padding::Deserialize(::tflite::Padding_VALID));
EXPECT_DEATH(Padding::Serialize(static_cast<PaddingType>(10000)),
"Unhandled padding type.");
EXPECT_DEATH(Padding::Deserialize(10000), "Unhandled padding.");
}
TEST(ActivationFunction, All) {
std::vector<
std::pair<FusedActivationFunctionType, ::tflite::ActivationFunctionType>>
testdata = {{FusedActivationFunctionType::kNone,
::tflite::ActivationFunctionType_NONE},
{FusedActivationFunctionType::kRelu,
::tflite::ActivationFunctionType_RELU},
{FusedActivationFunctionType::kRelu6,
::tflite::ActivationFunctionType_RELU6},
{FusedActivationFunctionType::kRelu1,
::tflite::ActivationFunctionType_RELU_N1_TO_1}};
for (auto x : testdata) {
EXPECT_EQ(x.second, ActivationFunction::Serialize(x.first));
EXPECT_EQ(x.first, ActivationFunction::Deserialize(x.second));
}
EXPECT_DEATH(ActivationFunction::Serialize(
static_cast<FusedActivationFunctionType>(10000)),
"Unhandled fused activation function type.");
EXPECT_DEATH(ActivationFunction::Deserialize(10000),
"Unhandled fused activation function type.");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/python/mlir_wrapper/types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/tflite/types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
07beb975-db15-4cb1-a542-c80f31965a03 | cpp | tensorflow/tensorflow | sharding | third_party/xla/xla/python/ifrt/sharding.cc | third_party/xla/xla/python/ifrt/sharding_test.cc | #include "xla/python/ifrt/sharding.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
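// Canonicalizes `memory_kind` against the first device of a non-empty device
// list.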
MemoryKind CanonicalizeMemoryKindWithDevices(
const MemoryKind& memory_kind,
const tsl::RCReference<DeviceList>& devices) {
CHECK(devices != nullptr);
CHECK(!devices->devices().empty());
return CanonicalizeMemoryKind(memory_kind, devices->devices().front());
}
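// A ShardingParam is fully replicated iff every dimension is split into a
// single shard.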
bool ComputeIsFullyReplicated(const ShardingParam& sharding_param) {
return llvm::all_of(sharding_param.dim_shards(),
[](auto shards) { return shards == 1; });
}
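// Iterates over the cartesian product of several containers, with the last
// container varying fastest (major-to-minor order).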
template <typename ContainerT>
class MajorToMinorIter {
public:
using IteratorT = typename ContainerT::const_iterator;
using ValueT = typename ContainerT::value_type;
static MajorToMinorIter<ContainerT> cbegin(
absl::Span<const ContainerT> containers) {
std::vector<IteratorT> iters;
iters.reserve(containers.size());
for (const ContainerT& container : containers) {
iters.push_back(container.cbegin());
}
return MajorToMinorIter(containers, std::move(iters));
}
std::vector<ValueT> operator*() const {
std::vector<ValueT> result;
result.reserve(iters_.size());
for (const auto& iter : iters_) {
result.push_back(*iter);
}
return result;
}
void operator++() {
for (int i = iters_.size() - 1; i >= 0; --i) {
++iters_[i];
if (iters_[i] != containers_[i].end()) {
break;
}
if (i != 0) {
iters_[i] = containers_[i].begin();
}
}
}
bool IsEnd() const {
return iters_.empty() || iters_[0] == containers_[0].end();
}
private:
MajorToMinorIter(absl::Span<const ContainerT> containers,
std::vector<IteratorT> iters)
: containers_(containers), iters_(iters) {
DCHECK_EQ(iters.size(), containers.size());
}
absl::Span<const ContainerT> containers_;
std::vector<IteratorT> iters_;
};
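// Enumerates the per-dimension tile indices implied by `dim_shards`, in
// major-to-minor order.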
std::vector<Index> GetTileIndices(absl::Span<const int64_t> dim_shards) {
std::vector<std::vector<int64_t>> indices;
indices.reserve(dim_shards.size());
for (const int64_t dim_shard : dim_shards) {
std::vector<int64_t> index(dim_shard);
absl::c_iota(index, 0);
indices.push_back(std::move(index));
}
std::vector<Index> result;
int64_t shard_count =
absl::c_accumulate(dim_shards, 1, std::multiplies<int64_t>());
result.reserve(shard_count);
for (auto iter = MajorToMinorIter<std::vector<int64_t>>::cbegin(indices);
!iter.IsEnd(); ++iter) {
result.push_back(Index(*iter));
}
return result;
}
}
char Sharding::ID = 0;
char SingleDeviceSharding::ID = 0;
char OpaqueSharding::ID = 0;
char ConcreteSharding::ID = 0;
char ConcreteEvenSharding::ID = 0;
char ShardingParamSharding::ID = 0;
char DeserializeShardingOptions::ID = 0;
Sharding::Sharding(tsl::RCReference<DeviceList> devices, MemoryKind memory_kind,
bool is_fully_replicated)
: devices_(std::move(devices)),
memory_kind_(memory_kind),
is_fully_replicated_(is_fully_replicated) {}
bool Sharding::operator==(const Sharding& other) const {
if (this == &other) {
return true;
}
return HasSamePartitioning(other) && memory_kind_ == other.memory_kind_ &&
*devices() == *other.devices();
}
absl::StatusOr<std::unique_ptr<Sharding>> Sharding::FromProto(
DeviceList::LookupDeviceFunc lookup_device,
const ShardingProto& sharding_proto) {
return Deserialize<Sharding>(
sharding_proto.serialized_sharding(),
std::make_unique<DeserializeShardingOptions>(std::move(lookup_device)));
}
absl::StatusOr<ShardingProto> Sharding::ToProto() const {
ShardingProto sharding_proto;
TF_ASSIGN_OR_RETURN(*sharding_proto.mutable_serialized_sharding(),
Serialize(const_cast<Sharding&>(*this)));
return sharding_proto;
}
std::ostream& operator<<(std::ostream& os, const Sharding& sharding) {
return os << sharding.DebugString();
}
std::unique_ptr<SingleDeviceSharding> SingleDeviceSharding::Create(
Device* device, MemoryKind memory_kind) {
CHECK(device != nullptr);
memory_kind = CanonicalizeMemoryKind(memory_kind, device);
return std::unique_ptr<SingleDeviceSharding>(
new SingleDeviceSharding(device, memory_kind));
}
absl::StatusOr<Shape> SingleDeviceSharding::GetShardShape(
const Shape& shape) const {
return shape;
}
bool SingleDeviceSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
return llvm::isa<SingleDeviceSharding>(&other);
}
absl::StatusOr<std::unique_ptr<Sharding>>
SingleDeviceSharding::WithDeviceAssignment(
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && (*devices)->size() != 1) {
return InvalidArgument(
"SingleDeviceSharding can only have one device, but was asked to have "
"%d devices",
(*devices)->size());
}
return Create(devices.value_or(devices_)->devices().front(),
memory_kind.value_or(memory_kind_));
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
SingleDeviceSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
return std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>{
{shape, SingleDeviceSharding::Create(devices_->devices().front(),
memory_kind_)}};
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
SingleDeviceSharding::Disassemble(const DynamicShape& dynamic_shape) const {
DCHECK(this);
return std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>{
{dynamic_shape, SingleDeviceSharding::Create(devices_->devices().front(),
memory_kind_)}};
}
absl::StatusOr<std::vector<IndexDomain>> SingleDeviceSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
std::vector<IndexDomain> result;
result.reserve(1);
result.push_back(IndexDomain(shape));
return result;
}
std::string SingleDeviceSharding::DebugString() const {
DCHECK(this);
return absl::StrFormat("SingleDeviceSharding(%s, memory_kind: %v)",
devices_->devices().front()->DebugString(),
memory_kind_);
}
std::unique_ptr<OpaqueSharding> OpaqueSharding::Create(
tsl::RCReference<DeviceList> devices, MemoryKind memory_kind) {
memory_kind = CanonicalizeMemoryKindWithDevices(memory_kind, devices);
return std::unique_ptr<OpaqueSharding>(
new OpaqueSharding(std::move(devices), memory_kind));
}
OpaqueSharding::OpaqueSharding(tsl::RCReference<DeviceList> devices,
MemoryKind memory_kind)
: llvm::RTTIExtends<OpaqueSharding, Sharding>(
std::move(devices), memory_kind, false) {}
absl::StatusOr<Shape> OpaqueSharding::GetShardShape(const Shape& shape) const {
return InvalidArgument(
"OpaqueSharding does not have shard shape information");
}
bool OpaqueSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
return false;
}
absl::StatusOr<std::unique_ptr<Sharding>> OpaqueSharding::WithDeviceAssignment(
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && (*devices)->size() != devices_->size()) {
return InvalidArgument(
"OpaqueSharding should have the same number of devices as the current "
"sharding, but was asked to have %d devices",
(*devices)->size());
}
return Create(devices.value_or(devices_), memory_kind.value_or(memory_kind_));
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
OpaqueSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"OpaqueSharding does not have shard shape information");
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
OpaqueSharding::Disassemble(const DynamicShape& dynamic_shape) const {
DCHECK(this);
return InvalidArgument(
"OpaqueSharding does not have shard shape information");
}
absl::StatusOr<std::vector<IndexDomain>> OpaqueSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"OpaqueSharding does not have index domain information");
}
std::string OpaqueSharding::DebugString() const {
DCHECK(this);
return absl::StrFormat("OpaqueSharding(devices: %v, memory_kind: %v)",
*devices_, memory_kind_);
}
std::unique_ptr<ConcreteSharding> ConcreteSharding::Create(
tsl::RCReference<DeviceList> devices, MemoryKind memory_kind, Shape shape,
std::vector<Shape> shard_shapes) {
CHECK_EQ(devices->size(), shard_shapes.size());
memory_kind = CanonicalizeMemoryKindWithDevices(memory_kind, devices);
return std::unique_ptr<ConcreteSharding>(
new ConcreteSharding(std::move(devices), memory_kind, std::move(shape),
std::move(shard_shapes)));
}
std::unique_ptr<ConcreteSharding> ConcreteSharding::Create(
tsl::RCReference<DeviceList> devices, MemoryKind memory_kind,
DynamicShape dynamic_shape,
std::vector<DynamicShape> shard_dynamic_shapes) {
CHECK_EQ(devices->size(), shard_dynamic_shapes.size());
memory_kind = CanonicalizeMemoryKindWithDevices(memory_kind, devices);
return std::unique_ptr<ConcreteSharding>(new ConcreteSharding(
std::move(devices), memory_kind, std::move(dynamic_shape),
std::move(shard_dynamic_shapes)));
}
ConcreteSharding::ConcreteSharding(tsl::RCReference<DeviceList> devices,
MemoryKind memory_kind, Shape shape,
std::vector<Shape> shard_shapes)
: llvm::RTTIExtends<ConcreteSharding, Sharding>(
std::move(devices), memory_kind, false),
shape_(std::move(shape)),
shard_shapes_(std::move(shard_shapes)) {}
ConcreteSharding::ConcreteSharding(
tsl::RCReference<DeviceList> devices, MemoryKind memory_kind,
DynamicShape dynamic_shape, std::vector<DynamicShape> shard_dynamic_shapes)
: llvm::RTTIExtends<ConcreteSharding, Sharding>(
std::move(devices), memory_kind, false),
shape_(std::move(dynamic_shape)),
shard_shapes_(std::move(shard_dynamic_shapes)) {}
absl::StatusOr<Shape> ConcreteSharding::GetShardShape(
const Shape& shape) const {
return InvalidArgument("ConcreteSharding does not have a fixed shard shape");
}
bool ConcreteSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
const auto* other_concrete_sharding =
llvm::dyn_cast<ConcreteSharding>(&other);
if (!other_concrete_sharding) {
return false;
}
return shape_ == other_concrete_sharding->shape_ &&
shard_shapes_ == other_concrete_sharding->shard_shapes_;
}
absl::StatusOr<std::unique_ptr<Sharding>>
ConcreteSharding::WithDeviceAssignment(
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && (*devices)->size() != devices_->size()) {
return InvalidArgument(
"ConcreteSharding should have the same number of devices as the "
"current sharding, but was asked to have %d devices",
(*devices)->size());
}
if (has_static_shape()) {
return Create(devices.value_or(devices_),
memory_kind.value_or(memory_kind_), std::get<Shape>(shape_),
std::get<std::vector<Shape>>(shard_shapes_));
} else {
return Create(devices.value_or(devices_),
memory_kind.value_or(memory_kind_),
std::get<DynamicShape>(shape_),
std::get<std::vector<DynamicShape>>(shard_shapes_));
}
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
ConcreteSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
if (!has_static_shape()) {
return InvalidArgument(
"ConcreteSharding holds dynamic shape, but was asked "
"to disassemble static shape %s",
shape.DebugString());
}
if (shape != std::get<Shape>(shape_)) {
return InvalidArgument(
"ConcreteSharding can only disassemble shape %s, but was asked "
"to disassemble shape %s",
std::get<Shape>(shape_).DebugString(), shape.DebugString());
}
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
const std::vector<Shape>& shard_shapes =
std::get<std::vector<Shape>>(shard_shapes_);
const absl::Span<Device* const> devices = devices_->devices();
result.reserve(devices.size());
for (int i = 0; i < devices.size(); ++i) {
result.push_back({shard_shapes[i],
SingleDeviceSharding::Create(devices[i], memory_kind_)});
}
return result;
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
ConcreteSharding::Disassemble(const DynamicShape& dynamic_shape) const {
DCHECK(this);
if (!has_dynamic_shape()) {
return InvalidArgument(
"ConcreteSharding holds static shape, but was asked "
"to disassemble dynamic shape %s",
dynamic_shape.DebugString());
}
if (dynamic_shape != std::get<DynamicShape>(shape_)) {
return InvalidArgument(
"ConcreteSharding can only disassemble dynamic shape %s, but was asked "
"to disassemble dynamic shape %s",
std::get<DynamicShape>(shape_).DebugString(),
dynamic_shape.DebugString());
}
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>> result;
const std::vector<DynamicShape>& shard_dynamic_shapes =
std::get<std::vector<DynamicShape>>(shard_shapes_);
const absl::Span<Device* const> devices = devices_->devices();
result.reserve(devices.size());
for (int i = 0; i < devices.size(); ++i) {
result.push_back({shard_dynamic_shapes[i],
SingleDeviceSharding::Create(devices[i], memory_kind_)});
}
return result;
}
absl::StatusOr<std::vector<IndexDomain>> ConcreteSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"ConcreteSharding does not have index domain information");
}
std::string ConcreteSharding::DebugString() const {
DCHECK(this);
return std::visit(
[this](const auto& shape, const auto& shard_shapes) {
return absl::StrFormat(
"ConcreteSharding(devices: %v, shape: %s, shard_shapes: %s, "
"memory_kind: %v)",
*devices_, shape.DebugString(),
absl::StrJoin(shard_shapes, ",",
[](std::string* out, const auto& shard_shape) {
absl::StrAppend(out, shard_shape.DebugString());
}),
memory_kind_);
},
shape_, shard_shapes_);
}
std::unique_ptr<ConcreteEvenSharding> ConcreteEvenSharding::Create(
tsl::RCReference<DeviceList> devices, MemoryKind memory_kind, Shape shape,
Shape shard_shape, bool is_fully_replicated) {
memory_kind = CanonicalizeMemoryKindWithDevices(memory_kind, devices);
return std::unique_ptr<ConcreteEvenSharding>(new ConcreteEvenSharding(
std::move(devices), memory_kind, std::move(shape), std::move(shard_shape),
is_fully_replicated));
}
ConcreteEvenSharding::ConcreteEvenSharding(tsl::RCReference<DeviceList> devices,
MemoryKind memory_kind, Shape shape,
Shape shard_shape,
bool is_fully_replicated)
: llvm::RTTIExtends<ConcreteEvenSharding, Sharding>(
std::move(devices), memory_kind, is_fully_replicated),
shape_(std::move(shape)),
shard_shape_(std::move(shard_shape)) {}
absl::StatusOr<Shape> ConcreteEvenSharding::GetShardShape(
const Shape& shape) const {
if (shape != shape_) {
return InvalidArgument(
"ConcreteEvenSharding has a shard shape for shape %s, but was asked "
"to get a shard shape for shape %s",
shape_.DebugString(), shape.DebugString());
}
return shard_shape_;
}
bool ConcreteEvenSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
const auto* other_concrete_even_sharding =
llvm::dyn_cast<ConcreteEvenSharding>(&other);
if (!other_concrete_even_sharding) {
return false;
}
return devices_->size() == other_concrete_even_sharding->devices_->size() &&
shape_ == other_concrete_even_sharding->shape_ &&
shard_shape_ == other_concrete_even_sharding->shard_shape_ &&
is_fully_replicated_ ==
other_concrete_even_sharding->is_fully_replicated_;
}
absl::StatusOr<std::unique_ptr<Sharding>>
ConcreteEvenSharding::WithDeviceAssignment(
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && (*devices)->size() != devices_->size()) {
return InvalidArgument(
"ConcreteEvenSharding should have the same number of devices as the "
"current sharding, but was asked to have %d devices",
(*devices)->size());
}
return Create(devices.value_or(devices_), memory_kind.value_or(memory_kind_),
shape_, shard_shape_, is_fully_replicated_);
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
ConcreteEvenSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
if (shape != shape_) {
return InvalidArgument(
"ConcreteEvenSharding can only disassemble shape %s, but was asked "
"to disassemble shape %s",
shape_.DebugString(), shape.DebugString());
}
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
const absl::Span<Device* const> devices = devices_->devices();
result.reserve(devices.size());
for (int i = 0; i < devices.size(); ++i) {
result.push_back(
{shard_shape_, SingleDeviceSharding::Create(devices[i], memory_kind_)});
}
return result;
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
ConcreteEvenSharding::Disassemble(const DynamicShape& dynamic_shape) const {
return InvalidArgument(
"ConcreteEvenSharding can only disassemble static shape, but was asked "
"to disassemble dynamic shape %s",
dynamic_shape.DebugString());
}
absl::StatusOr<std::vector<IndexDomain>> ConcreteEvenSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"ConcreteEvenSharding does not have index domain information");
}
std::string ConcreteEvenSharding::DebugString() const {
DCHECK(this);
return absl::StrFormat(
"ConcreteEvenSharding(devices: %v, shape: %s, shard_shape: %s, "
"memory_kind: %v)",
*devices_, shape_.DebugString(), shard_shape_.DebugString(),
memory_kind_);
}
absl::StatusOr<std::unique_ptr<ShardingParamSharding>>
ShardingParamSharding::Create(ShardingParam sharding_param,
tsl::RCReference<DeviceList> devices,
MemoryKind memory_kind) {
memory_kind = CanonicalizeMemoryKindWithDevices(memory_kind, devices);
int64_t device_count =
absl::c_accumulate(sharding_param.minor_to_major().axis_sizes, 1,
std::multiplies<int64_t>());
if (device_count != devices->size()) {
return InvalidArgument(
"Device counts don't match. From ShardingParam %d vs from DeviceList "
"%d",
device_count, devices->size());
}
return std::unique_ptr<ShardingParamSharding>(new ShardingParamSharding(
std::move(sharding_param), std::move(devices), memory_kind));
}
ShardingParamSharding::ShardingParamSharding(
ShardingParam sharding_param, tsl::RCReference<DeviceList> devices,
MemoryKind memory_kind)
: llvm::RTTIExtends<ShardingParamSharding, Sharding>(
std::move(devices), memory_kind,
ComputeIsFullyReplicated(sharding_param)),
sharding_param_(sharding_param) {}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
ShardingParamSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
TF_ASSIGN_OR_RETURN(Shape local_shape, GetShardShape(shape));
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
for (Device* device : devices_->devices()) {
result.push_back(
{local_shape, SingleDeviceSharding::Create(device, memory_kind_)});
}
return result;
}
absl::StatusOr<Shape> ShardingParamSharding::GetShardShape(
const Shape& shape) const {
if (shape.dims().size() != sharding_param_.dim_shards().size()) {
return InvalidArgument(
"Numbers of dimensions don't match. From Shape %d vs from "
"ShardingParam %d",
shape.dims().size(), sharding_param_.dim_shards().size());
}
std::vector<int64_t> dims;
dims.reserve(shape.dims().size());
for (const auto [dim, dim_shards] :
llvm::zip(shape.dims(), sharding_param_.dim_shards())) {
if (dim % dim_shards != 0) {
return InvalidArgument(
"Uneven shard is not supported. dim: %d, dim_shards: %d", dim,
dim_shards);
}
dims.push_back(dim / dim_shards);
}
return Shape(dims);
}
bool ShardingParamSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
const auto* other_sharding_param_sharding =
llvm::dyn_cast<ShardingParamSharding>(&other);
if (!other_sharding_param_sharding) {
return false;
}
return sharding_param_ == other_sharding_param_sharding->sharding_param_;
}
absl::StatusOr<std::unique_ptr<Sharding>>
ShardingParamSharding::WithDeviceAssignment(
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && (*devices)->size() != devices_->size()) {
return InvalidArgument(
"ShardingParamSharding should have the same number of devices as the "
"current sharding, but was asked to have %d devices",
(*devices)->size());
}
return Create(sharding_param_, devices.value_or(devices_),
memory_kind.value_or(memory_kind_));
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
ShardingParamSharding::Disassemble(const DynamicShape& dynamic_shape) const {
return InvalidArgument(
"ShardingParamSharding can only disassemble static shape, but was asked "
"to disassemble dynamic shape %s",
dynamic_shape.DebugString());
}
absl::StatusOr<std::vector<IndexDomain>> ShardingParamSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
TF_ASSIGN_OR_RETURN(Shape local_shape, GetShardShape(shape));
std::vector<Index> tile_indices =
GetTileIndices(sharding_param_.dim_shards());
std::vector<Index> origins;
origins.reserve(tile_indices.size());
for (const Index& tile_index : tile_indices) {
origins.push_back(tile_index * local_shape.dims());
}
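  // Map each logical device id to its position in the minor-to-major device
  // ordering; devices beyond the tile count replicate existing tiles.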
static constexpr int kInvalidIndex = -1;
llvm::SmallVector<int, 4> device_list;
sharding_param_.minor_to_major().ToDeviceList(device_list);
std::vector<int> device_to_index(device_list.size(), kInvalidIndex);
for (int i = 0; i < device_list.size(); ++i) {
device_to_index[device_list[i]] = i;
}
DCHECK_EQ(device_to_index.size() % origins.size(), 0);
int replication = device_to_index.size() / origins.size();
std::vector<IndexDomain> result;
result.reserve(device_to_index.size());
for (int i = 0; i < device_to_index.size(); ++i) {
int index = device_to_index[i];
DCHECK_NE(index, kInvalidIndex);
result.push_back(IndexDomain(origins[index / replication], local_shape));
}
return result;
}
std::string ShardingParamSharding::DebugString() const {
DCHECK(this);
return absl::StrFormat(
"ShardingParamSharding(%s, devices: %v, memory_kind: %v)",
sharding_param_.DebugString(), *devices_, memory_kind_);
}
}
} | #include "xla/python/ifrt/sharding.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
class SingleDeviceShardingTest : public test_util::DeviceTest {};
class OpaqueShardingTest : public test_util::DeviceTest {};
class ConcreteShardingTest : public test_util::DeviceTest {};
class ConcreteEvenShardingTest : public test_util::DeviceTest {};
class ShardingParamShardingTest : public test_util::DeviceTest {};
TEST_P(SingleDeviceShardingTest, CreateWithBadDevice) {
EXPECT_DEATH(SingleDeviceSharding::Create(nullptr, MemoryKind()), "");
}
TEST_P(SingleDeviceShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding = SingleDeviceSharding::Create(
device_list->devices().front(), MemoryKind());
EXPECT_TRUE(sharding->IsFullyReplicated());
}
TEST_P(SingleDeviceShardingTest, GetShardShape) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding = SingleDeviceSharding::Create(
device_list->devices().front(), MemoryKind());
EXPECT_THAT(sharding->GetShardShape(Shape({10, 20})),
IsOkAndHolds(Shape({10, 20})));
}
TEST_P(SingleDeviceShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0});
std::shared_ptr<const Sharding> sharding0 = SingleDeviceSharding::Create(
device_list0->devices().front(), MemoryKind());
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({1});
std::shared_ptr<const Sharding> sharding1 = SingleDeviceSharding::Create(
device_list1->devices().front(), MemoryKind());
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(SingleDeviceShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0});
std::shared_ptr<const Sharding> sharding0 = SingleDeviceSharding::Create(
device_list0->devices().front(), MemoryKind());
{
auto device_list1 = GetDevices({1});
std::shared_ptr<const Sharding> sharding1 = SingleDeviceSharding::Create(
device_list1->devices().front(), MemoryKind());
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1});
EXPECT_THAT(sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("SingleDeviceSharding can only have one "
"device, but was asked to have 2 devices")));
}
}
TEST_P(SingleDeviceShardingTest, IndexDomains) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding = SingleDeviceSharding::Create(
device_list->devices().front(), MemoryKind());
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains, ElementsAre(IndexDomain(shape)));
}
TEST_P(SingleDeviceShardingTest, Disassemble) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding = SingleDeviceSharding::Create(
device_list->devices().front(), MemoryKind());
{
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(1));
const auto& [result_shape, result_sharding] = disassembled[0];
EXPECT_EQ(shape, result_shape);
EXPECT_EQ(*result_sharding, *sharding);
}
{
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10, 20}),
BoundedDynamicShapeTag({true, true})));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(dynamic_shape));
ASSERT_THAT(disassembled, SizeIs(1));
const auto& [result_shape, result_sharding] = disassembled[0];
EXPECT_EQ(dynamic_shape, result_shape);
EXPECT_EQ(*result_sharding, *sharding);
}
}
TEST_P(OpaqueShardingTest, CreateWithBadDeviceList) {
EXPECT_DEATH(
OpaqueSharding::Create(tsl::RCReference<DeviceList>(), MemoryKind()), "");
EXPECT_DEATH(
OpaqueSharding::Create(BasicDeviceList::Create({}), MemoryKind()), "");
}
TEST_P(OpaqueShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_FALSE(sharding->IsFullyReplicated());
}
TEST_P(OpaqueShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_THAT(sharding->GetShardShape(Shape({10, 20})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have shard shape")));
}
TEST_P(OpaqueShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
OpaqueSharding::Create(device_list0, MemoryKind());
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
OpaqueSharding::Create(device_list0, MemoryKind());
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(OpaqueShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
OpaqueSharding::Create(device_list0, MemoryKind());
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
OpaqueSharding::Create(device_list0, MemoryKind());
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
ASSERT_TRUE(llvm::isa<OpaqueSharding>(*new_sharding));
EXPECT_THAT(new_sharding->devices()->devices(),
ElementsAreArray(device_list1->devices()));
}
{
auto device_list1 = GetDevices({0, 1, 2, 3});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 4 devices")));
}
}
TEST_P(OpaqueShardingTest, FailedToDisassemble) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_THAT(
sharding->Disassemble(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have shard shape information")));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({30}), BoundedDynamicShapeTag({true})));
EXPECT_THAT(
sharding->Disassemble(dynamic_shape),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have shard shape information")));
}
TEST_P(OpaqueShardingTest, IndexDomainsFails) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_THAT(
sharding->IndexDomains(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have index domain information")));
}
TEST_P(ConcreteShardingTest, CreateWithBadDeviceList) {
EXPECT_DEATH(ConcreteSharding::Create(tsl::RCReference<DeviceList>(),
MemoryKind(), Shape({}), {Shape({})}),
"");
EXPECT_DEATH(ConcreteSharding::Create(BasicDeviceList::Create({}),
MemoryKind(), Shape({}), {Shape({})}),
"");
}
TEST_P(ConcreteShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
TEST_P(ConcreteShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_THAT(
sharding->GetShardShape(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding does not have a fixed shard shape")));
}
TEST_P(ConcreteShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1});
std::vector<Shape> shard_shapes0;
shard_shapes0.reserve(2);
shard_shapes0.push_back(Shape({10}));
shard_shapes0.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding0 = ConcreteSharding::Create(
device_list0, MemoryKind(), Shape({30}), shard_shapes0);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({2, 3});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({30}), shard_shapes1);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3, 4});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(3);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
shard_shapes1.push_back(Shape({30}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({60}), shard_shapes1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({40}), shard_shapes1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10000}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({30}), shard_shapes1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(ConcreteShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1});
std::vector<Shape> shard_shapes0;
shard_shapes0.reserve(2);
shard_shapes0.push_back(Shape({10}));
shard_shapes0.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding0 = ConcreteSharding::Create(
device_list0, MemoryKind(), Shape({30}), shard_shapes0);
{
auto device_list1 = GetDevices({0, 1});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({30}), shard_shapes1);
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2, 3});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 4 devices")));
}
}
TEST_P(ConcreteShardingTest, Disassemble) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(Shape({30})));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, shard_shapes[i]);
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(ConcreteShardingTest, DisassembleDynamicShape) {
auto device_list = GetDevices({0, 1});
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10}), BoundedDynamicShapeTag({true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape1,
DynamicShape::Create(Shape({3}), BoundedDynamicShapeTag({true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape2,
DynamicShape::Create(Shape({7}), BoundedDynamicShapeTag({true})));
std::vector<DynamicShape> shard_dynamic_shapes{
std::move(shard_dynamic_shape1), std::move(shard_dynamic_shape2)};
auto sharding = ConcreteSharding::Create(device_list, MemoryKind(),
dynamic_shape, shard_dynamic_shapes);
EXPECT_THAT(sharding->Disassemble(Shape({10})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding holds dynamic shape")));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(DynamicShape(dynamic_shape)));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < disassembled.size(); ++i) {
const auto& [dynamic_shape, sharding] = disassembled[i];
EXPECT_EQ(dynamic_shape, shard_dynamic_shapes[i]);
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(ConcreteShardingTest, DisassembleFailsForUnexpectedShape) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_THAT(sharding->Disassemble(Shape({40})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding can only disassemble")));
}
TEST_P(ConcreteShardingTest, IndexDomainsFails) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_THAT(sharding->IndexDomains(Shape({30})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding does not have index "
"domain information")));
}
TEST_P(ConcreteEvenShardingTest, CreateWithBadDeviceList) {
EXPECT_DEATH(ConcreteEvenSharding::Create(tsl::RCReference<DeviceList>(),
MemoryKind(), Shape({}), Shape({}),
true),
"");
EXPECT_DEATH(ConcreteEvenSharding::Create(BasicDeviceList::Create({}),
MemoryKind(), Shape({}), Shape({}),
true),
"");
}
TEST_P(ConcreteEvenShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1});
  {
    std::shared_ptr<const Sharding> sharding =
        ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
                                     Shape({15}), /*is_fully_replicated=*/true);
    EXPECT_TRUE(sharding->IsFullyReplicated());
  }
  {
    std::shared_ptr<const Sharding> sharding = ConcreteEvenSharding::Create(
        device_list, MemoryKind(), Shape({30}), Shape({15}),
        /*is_fully_replicated=*/false);
    EXPECT_FALSE(sharding->IsFullyReplicated());
  }
}
TEST_P(ConcreteEvenShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_THAT(sharding->GetShardShape(Shape({30})), IsOkAndHolds(Shape({15})));
EXPECT_THAT(
sharding->GetShardShape(Shape({45})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteEvenSharding has a shard shape for shape [30], "
"but was asked to get a shard shape for shape [45]")));
}
TEST_P(ConcreteEvenShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
ConcreteEvenSharding::Create(device_list0, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3, 4});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({45}),
Shape({15}), true);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({10}), true);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 = ConcreteEvenSharding::Create(
device_list1, MemoryKind(), Shape({30}), Shape({15}),
false);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(ConcreteEvenShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
ConcreteEvenSharding::Create(device_list0, MemoryKind(), Shape({30}),
Shape({15}), true);
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({15}), true);
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2, 3});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteEvenSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 4 devices")));
}
}
TEST_P(ConcreteEvenShardingTest, Disassemble) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), false);
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(Shape({30})));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({15}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(ConcreteEvenShardingTest, DisassembleFailsForUnexpectedShape) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), false);
EXPECT_THAT(sharding->Disassemble(Shape({40})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteEvenSharding can only disassemble")));
}
TEST_P(ConcreteEvenShardingTest, IndexDomainsFails) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), false);
EXPECT_THAT(
sharding->IndexDomains(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr(
"ConcreteEvenSharding does not have index domain information")));
}
TEST_P(ShardingParamShardingTest, CreateWithBadDeviceList) {
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
EXPECT_DEATH(ShardingParamSharding::Create(
param, tsl::RCReference<DeviceList>(), MemoryKind())
.value(),
"");
EXPECT_DEATH(ShardingParamSharding::Create(param, BasicDeviceList::Create({}),
MemoryKind())
.value(),
"");
}
TEST_P(ShardingParamShardingTest, CreateFailsWhenDeviceCountNotMatch) {
auto device_list = GetDevices({0, 1});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
EXPECT_THAT(ShardingParamSharding::Create(param, device_list, MemoryKind()),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Device counts don't match. From "
"ShardingParam 6 vs from DeviceList 2")));
}
TEST_P(ShardingParamShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
{
ShardingParam param{{1, 1},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_TRUE(param_sharding->IsFullyReplicated());
}
{
ShardingParam param{{1, 6},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_FALSE(param_sharding->IsFullyReplicated());
}
{
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_FALSE(param_sharding->IsFullyReplicated());
}
}
TEST_P(ShardingParamShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6})),
IsOkAndHolds(Shape({3, 2})));
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6, 6})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Numbers of dimensions don't match. From "
"Shape 3 vs from ShardingParam 2")));
}
TEST_P(ShardingParamShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param0{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding0,
ShardingParamSharding::Create(param0, device_list0, MemoryKind()));
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
ShardingParam param1{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5});
ShardingParam param1{{3, 1},
{{1, 0}, {1, 3}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
ShardingParam param1{{3, 2},
{{0, 1}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(ShardingParamShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param0{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding0,
ShardingParamSharding::Create(param0, device_list0, MemoryKind()));
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
ShardingParam param1{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ShardingParamSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 3 devices")));
}
}
TEST_P(ShardingParamShardingTest, Disassemble) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
param_sharding->Disassemble(Shape({6, 6})));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({3, 2}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(ShardingParamShardingTest, DisassembleFailsWhenRankNotMatch) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_THAT(param_sharding->Disassemble(Shape({6, 6, 6})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Numbers of dimensions don't match. From "
"Shape 3 vs from ShardingParam 2")));
}
TEST_P(ShardingParamShardingTest, DisassembleFailsForUnevenSharding) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_THAT(
param_sharding->Disassemble(Shape({7, 6})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Uneven shard is not supported. dim: 7, dim_shards: 2")));
}
TEST_P(ShardingParamShardingTest, IndexDomain) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{0, 1}, {2, 3}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto index_domains,
param_sharding->IndexDomains(Shape({6, 6})));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({3, 2})),
IndexDomain(Index({0, 2}), Shape({3, 2})),
IndexDomain(Index({0, 4}), Shape({3, 2})),
IndexDomain(Index({3, 0}), Shape({3, 2})),
IndexDomain(Index({3, 2}), Shape({3, 2})),
IndexDomain(Index({3, 4}), Shape({3, 2}))));
}
TEST_P(ShardingParamShardingTest, IndexDomainWithPermutation) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto index_domains,
param_sharding->IndexDomains(Shape({6, 6})));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({3, 2})),
IndexDomain(Index({0, 4}), Shape({3, 2})),
IndexDomain(Index({3, 2}), Shape({3, 2})),
IndexDomain(Index({0, 2}), Shape({3, 2})),
IndexDomain(Index({3, 0}), Shape({3, 2})),
IndexDomain(Index({3, 4}), Shape({3, 2}))));
}
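// With dim_shards {2, 1} over 6 devices, the array is split in half along
// dimension 0 and each half is replicated 3 ways, so consecutive groups of
// three devices report identical index domains.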
TEST_P(ShardingParamShardingTest, IndexDomainWithReplication) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 1},
{{0, 1}, {2, 3}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto index_domains,
param_sharding->IndexDomains(Shape({6, 6})));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({3, 6})),
IndexDomain(Index({0, 0}), Shape({3, 6})),
IndexDomain(Index({0, 0}), Shape({3, 6})),
IndexDomain(Index({3, 0}), Shape({3, 6})),
IndexDomain(Index({3, 0}), Shape({3, 6})),
IndexDomain(Index({3, 0}), Shape({3, 6}))));
}
INSTANTIATE_TEST_SUITE_P(NumDevices, SingleDeviceShardingTest,
                         testing::Values(test_util::DeviceTestParam{
                             /*num_devices=*/6,
                             /*num_addressable_devices=*/6}));
INSTANTIATE_TEST_SUITE_P(NumDevices, OpaqueShardingTest,
                         testing::Values(test_util::DeviceTestParam{
                             /*num_devices=*/6,
                             /*num_addressable_devices=*/6}));
INSTANTIATE_TEST_SUITE_P(NumDevices, ConcreteShardingTest,
                         testing::Values(test_util::DeviceTestParam{
                             /*num_devices=*/6,
                             /*num_addressable_devices=*/6}));
INSTANTIATE_TEST_SUITE_P(NumDevices, ConcreteEvenShardingTest,
                         testing::Values(test_util::DeviceTestParam{
                             /*num_devices=*/6,
                             /*num_addressable_devices=*/6}));
INSTANTIATE_TEST_SUITE_P(NumDevices, ShardingParamShardingTest,
                         testing::Values(test_util::DeviceTestParam{
                             /*num_devices=*/6,
                             /*num_addressable_devices=*/4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ff4abb2c-ab85-4700-8461-1d3ad21f9d18 | cpp | tensorflow/tensorflow | xla_compiler | tensorflow/compiler/tf2xla/xla_compiler.cc | tensorflow/compiler/tf2xla/xla_compiler_test.cc | #include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include <algorithm>
#include <array>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/shape_inference.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/mlir/utils/array_container_utils.h"
#include "tensorflow/compiler/tf2xla/graph_compiler.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/rearrange_function_argument.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/sharding_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
constexpr char kSingleOpComponent[] = "TF2XLA_XLA_COMPILER_COMPILE_SINGLE_OP";
constexpr char kCompileFunctionComponent[] =
"TF2XLA_XLA_COMPILER_COMPILE_FUNCTION";
Status CheckSignature(const DataTypeVector& types,
absl::Span<const XlaCompiler::Argument> args) {
if (args.size() != types.size()) {
return errors::Internal("Compilation arguments have ", args.size(),
" elements while function has ", types.size());
}
for (int i = 0, end = types.size(); i < end; ++i) {
if (types[i] != args[i].type && types[i] != DT_RESOURCE &&
types[i] != DT_VARIANT) {
return errors::Internal(
"Argument ", i, " has declared type ", DataTypeString(args[i].type),
" but function parameter has type ", DataTypeString(types[i]));
}
}
return absl::OkStatus();
}
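// Collects the XLA shardings attached to the _Arg and _Retval nodes of
// `graph`, keyed by argument / return-value index.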
absl::StatusOr<
std::pair<std::map<int, xla::OpSharding>, std::map<int, xla::OpSharding>>>
ComputeArgAndRetvalShardings(const Graph& graph) {
auto get_sharding_for_node =
[](const Node* n) -> absl::StatusOr<std::optional<xla::OpSharding>> {
TF_ASSIGN_OR_RETURN(
auto sharding,
ParseShardingFromDevice(*n, std::numeric_limits<int32>::max(),
false));
return sharding;
};
std::map<int, xla::OpSharding> arg_shardings;
std::map<int, xla::OpSharding> retval_shardings;
for (const Node* n : graph.nodes()) {
if (n->IsArg()) {
TF_ASSIGN_OR_RETURN(auto sharding, get_sharding_for_node(n));
if (!sharding.has_value()) continue;
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
TF_RET_CHECK(index >= 0) << "Negative _Arg index";
arg_shardings[index] = std::move(*sharding);
} else if (n->IsRetval()) {
TF_ASSIGN_OR_RETURN(auto sharding, get_sharding_for_node(n));
if (!sharding.has_value()) continue;
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
TF_RET_CHECK(index >= 0) << "Negative _Retval index";
retval_shardings[index] = std::move(*sharding);
}
}
return std::make_pair(std::move(arg_shardings), std::move(retval_shardings));
}
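// Executes `graph` symbolically on the XLA compilation device. The XlaContext
// is registered in a per-step resource container so that the kernels can look
// it up while the graph is being compiled.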
Status ExecuteGraph(XlaContext* xla_context, std::unique_ptr<Graph> graph,
XlaCompilationDevice* device, FunctionLibraryRuntime* flib,
int64_t step_id) {
xla_context->Ref();
Status status;
auto step_container = std::make_unique<ScopedStepContainer>(
step_id, [&status, device](const string& name) {
status = device->resource_manager()->Cleanup(name);
});
TF_RETURN_IF_ERROR(step_container->Create(device->resource_manager(),
XlaContext::kXlaContextResourceName,
xla_context));
GraphCompiler graph_compiler(device, graph.get(), flib, step_container.get());
TF_RETURN_IF_ERROR(graph_compiler.Compile());
step_container.reset();
return status;
}
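// Builds the XLA computation from the graph's return values and resources:
// gathers the non-constant outputs, appends updated resource values
// (optionally aliased to their input parameters), applies result shardings,
// and packs everything into the tuple that becomes the computation root.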
Status BuildComputation(
const std::vector<XlaCompiler::Argument>& args,
const std::vector<XlaExpression>& retvals,
const std::map<int, xla::OpSharding>& arg_shardings,
const std::map<int, xla::OpSharding>& retval_shardings,
const std::vector<std::unique_ptr<XlaResource>>& resources,
std::unique_ptr<xla::XlaOp> token_output,
const XlaShapeLayoutHelpers::ShapeDeterminationFns& shape_determination_fns,
bool is_entry_computation, bool return_updated_values_for_all_resources,
bool always_return_tuple, bool use_tuple_arg, bool alias_resource_update,
xla::XlaBuilder* builder, xla::XlaComputation* computation,
int* num_computation_outputs, int* num_nonconst_outputs,
std::vector<XlaCompiler::OutputDescription>* outputs,
std::vector<XlaCompiler::ResourceUpdate>* resource_updates,
xla::Shape* output_shape, absl::Span<int const> input_mapping) {
xla::OpMetadata retval_metadata;
retval_metadata.set_op_name("XLA_Retvals");
builder->SetOpMetadata(retval_metadata);
VLOG(1) << "Building new computation";
auto cleanup = gtl::MakeCleanup([builder]() { builder->ClearOpMetadata(); });
auto identity_op = [builder](xla::XlaOp op,
const std::optional<xla::OpSharding>& sharding) {
xla::XlaScopedShardingAssignment assign_sharding(builder, sharding);
return xla::Copy(op);
};
std::vector<xla::XlaOp> elems;
elems.reserve(retvals.size());
std::unordered_map<int, xla::OpSharding> retval_index_and_sharding;
for (int i = 0, end = retvals.size(); i < end; ++i) {
XlaCompiler::OutputDescription& output = (*outputs)[i];
const XlaExpression& retval = retvals[i];
output.type = retval.dtype();
switch (retval.kind()) {
case XlaExpression::Kind::kConstant:
output.is_constant = true;
output.constant_value = *retval.constant_value();
output.shape = output.constant_value.shape();
break;
case XlaExpression::Kind::kTensorList: {
output.is_tensor_list = true;
xla::XlaOp value = retval.handle();
elems.push_back(value);
break;
}
case XlaExpression::Kind::kXlaOp: {
output.is_constant = false;
TF_ASSIGN_OR_RETURN(output.shape, retval.GetShape());
xla::XlaOp value = retval.handle();
auto it = retval_shardings.find(i);
std::optional<xla::OpSharding> sharding =
it == retval_shardings.end() ? std::optional<xla::OpSharding>()
: it->second;
if (it != retval_shardings.end()) {
retval_index_and_sharding[elems.size()] = it->second;
}
if (shape_determination_fns.shape_representation_fn) {
TF_ASSIGN_OR_RETURN(auto original_shape, builder->GetShape(value));
TF_ASSIGN_OR_RETURN(value,
ReshapeWithCorrectRepresentationAndSharding(
builder, value, original_shape,
shape_determination_fns, sharding,
false));
}
if (it != retval_shardings.end()) {
value = identity_op(value, sharding);
}
elems.push_back(value);
break;
}
case XlaExpression::Kind::kResource:
output.is_constant = false;
output.input_index = retval.resource()->arg_num();
output.shape = retval.resource()->shape();
break;
case XlaExpression::Kind::kInvalid:
return errors::InvalidArgument(
"Invalid expression returned by computation. "
"This probably means a return value was not set.");
}
}
*num_nonconst_outputs = elems.size();
std::vector<const XlaResource*> arg_resources;
arg_resources.reserve(resources.size());
for (const auto& resource : resources) {
if (resource->arg_num() >= 0) {
arg_resources.push_back(resource.get());
}
}
std::sort(arg_resources.begin(), arg_resources.end(),
[](const XlaResource* a, const XlaResource* b) {
return a->arg_num() < b->arg_num();
});
absl::flat_hash_map<int, int> argument_to_xla_arg;
for (int xla_arg = 0; xla_arg < input_mapping.size(); xla_arg++) {
argument_to_xla_arg[input_mapping[xla_arg]] = xla_arg;
}
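  // Emit an updated value for each resource that was modified (or whose value
  // must always be returned) and, for entry computations, record input/output
  // aliases so the backend can reuse the input buffer for the updated output.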
std::vector<xla::XlaBuilder::InputOutputAlias> aliases;
for (const XlaResource* resource : arg_resources) {
DCHECK_LT(resource->arg_num(), args.size());
const XlaCompiler::Argument& arg = args[resource->arg_num()];
auto it = arg_shardings.find(resource->arg_num());
bool modified = !resource->value().IsIdenticalTo(resource->initial_value());
for (const auto& grad : resource->tensor_array_gradients()) {
modified =
modified ||
!grad.second->value().IsIdenticalTo(grad.second->initial_value()) ||
arg.tensor_array_gradients.count(grad.first) == 0;
}
if (return_updated_values_for_all_resources || modified ||
arg.requires_broadcast) {
resource_updates->emplace_back();
XlaCompiler::ResourceUpdate& update = resource_updates->back();
update.input_index = resource->arg_num();
update.type = resource->type();
update.shape = resource->shape();
update.modified = modified;
int param_num = use_tuple_arg ? 0 : update.input_index;
if (is_entry_computation &&
arg.resource_kind != XlaResource::kTensorArray &&
alias_resource_update && argument_to_xla_arg.count(param_num)) {
xla::ShapeIndex param_index =
use_tuple_arg ? xla::ShapeIndex({update.input_index})
: xla::ShapeIndex{};
int xla_param_num = argument_to_xla_arg[param_num];
int64_t output_index_num = elems.size();
xla::ShapeIndex output_index = xla::ShapeIndex({output_index_num});
VLOG(3) << "Storing alias: " << output_index.ToString() << ": ("
<< xla_param_num << ", " << param_index.ToString() << ")";
aliases.push_back({output_index, xla_param_num, param_index});
}
for (const auto& grad : resource->tensor_array_gradients()) {
update.tensor_array_gradients_accessed.insert(grad.first);
}
xla::XlaOp handle;
TF_RETURN_IF_ERROR(resource->Pack(&handle, builder));
auto sharding = it == arg_shardings.end()
? std::optional<xla::OpSharding>()
: it->second;
if (shape_determination_fns.layout_preference_fn &&
shape_determination_fns.shape_representation_fn) {
TF_ASSIGN_OR_RETURN(auto original_shape, builder->GetShape(handle));
TF_ASSIGN_OR_RETURN(
handle, ReshapeWithCorrectRepresentationAndSharding(
builder, handle, original_shape,
shape_determination_fns, sharding, arg.fast_mem));
}
if (it != arg_shardings.end()) {
retval_index_and_sharding[elems.size()] = it->second;
}
handle = identity_op(handle, sharding);
elems.push_back(handle);
}
}
if (token_output) {
elems.push_back(*token_output);
}
*num_computation_outputs = elems.size();
xla::XlaOp tuple;
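  // If any return value of an entry computation carries a sharding, build the
  // root tuple under an explicit tuple sharding assembled from the per-element
  // shardings; otherwise a plain tuple suffices.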
if (retval_index_and_sharding.empty() || !is_entry_computation) {
tuple = xla::Tuple(builder, elems);
} else {
std::vector<xla::Shape> elem_shapes;
for (const auto& elem : elems) {
TF_ASSIGN_OR_RETURN(xla::Shape elem_shape,
elem.builder()->GetShape(elem));
elem_shapes.push_back(elem_shape);
}
xla::Shape shape = xla::ShapeUtil::MakeTupleShape(elem_shapes);
std::vector<xla::HloSharding> sharding_elems;
for (int i = 0, end = elems.size(); i < end; i++) {
const auto& iter = retval_index_and_sharding.find(i);
TF_RET_CHECK(iter != retval_index_and_sharding.end());
const xla::OpSharding& sub_op_sharding = iter->second;
TF_ASSIGN_OR_RETURN(xla::HloSharding sub_sharding,
xla::HloSharding::FromProto(sub_op_sharding));
if (elem_shapes[i].IsTuple()) {
const std::vector<xla::HloSharding> sub_sharding_elems =
sub_sharding.tuple_elements();
const int64_t sub_sharding_elems_size = sub_sharding_elems.size();
TF_RET_CHECK(sub_sharding_elems_size ==
xla::ShapeUtil::GetLeafCount(elem_shapes[i]));
for (const auto& sub_sharding_elem : sub_sharding_elems) {
sharding_elems.push_back(sub_sharding_elem);
}
} else {
sharding_elems.push_back(sub_sharding);
}
}
xla::HloSharding modified_sharding =
xla::HloSharding::Tuple(shape, sharding_elems);
xla::OpSharding op_sharding = modified_sharding.ToProto();
xla::XlaScopedShardingAssignment assign_sharding(builder, op_sharding);
tuple = xla::Tuple(builder, elems);
}
bool returns_tuple = always_return_tuple || elems.size() != 1;
VLOG(3) << "Computation returns a tuple=" << returns_tuple;
if (!returns_tuple) {
xla::GetTupleElement(tuple, 0);
for (xla::XlaBuilder::InputOutputAlias& alias : aliases) {
if (alias.output_index == xla::ShapeIndex({0})) {
VLOG(3) << "For aliased parameter " << alias.param_number << ": "
<< alias.param_index.ToString()
<< " normalizing output_index from {0} to {}, as a scalar is "
"returned from the cluster";
alias.output_index = xla::ShapeIndex({});
}
}
}
for (xla::XlaBuilder::InputOutputAlias& alias : aliases) {
builder->SetUpAlias(alias.output_index, alias.param_number,
alias.param_index);
}
TF_ASSIGN_OR_RETURN(*computation, builder->Build());
TF_ASSIGN_OR_RETURN(auto program_shape, computation->GetProgramShape());
*output_shape = program_shape.result();
return absl::OkStatus();
}
}
string XlaCompiler::Argument::HumanString() const {
string common;
if (!name.empty()) {
common = absl::StrCat(" name=", name);
}
absl::StrAppend(&common, " type=", DataTypeString(type),
" shape=", ShapeHumanString());
absl::StrAppend(
&common, " is_same_data_across_replicas=", is_same_data_across_replicas);
switch (kind) {
case kInvalid:
return "invalid";
case kConstant:
return absl::StrCat("kind=constant", common,
" value=", constant_value.DebugString());
case kConstantResource:
return absl::StrCat("kind=constant-resource", common,
" value=", constant_value.DebugString());
case kResource: {
string output = absl::StrCat(
"kind=resource", common,
" resource_kind=", XlaResource::KindToString(resource_kind),
" initialized=", initialized, " is_fast_mem=", fast_mem);
if (max_array_size >= 0) {
absl::StrAppend(&output, " max_array_size=", max_array_size);
}
if (!tensor_array_gradients.empty()) {
absl::StrAppend(&output, " tensor_array_gradients=",
absl::StrJoin(tensor_array_gradients, ","));
}
return output;
}
case kParameter:
return absl::StrCat("kind=parameter", common);
case kTensorList:
return absl::StrCat("kind=tensorlist", common);
case kToken:
return absl::StrCat("token", common);
}
}
std::vector<int64_t> XlaCompiler::Argument::DimensionSizes() const {
if (absl::holds_alternative<TensorShape>(shape)) {
return xla::InlinedVectorToVector(std::get<TensorShape>(shape).dim_sizes());
} else {
return xla::SpanToVector(std::get<xla::Shape>(shape).dimensions());
}
}
absl::InlinedVector<int64_t, 4>
XlaCompiler::Argument::DimensionSizesAsInlinedVector() const {
if (absl::holds_alternative<TensorShape>(shape)) {
return std::get<TensorShape>(shape).dim_sizes();
} else {
auto v = std::get<xla::Shape>(shape).dimensions();
return absl::InlinedVector<int64_t, 4>(v.begin(), v.end());
}
}
string XlaCompiler::Argument::ShapeHumanString() const {
if (absl::holds_alternative<TensorShape>(shape)) {
return std::get<TensorShape>(shape).DebugString();
} else {
return std::get<xla::Shape>(shape).DebugString();
}
}
XlaCompiler::XlaCompiler(XlaCompiler::Options options)
: options_(options),
initialization_status_(absl::OkStatus()),
next_step_id_(1),
device_(new XlaCompilationDevice(SessionOptions(), options_.device_type)),
device_mgr_(absl::WrapUnique(device_)) {
CHECK(!options_.device_type.type_string().empty());
if (options_.populate_resource_manager) {
initialization_status_ =
(*options_.populate_resource_manager)(device_->resource_manager());
}
local_flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(),
FunctionDefLibrary()));
local_pflr_.reset(new ProcessFunctionLibraryRuntime(
&device_mgr_, Env::Default(), nullptr,
options.graph_def_version, local_flib_def_.get(), OptimizerOptions()));
pflr_.reset(new ProcessFunctionLibraryRuntime(
&device_mgr_, Env::Default(), nullptr,
options.graph_def_version, options.flib_def, OptimizerOptions()));
local_flib_runtime_ = local_pflr_->GetFLR(device_->name());
flib_runtime_ = pflr_->GetFLR(device_->name());
XlaShapeLayoutHelpers::ShapeDeterminationFns& shape_determination_fns =
options_.shape_determination_fns;
if (!shape_determination_fns.shape_representation_fn) {
shape_determination_fns.shape_representation_fn =
IdentityShapeRepresentationFn();
}
if (!shape_determination_fns.layout_preference_fn) {
shape_determination_fns.layout_preference_fn = UseNoPreferenceLayoutFn();
}
}
XlaCompiler::~XlaCompiler() = default;
int64_t XlaCompiler::NextStepId() { return next_step_id_++; }
uint64 XlaCompiler::SignatureHash::operator()(
const std::pair<string, std::vector<Argument>>& signature) const {
return std::hash<string>()(signature.first);
}
static Status GetFunctionBody(const NameAttrList& function,
FunctionLibraryRuntime* flib_runtime,
const FunctionBody** fbody) {
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
function.name(), AttrSlice(&function.attr()), &handle));
*fbody = flib_runtime->GetFunctionBody(handle);
TF_RET_CHECK(*fbody);
return absl::OkStatus();
}
Status XlaCompiler::FindFunctionBody(const NameAttrList& function,
const FunctionBody** fbody,
const ConfigProto** config_proto) {
auto status = GetFunctionBody(function, local_flib_runtime_, fbody);
if (!status.ok()) {
if (!absl::IsNotFound(status)) {
return status;
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
GetFunctionBody(function, flib_runtime_, fbody),
"Local lookup failed with: ", status.message());
if (config_proto) {
*config_proto = flib_runtime_->config_proto();
}
VLOG(4) << "Function " << function.name() << " in flib_runtime_";
} else {
if (config_proto) {
*config_proto = local_flib_runtime_->config_proto();
}
VLOG(4) << "Function " << function.name() << " in local_flib_runtime_";
}
return absl::OkStatus();
}
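// Clones the function body into a new Graph backed by the compiler's
// FunctionLibraryDefinition and runs pre-compilation optimizations: function
// inlining, shape inference, and (unless disabled by flags) constant folding.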
std::unique_ptr<Graph> XlaCompiler::GetGraph(const FunctionBody* fbody) {
std::unique_ptr<Graph> graph(new Graph(options_.flib_def));
CopyGraph(*fbody->graph, graph.get());
bool is_inside_mustcompile = false;
TryGetNodeAttr(AttrSlice(&fbody->record->fdef().attr()), kXlaMustCompileAttr,
&is_inside_mustcompile);
auto flags = GetBuildXlaOpsPassFlags();
OptimizerOptions opts;
opts.set_opt_level(OptimizerOptions::L0);
opts.set_do_common_subexpression_elimination(false);
opts.set_do_function_inlining(true);
opts.set_do_constant_folding(!flags->tf_xla_disable_constant_folding);
GraphOptimizer optimizer(opts);
auto cf_consider_fn = [](const Node* n) {
for (const auto& output_arg : n->op_def().output_arg()) {
if (output_arg.type() == DT_VARIANT) {
return false;
}
}
const auto& ts = n->type_string();
if (ts == "Shape" || ts == "ShapeN" || ts == "Size") {
return false;
}
return true;
};
GraphOptimizer::Options graph_optimizer_options;
graph_optimizer_options.cf_consider_fn = cf_consider_fn;
graph_optimizer_options.inline_multi_device_functions = true;
graph_optimizer_options.inline_impl_selection_group_functions = true;
graph_optimizer_options.inline_with_single_device_body_placer = true;
graph_optimizer_options.ignore_noinline = is_inside_mustcompile;
{
GraphShapeInfo shape_info;
InferShapes(graph.get(), {},
flib_runtime_->GetFunctionLibraryDefinition(), &shape_info)
.IgnoreError();
auto node_name_index = graph->BuildNodeNameIndex();
std::unordered_map<string, std::vector<PartialTensorShape>> shape_map;
for (const auto& node_shape_info : shape_info) {
const string& node_name = node_shape_info.first;
const std::vector<InferredShape>& output_shapes = node_shape_info.second;
const auto& node_iter = node_name_index.find(node_name);
if (node_iter != node_name_index.end()) {
auto& partial_shapes = shape_map[node_name];
for (const auto& inferred_shape : output_shapes) {
partial_shapes.push_back(inferred_shape.shape);
}
}
}
graph_optimizer_options.shape_map = &shape_map;
optimizer.Optimize(flib_runtime_, flib_runtime_->env(),
nullptr, &graph, graph_optimizer_options);
}
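  // Run shape inference and the optimizer a second time: inlining in the first
  // pass can expose additional shapes and constant-folding opportunities.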
GraphShapeInfo shape_info;
InferShapes(graph.get(), {},
flib_runtime_->GetFunctionLibraryDefinition(), &shape_info)
.IgnoreError();
auto node_name_index = graph->BuildNodeNameIndex();
std::unordered_map<string, std::vector<PartialTensorShape>> shape_map;
for (const auto& node_shape_info : shape_info) {
const string& node_name = node_shape_info.first;
const std::vector<InferredShape>& output_shapes = node_shape_info.second;
const auto& node_iter = node_name_index.find(node_name);
if (node_iter != node_name_index.end()) {
auto& partial_shapes = shape_map[node_name];
for (const auto& inferred_shape : output_shapes) {
partial_shapes.push_back(inferred_shape.shape);
}
}
}
graph_optimizer_options.shape_map = &shape_map;
optimizer.Optimize(flib_runtime_, flib_runtime_->env(),
nullptr, &graph, graph_optimizer_options);
return graph;
}
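// Returns the names of the original control-return nodes that still exist in
// `graph`, preserving their original order.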
std::vector<std::string> GetValidControlRets(
absl::Span<Node* const> orig_control_ret_nodes, const Graph& graph) {
absl::flat_hash_map<string, int> control_ret_nodes_map;
for (int i = 0; i < orig_control_ret_nodes.size(); ++i) {
const Node* n = orig_control_ret_nodes[i];
control_ret_nodes_map[n->name()] = i;
}
std::vector<bool> is_valid_control_ret(orig_control_ret_nodes.size(), false);
int num_valid_control_rets = 0;
for (const Node* n : graph.nodes()) {
auto iter = control_ret_nodes_map.find(n->name());
if (iter != control_ret_nodes_map.end()) {
++num_valid_control_rets;
is_valid_control_ret[iter->second] = true;
}
}
std::vector<std::string> valid_control_rets;
valid_control_rets.reserve(num_valid_control_rets);
for (int i = 0; i < orig_control_ret_nodes.size(); ++i) {
if (is_valid_control_ret[i]) {
valid_control_rets.push_back(orig_control_ret_nodes[i]->name());
}
}
return valid_control_rets;
}
Status XlaCompiler::CompileSingleOp(
const XlaCompiler::CompileOptions& compile_options,
const XlaCompiler::SingleOpCompileArgument& single_op_compile_argument,
absl::Span<const Argument> args, XlaCompiler::CompilationResult* result) {
const NodeDef& node_def = single_op_compile_argument.node_def;
TF_ASSIGN_OR_RETURN(
auto graph,
CreateSingleOpGraph(node_def, args,
single_op_compile_argument.output_dtypes));
*result = {};
Status status = ADD_SOURCE_LOCATION(CompileGraph(
compile_options, node_def.name(), std::move(graph), args, result));
if (status.ok()) {
tensorflow::metrics::IncrementPhase2XlaCompilerCounter(
tensorflow::metrics::Phase2XlaCompilerMetric::
kCompileSingleOpXlaBuilderSuccess);
} else {
tensorflow::metrics::IncrementPhase2XlaCompilerCounter(
tensorflow::metrics::Phase2XlaCompilerMetric::
kCompileSingleOpXlaBuilderFailure);
tsl::error_logging::Log(mlir::TF::kBridgeComponent, kSingleOpComponent,
status.ToString())
.IgnoreError();
}
return status;
}
Status XlaCompiler::CompileFunction(
const XlaCompiler::CompileOptions& options,
const NameAttrList& fn_name_attrs,
absl::Span<const XlaCompiler::Argument> args,
XlaCompiler::CompilationResult* result) {
string function_id =
Canonicalize(fn_name_attrs.name(), AttrSlice(&fn_name_attrs.attr()));
VLOG(1) << "XlaCompiler::CompileFunction " << function_id;
const std::vector<XlaCompiler::Argument> arg_vector(args.begin(), args.end());
auto it = cache_.find({function_id, arg_vector});
if (it != cache_.end()) {
*result = it->second;
return absl::OkStatus();
}
const FunctionBody* fbody;
const ConfigProto* config = nullptr;
TF_RETURN_IF_ERROR(FindFunctionBody(fn_name_attrs, &fbody, &config));
std::optional<ConfigProto> config_proto;
if (config) {
config_proto = *config;
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
CheckSignature(fbody->arg_types, args),
"Signature check failure while compiling: ", fn_name_attrs.name());
for (int i = 0, end = args.size(); i < end; i++) {
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(fbody->arg_nodes[i]->def(), "T", &dtype));
if (dtype == DT_RESOURCE || dtype == DT_VARIANT) {
continue;
}
if (absl::holds_alternative<xla::Shape>(args[i].shape)) {
xla::Shape xla_shape = std::get<xla::Shape>(args[i].shape);
TensorShape tensor_shape;
if (XLAShapeToTensorShape(xla_shape, &tensor_shape).ok() &&
xla_shape.is_static()) {
fbody->arg_nodes[i]->ClearAttr("_output_shapes");
fbody->arg_nodes[i]->AddAttr("_output_shapes",
std::vector<TensorShape>{tensor_shape});
}
} else {
TensorShape tensor_shape = std::get<TensorShape>(args[i].shape);
fbody->arg_nodes[i]->ClearAttr("_output_shapes");
fbody->arg_nodes[i]->AddAttr("_output_shapes",
std::vector<TensorShape>{tensor_shape});
}
}
std::unique_ptr<Graph> graph = GetGraph(fbody);
for (Node* n : graph->nodes()) {
if (n->IsArg()) {
TF_RETURN_IF_ERROR(SetNodeShardingFromNeighbors(n, true));
}
}
for (Node* n : graph->nodes()) {
if (n->IsRetval()) {
TF_RETURN_IF_ERROR(SetNodeShardingFromNeighbors(n, false));
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "XlaCompiler::CompileFunction: "
<< DumpGraphToFile(
absl::StrCat("xla_compile_function_", function_id), *graph);
}
VLOG(1) << "====================================================";
VLOG(1) << "CompileFunction with XlaBuilder";
auto status =
CompileGraph(options, function_id, std::move(graph), args, result);
if (!status.ok()) {
tensorflow::metrics::IncrementPhase2XlaCompilerCounter(
tensorflow::metrics::Phase2XlaCompilerMetric::
kCompileFunctionXlaBuilderFailure);
::tsl::errors::AppendToMessage(
&status, "tf2xla conversion failed while converting ",
std::move(function_id),
". Run with TF_DUMP_GRAPH_PREFIX=/path/to/dump/dir and "
"--vmodule=xla_compiler=2 to obtain a dump of the compiled "
"functions.");
tsl::error_logging::Log(mlir::TF::kBridgeComponent,
kCompileFunctionComponent, status.ToString())
.IgnoreError();
return status;
}
tensorflow::metrics::IncrementPhase2XlaCompilerCounter(
tensorflow::metrics::Phase2XlaCompilerMetric::
kCompileFunctionXlaBuilderSuccess);
VLOG(1) << "====================================================";
cache_[{function_id, arg_vector}] = *result;
return absl::OkStatus();
}
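// Computes the xla::Shape to use for `arg`. Entry-computation parameters and
// initialized resource variables are run through the shape-determination
// (layout) functions and have their layouts adjusted for any tiled sharding;
// other argument kinds use their declared shapes directly.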
Status XlaCompiler::XLAShapeForArgument(
const XlaCompiler::Argument& arg, bool is_entry_computation,
const std::optional<xla::HloSharding>& arg_sharding,
xla::Shape* xla_shape) const {
switch (arg.kind) {
case XlaCompiler::Argument::kConstant:
LOG(FATAL) << "Unreachable case";
case XlaCompiler::Argument::kParameter: {
if (is_entry_computation) {
TensorShape shape;
if (std::holds_alternative<TensorShape>(arg.shape)) {
shape = std::get<TensorShape>(arg.shape);
} else {
TF_RETURN_IF_ERROR(
XLAShapeToTensorShape(std::get<xla::Shape>(arg.shape), &shape));
}
auto layout_preference =
options_.shape_determination_fns.layout_preference_fn(
shape, arg.type, arg.kind);
TF_ASSIGN_OR_RETURN(
*xla_shape,
options_.shape_determination_fns.shape_representation_fn(
shape, arg.type,
false, layout_preference));
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
arg_sharding, false,
options_.shape_determination_fns, xla_shape));
if (std::holds_alternative<xla::Shape>(arg.shape) &&
std::get<xla::Shape>(arg.shape).is_dynamic()) {
xla::Shape dynamic_shape = std::get<xla::Shape>(arg.shape);
for (int i = 0; i < xla_shape->dimensions_size(); ++i) {
xla_shape->set_dynamic_dimension(
i, dynamic_shape.is_dynamic_dimension(i));
}
}
} else {
if (std::holds_alternative<xla::Shape>(arg.shape)) {
*xla_shape = std::get<xla::Shape>(arg.shape);
} else {
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(
arg.type, std::get<TensorShape>(arg.shape), xla_shape));
}
}
return absl::OkStatus();
}
case XlaCompiler::Argument::kTensorList: {
TF_RET_CHECK(absl::holds_alternative<xla::Shape>(arg.shape));
*xla_shape = std::get<xla::Shape>(arg.shape);
return absl::OkStatus();
}
case XlaCompiler::Argument::kConstantResource:
case XlaCompiler::Argument::kResource: {
TF_RET_CHECK(arg.initialized);
switch (arg.resource_kind) {
case XlaResource::kVariable: {
TF_RET_CHECK(absl::holds_alternative<TensorShape>(arg.shape));
auto layout_preference =
options_.shape_determination_fns.layout_preference_fn(
std::get<TensorShape>(arg.shape), arg.type, arg.kind);
TF_ASSIGN_OR_RETURN(
*xla_shape,
options_.shape_determination_fns.shape_representation_fn(
std::get<TensorShape>(arg.shape), arg.type,
arg.fast_mem, layout_preference));
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
arg_sharding, arg.fast_mem, options_.shape_determination_fns,
xla_shape));
return absl::OkStatus();
}
case XlaResource::kTensorArray: {
if (arg.max_array_size < 0) {
return errors::InvalidArgument(
"Negative max_array_size in XLAShapeForArgument");
}
TF_RET_CHECK(absl::holds_alternative<TensorShape>(arg.shape));
TensorShape shape;
TF_RETURN_IF_ERROR(shape.AddDimWithStatus(arg.max_array_size));
shape.AppendShape(std::get<TensorShape>(arg.shape));
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(arg.type, shape, xla_shape));
if (!arg.tensor_array_gradients.empty()) {
std::vector<xla::Shape> tuple_shape(
arg.tensor_array_gradients.size() + 1, *xla_shape);
*xla_shape = xla::ShapeUtil::MakeTupleShape(tuple_shape);
}
return absl::OkStatus();
}
case XlaResource::kStack: {
if (arg.max_array_size < 0) {
return errors::InvalidArgument(
"Negative max_array_size in XLAShapeForArgument");
}
TF_RET_CHECK(absl::holds_alternative<TensorShape>(arg.shape));
TensorShape shape;
TF_RETURN_IF_ERROR(shape.AddDimWithStatus(arg.max_array_size));
shape.AppendShape(std::get<TensorShape>(arg.shape));
xla::Shape buffer_shape;
TF_RETURN_IF_ERROR(
TensorShapeToXLAShape(arg.type, shape, &buffer_shape));
*xla_shape = xla::ShapeUtil::MakeTupleShape(
{buffer_shape, xla::ShapeUtil::MakeShape(xla::S32, {})});
return absl::OkStatus();
}
case XlaResource::kInvalid:
return errors::Internal(
"Invalid resource type in XLAShapeForArgument()");
}
}
case XlaCompiler::Argument::kToken: {
*xla_shape = xla::ShapeUtil::MakeTokenShape();
return absl::OkStatus();
}
case XlaCompiler::Argument::kInvalid:
return errors::Internal("Invalid argument type in XLAShapeForArgument()");
}
}
void XlaCompiler::PopulateArgumentFromResource(const XlaResource& resource,
Argument* arg) {
arg->initialized = resource.initialized();
arg->kind = XlaCompiler::Argument::kResource;
arg->resource_kind = resource.kind();
arg->type = resource.type();
arg->shape = resource.shape();
arg->max_array_size = resource.max_array_size();
for (const auto& gradient : resource.tensor_array_gradients()) {
arg->tensor_array_gradients.insert(gradient.first);
}
arg->name = resource.name();
}
XlaCompiler::SingleOpCompileArgument::SingleOpCompileArgument(
const OpKernelContext& ctx) {
std::vector<DataType> output_dtypes(ctx.num_outputs());
for (int i = 0; i < output_dtypes.size(); ++i) {
output_dtypes[i] = ctx.expected_output_dtype(i);
}
this->output_dtypes = output_dtypes;
this->node_def = ctx.op_kernel().def();
auto* config_proto = ctx.function_library()->config_proto();
if (config_proto != nullptr) {
this->config_proto = *config_proto;
}
}
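// Emits xla::Parameter ops for the runtime inputs (constants stay compile-time
// constants and uninitialized resources get no parameter), optionally packing
// all inputs into a single tuple parameter, applying argument shardings, and
// recording the resulting handles in `arg_expressions`.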
Status XlaCompiler::BuildArguments(
const Graph& graph, const std::vector<XlaCompiler::Argument>& args,
bool use_tuple_arg, xla::XlaBuilder* builder, XlaContext* context,
const std::map<int, xla::OpSharding>& arg_shardings,
std::vector<XlaExpression>* arg_expressions,
std::vector<int>* input_to_args, std::vector<xla::Shape>* input_shapes,
bool is_entry_computation) {
arg_expressions->resize(args.size());
input_to_args->clear();
input_to_args->reserve(args.size());
for (std::vector<XlaCompiler::Argument>::size_type i = 0; i < args.size();
++i) {
const XlaCompiler::Argument& arg = args[i];
XlaExpression& arg_expression = (*arg_expressions)[i];
switch (arg.kind) {
case XlaCompiler::Argument::kConstantResource:
case XlaCompiler::Argument::kResource: {
TF_RET_CHECK(arg.resource_kind != XlaResource::kInvalid);
TF_RET_CHECK(absl::holds_alternative<TensorShape>(arg.shape));
XlaResource* resource =
context->AddResource(std::make_unique<XlaResource>(
arg.resource_kind, i, arg.name, arg.type,
std::get<TensorShape>(arg.shape), xla::XlaOp(),
arg.max_array_size,
arg.tensor_array_gradients,
true,
arg.definition_stack_trace));
arg_expression =
arg.kind == XlaCompiler::Argument::kResource
? XlaExpression::Resource(resource)
: XlaExpression::ConstantResource(arg.constant_value, resource);
if (arg.initialized) {
input_to_args->push_back(i);
}
break;
}
case XlaCompiler::Argument::kParameter:
case XlaCompiler::Argument::kTensorList:
case XlaCompiler::Argument::kToken: {
input_to_args->push_back(i);
break;
}
case XlaCompiler::Argument::kConstant:
arg_expression = XlaExpression::Constant(arg.constant_value);
break;
case XlaCompiler::Argument::kInvalid:
return errors::Internal(
"Unreachable case in BuildArguments() while filling constant args");
}
}
if (input_to_args->empty() && !use_tuple_arg) {
return absl::OkStatus();
}
std::vector<int> arg_to_inputs(args.size(), -1);
for (int i = 0, end = input_to_args->size(); i < end; i++) {
arg_to_inputs[input_to_args->at(i)] = i;
}
std::vector<xla::Shape> arg_shapes(input_to_args->size());
for (std::vector<int>::size_type i = 0; i < input_to_args->size(); ++i) {
auto arg_sharding = arg_shardings.find((*input_to_args)[i]);
std::optional<xla::HloSharding> sharding;
if (arg_sharding != arg_shardings.end()) {
TF_ASSIGN_OR_RETURN(auto hlo_sharding,
xla::HloSharding::FromProto(arg_sharding->second));
sharding = hlo_sharding;
}
TF_RETURN_IF_ERROR(XLAShapeForArgument(args[(*input_to_args)[i]],
is_entry_computation, sharding,
&arg_shapes[i]));
}
if (use_tuple_arg) {
input_shapes->push_back(xla::ShapeUtil::MakeTupleShape(arg_shapes));
} else {
*input_shapes = arg_shapes;
}
xla::OpMetadata arg_metadata;
arg_metadata.set_op_name("XLA_Args");
builder->SetOpMetadata(arg_metadata);
std::vector<xla::XlaOp> arg_handles(input_to_args->size());
if (use_tuple_arg) {
xla::XlaOp tuple;
if (is_entry_computation) {
xla::OpSharding tuple_sharding;
tuple_sharding.set_type(xla::OpSharding::TUPLE);
for (int64_t parameter : *input_to_args) {
auto it = arg_shardings.find(parameter);
*tuple_sharding.add_tuple_shardings() =
it == arg_shardings.end() ? xla::sharding_builder::AssignDevice(0)
: it->second;
}
std::vector<bool> is_same_across_replicas;
for (int i = 0, end = input_to_args->size(); i < end; ++i) {
is_same_across_replicas.insert(
is_same_across_replicas.end(),
xla::ShapeUtil::GetLeafCount(arg_shapes[i]),
args[input_to_args->at(i)].is_same_data_across_replicas);
}
xla::XlaScopedShardingAssignment assign_tuple_sharding(
builder, input_to_args->empty() ? std::optional<xla::OpSharding>()
: tuple_sharding);
tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple",
is_same_across_replicas);
} else {
tuple = xla::Parameter(builder, 0, (*input_shapes)[0], "arg_tuple");
}
for (std::vector<int>::size_type i = 0; i < input_to_args->size(); ++i) {
auto it = arg_shardings.find(i);
xla::XlaScopedShardingAssignment assign_sharding(
builder, it == arg_shardings.end() ? std::optional<xla::OpSharding>()
: it->second);
auto& arg = args[input_to_args->at(i)];
xla::OpMetadata arg_metadata;
arg_metadata.set_op_name(arg.node_name);
builder->SetOneShotOpMetadata(arg_metadata);
arg_handles[i] = xla::GetTupleElement(tuple, i);
}
} else {
for (std::vector<int>::size_type i = 0; i < input_to_args->size(); ++i) {
auto it = arg_shardings.find(i);
xla::XlaScopedShardingAssignment assign_sharding(
builder, it == arg_shardings.end() ? std::optional<xla::OpSharding>()
: it->second);
if (is_entry_computation) {
std::vector<bool> is_same_across_replicas(
xla::ShapeUtil::GetLeafCount((*input_shapes)[i]),
args[input_to_args->at(i)].is_same_data_across_replicas);
arg_handles[i] =
xla::Parameter(builder, i, (*input_shapes)[i],
absl::StrCat("arg", i), is_same_across_replicas);
} else {
arg_handles[i] = xla::Parameter(builder, i, (*input_shapes)[i],
absl::StrCat("arg", i));
}
}
}
builder->ClearOpMetadata();
VLOG(2) << "XLA computation inputs:";
for (std::vector<int>::size_type i = 0; i < input_to_args->size(); ++i) {
const XlaCompiler::Argument& arg = args[input_to_args->at(i)];
VLOG(2) << " XLA arg " << i
<< " shape: " << xla::ShapeUtil::HumanString(arg_shapes[i])
<< " name: " << arg.name << " TF arg " << input_to_args->at(i)
<< " node name: " << arg.node_name
<< (arg_shardings.find(i) == arg_shardings.end()
? ""
: absl::StrCat(" sharding: ",
arg_shardings.at(i).DebugString()));
XlaExpression& arg_expression = (*arg_expressions)[input_to_args->at(i)];
switch (arg.kind) {
case XlaCompiler::Argument::kConstantResource:
case XlaCompiler::Argument::kResource: {
TF_RET_CHECK(arg.initialized);
XlaResource* resource = arg_expression.resource();
TF_RETURN_IF_ERROR(resource->SetFromPack(arg.tensor_array_gradients,
arg_handles[i], builder));
VLOG(2) << " resource: num_gradients: "
<< arg.tensor_array_gradients.size();
break;
}
case XlaCompiler::Argument::kParameter:
if (is_entry_computation) {
arg_expression = XlaExpression::XlaOp(
xla::Reshape(arg_handles[i], arg.DimensionSizes()), arg.type);
} else {
arg_expression = XlaExpression::XlaOp(arg_handles[i], arg.type);
if (arg.value_bound) {
TF_RET_CHECK(arg.value_dynamism);
arg_expression.set_value_bound(arg.value_bound.value());
arg_expression.set_value_dynamism(arg.value_dynamism.value());
}
}
break;
case XlaCompiler::Argument::kTensorList: {
arg_expression = XlaExpression::TensorList(arg_handles[i]);
break;
}
case XlaCompiler::Argument::kToken: {
arg_expression = XlaExpression::XlaOp(arg_handles[i], arg.type);
break;
}
case XlaCompiler::Argument::kConstant:
case XlaCompiler::Argument::kInvalid:
return errors::Internal(
"Unreachable case in BuildArguments() while filling handles");
}
}
return absl::OkStatus();
}
namespace {
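// Checks that every node in `fdef` refers either to a function in `flib_def`
// or to a registered TensorFlow op.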
Status ValidateFunctionDef(const FunctionDef* fdef,
const FunctionLibraryDefinition& flib_def) {
for (const NodeDef& node : fdef->node_def()) {
const string& op = node.op();
if (op == FunctionLibraryDefinition::kGradientOp || flib_def.Find(op)) {
continue;
}
const OpDef* op_def;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(op, &op_def));
}
return absl::OkStatus();
}
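// If `node` is a PartitionedCall, returns the name of the function it calls;
// otherwise returns the node's type string.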
Status GetPotentialFunctionName(const Node& node, const string** name) {
if (node.IsPartitionedCall()) {
const AttrValue* attr_value;
TF_RETURN_IF_ERROR(
node.attrs().Find(FunctionLibraryDefinition::kFuncAttr, &attr_value));
if (!attr_value->has_func()) {
return errors::InvalidArgument(
"The attribute value for attribute 'f' in node ", node.DebugString(),
" does not have 'func' field set");
}
*name = &attr_value->func().name();
return absl::OkStatus();
}
*name = &node.type_string();
return absl::OkStatus();
}
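// Rejects graphs that contain ops with no XLA kernel registered for
// `device_type`, producing an error that names the offending node and, when
// available, the stack trace where it was created.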
Status ValidateGraph(const Graph* graph,
const FunctionLibraryDefinition& flib_def,
const DeviceType& device_type, const string& name) {
XlaOpRegistry::RegisterCompilationKernels();
auto maybe_error = [&](const Node* node, const Status& s) -> Status {
if (!s.ok()) {
std::string errmsg = absl::StrCat(
"Detected unsupported operations when trying to compile graph ", name,
" on ", device_type.type_string(), ": ", node->def().op(), " (",
s.message(), ")", FormatNodeForError(*node));
if (absl::StrContains(device_type.type_string(), "TPU")) {
absl::StrAppend(&errmsg,
"\nOne approach is to outside compile the unsupported "
"ops to run on CPUs by enabling soft placement "
"`tf.config.set_soft_device_placement(True)`."
" This has a potential performance penalty.\n");
}
if (std::shared_ptr<AbstractStackTrace> stack_trace =
node->GetStackTrace()) {
absl::StrAppend(
&errmsg, "\nThe op is created at: \n",
          stack_trace->ToString({true, true, true}));
}
return errors::InvalidArgument(errmsg);
}
return absl::OkStatus();
};
for (const Node* node : graph->nodes()) {
if (node->type_string() == FunctionLibraryDefinition::kGradientOp) {
continue;
}
const string* function_name;
TF_RETURN_IF_ERROR(GetPotentialFunctionName(*node, &function_name));
const FunctionDef* fdef = flib_def.Find(*function_name);
Status s;
if (fdef) {
s = ValidateFunctionDef(fdef, flib_def);
TF_RETURN_IF_ERROR(maybe_error(node, s));
continue;
}
const OpDef* op_def;
s = OpRegistry::Global()->LookUpOpDef(node->def().op(), &op_def);
TF_RETURN_IF_ERROR(maybe_error(node, s));
TF_RETURN_IF_ERROR(ValidateNodeDef(node->def(), *op_def));
s = FindKernelDef(device_type, node->def(), nullptr, nullptr);
TF_RETURN_IF_ERROR(maybe_error(node, s));
}
return absl::OkStatus();
}
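// Rewrites kConstant return-value expressions as kXlaOp expressions by
// emitting the constants into `builder`, so they appear in the computation
// output rather than as host-side constants.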
void ConvertConstantsToExpressions(xla::XlaBuilder* builder,
absl::Span<XlaExpression> expressions) {
for (XlaExpression& expression : expressions) {
if (expression.kind() == XlaExpression::Kind::kConstant) {
expression =
XlaExpression::XlaOp(expression.AsXlaOp(builder), expression.dtype());
}
}
}
}
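// A minimal stack trace attached to nodes that have none, so compilation
// errors can still report a creation location.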
class DummyStackTrace : public AbstractStackTrace {
absl::Span<StackFrame const> ToFrames() const override { return frames_; }
std::vector<StackFrame> ToUncachedFrames() const override { return frames_; }
StackFrame LastUserFrame() const override { return frames_.back(); }
  std::vector<StackFrame> GetUserFrames(int) const override {
return frames_;
}
std::string ToString(const TracePrintingOptions& opts) const override {
auto frame = LastUserFrame();
return absl::StrCat(frame.file_name, ":", frame.line_number, ":",
frame.function_name);
}
std::vector<StackFrame> frames_{
StackFrame({"dummy_file_name", 10, "dummy_function_name"})};
};
namespace {
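// Sets operand precision to HIGHEST on dot and convolution instructions so
// that they are not executed with TF32 when TF32 execution is disabled.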
void IncreasePrecisionsToAvoidTF32(xla::HloModuleProto& module) {
static constexpr std::array<absl::string_view, 2> kOpsPossiblyUsingTF32 = {
"dot", "convolution"};
xla::PrecisionConfig precision_config;
precision_config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
precision_config.add_operand_precision(xla::PrecisionConfig::HIGHEST);
for (xla::HloComputationProto& computation : *module.mutable_computations()) {
for (xla::HloInstructionProto& instruction :
*computation.mutable_instructions()) {
if (absl::c_find(kOpsPossiblyUsingTF32, instruction.opcode()) !=
kOpsPossiblyUsingTF32.end()) {
*instruction.mutable_precision_config() = precision_config;
}
}
}
}
}
Status XlaCompiler::CompileGraph(const XlaCompiler::CompileOptions& options,
string const& name,
std::unique_ptr<Graph> graph,
absl::Span<const XlaCompiler::Argument> args,
CompilationResult* result) {
VLOG(1) << "Executing graph symbolically to populate XlaBuilder.: " << name;
if (VLOG_IS_ON(2) || DEBUG_DATA_DUMPER()->ShouldDump(name, kDebugGroupMain)) {
VLOG(2) << "XlaCompiler::CompileGraph: "
<< DumpGraphToFile(absl::StrCat("xla_compile_graph_", name), *graph,
flib_runtime_->GetFunctionLibraryDefinition());
}
DummyStackTrace stack_trace;
for (auto node : graph->nodes()) {
if (node->GetStackTrace() == nullptr) {
node->SetStackTrace(std::make_shared<DummyStackTrace>(stack_trace));
}
}
TF_RETURN_IF_ERROR(PropagateConstIntoFunctionalNodes(
graph.get(), options_.flib_def, local_flib_def_.get()));
TF_RETURN_IF_ERROR(RearrangeFunctionArguments(
[this](const NameAttrList& function, const FunctionBody** fbody) {
return FindFunctionBody(function, fbody);
},
graph.get(), local_flib_def_.get(),
pflr_->GetFunctionLibraryDefinition()));
TF_RETURN_IF_ERROR(initialization_status_);
TF_RETURN_IF_ERROR(ValidateGraph(graph.get(), *options_.flib_def,
options_.device_type, name));
auto builder = std::make_unique<xla::XlaBuilder>(name);
XlaContext* context = new XlaContext(this, builder.get(), graph.get());
core::ScopedUnref context_unref(context);
std::vector<XlaCompiler::Argument> real_args(args.begin(), args.end());
int token_input_index = -1;
std::unique_ptr<xla::XlaOp> token_output;
if (options.add_token_input_output) {
token_input_index = real_args.size();
XlaCompiler::Argument token_arg;
token_arg.kind = XlaCompiler::Argument::kToken;
real_args.push_back(token_arg);
}
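  // Compute per-argument and per-retval shardings from the graph before
  // building the parameter list.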
std::map<int, xla::OpSharding> arg_shardings;
std::map<int, xla::OpSharding> retval_shardings;
TF_ASSIGN_OR_RETURN(std::tie(arg_shardings, retval_shardings),
ComputeArgAndRetvalShardings(*graph));
std::vector<XlaExpression> arg_expressions;
TF_RETURN_IF_ERROR(BuildArguments(
*graph, real_args, options.use_tuple_arg, builder.get(), context,
arg_shardings, &arg_expressions, &result->input_mapping,
&result->xla_input_shapes, options.is_entry_computation));
context->set_args(std::move(arg_expressions));
PushNodeTokenMapping();
std::set<std::string> output_node_token_inputs;
if (token_input_index != -1) {
auto arg_expression = context->args()[token_input_index];
TF_RETURN_IF_ERROR(
SetNodeToken(kXlaTokenArgNodeName, arg_expression.handle()));
output_node_token_inputs = CalculateTokenInputsForOutputToken(*graph);
if (output_node_token_inputs.empty()) {
output_node_token_inputs.insert(kXlaTokenArgNodeName);
}
} else if (options.is_entry_computation) {
if (HasSideEffectingNodes(*graph)) {
TF_RETURN_IF_ERROR(
SetNodeToken(kXlaTokenArgNodeName, xla::CreateToken(builder.get())));
}
}
Status execute_status = ExecuteGraph(context, std::move(graph), device_,
flib_runtime_, NextStepId());
if (!execute_status.ok()) {
VLOG(1) << "Failed executing graph " << name;
return execute_status;
}
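  // If a token output was requested, join the tokens of all contributing
  // nodes into a single output token.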
if (token_input_index != -1) {
std::vector<xla::XlaOp> token_inputs;
for (const auto& node_name : output_node_token_inputs) {
auto token_or = GetNodeToken(node_name);
TF_RETURN_IF_ERROR(token_or.status());
token_inputs.push_back(token_or.value());
}
token_output = std::make_unique<xla::XlaOp>(
xla::AfterAll(builder.get(), token_inputs));
}
TF_RETURN_IF_ERROR(PopNodeTokenMapping());
int num_nonconst_outputs;
int num_computation_outputs;
result->computation = std::make_shared<xla::XlaComputation>();
result->outputs.resize(context->retvals().size());
std::vector<XlaExpression> retvals = context->retvals();
ConvertConstantsToExpressions(builder.get(),
absl::Span<XlaExpression>(retvals));
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns{
UseNoPreferenceLayoutFn(), IdentityShapeRepresentationFn()};
TF_RETURN_IF_ERROR(BuildComputation(
real_args, retvals, arg_shardings, retval_shardings, context->resources(),
std::move(token_output),
options.is_entry_computation ? options_.shape_determination_fns
: shape_determination_fns,
options.is_entry_computation,
options.return_updated_values_for_all_resources,
options.always_return_tuple, options.use_tuple_arg,
options.alias_resource_update, builder.get(), result->computation.get(),
&num_computation_outputs, &num_nonconst_outputs, &result->outputs,
&result->resource_updates, &result->xla_output_shape,
result->input_mapping));
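  // Record host-compute transfer metadata on the result and assign a channel
  // id to each transfer.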
for (const auto& [key, send] : host_compute_sends_) {
auto* d2h = result->host_compute_metadata.add_device_to_host();
*d2h = send;
for (int i = 0; i < d2h->metadata_size(); ++i) {
const std::string channel_name =
GetDeviceToHostChannelName(d2h->key(), i);
xla::ChannelHandle handle;
TF_RETURN_IF_ERROR(GetDeviceToHostChannelHandle(channel_name, &handle));
d2h->mutable_metadata(i)->set_channel_id(handle.handle());
}
}
for (const auto& [key, recv] : host_compute_recvs_) {
auto* h2d = result->host_compute_metadata.add_host_to_device();
*h2d = recv;
for (int i = 0; i < h2d->metadata_size(); ++i) {
const std::string channel_name =
GetHostToDeviceChannelName(h2d->key(), i);
xla::ChannelHandle handle;
TF_RETURN_IF_ERROR(GetHostToDeviceChannelHandle(channel_name, &handle));
h2d->mutable_metadata(i)->set_channel_id(handle.handle());
}
}
if (!tsl::tensor_float_32_execution_enabled()) {
IncreasePrecisionsToAvoidTF32(*result->computation->mutable_proto());
}
VLOG(2) << "Outputs: total: " << context->retvals().size()
<< " nonconstant: " << num_nonconst_outputs;
VLOG(2) << "XLA output shape: "
<< xla::ShapeUtil::HumanStringWithLayout(result->xla_output_shape);
result->collective_info = context->GetCollectiveInfo();
return absl::OkStatus();
}
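// Returns the channel handle for `key`, creating and caching it on first use.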
Status XlaCompiler::GetChannelHandle(const string& key,
xla::ChannelHandle* channel) {
auto result = channels_.emplace(key, xla::ChannelHandle());
if (result.second) {
TF_ASSIGN_OR_RETURN(result.first->second, client()->CreateChannelHandle());
}
*channel = result.first->second;
VLOG(1) << "Channel: " << key << " " << channel->DebugString();
return absl::OkStatus();
}
Status XlaCompiler::GetHostToDeviceChannelHandle(const string& key,
xla::ChannelHandle* channel) {
auto result = channels_.emplace(key, xla::ChannelHandle());
if (result.second) {
TF_ASSIGN_OR_RETURN(result.first->second,
client()->CreateHostToDeviceChannelHandle());
}
*channel = result.first->second;
VLOG(1) << "Host to device channel: " << key << " " << channel->DebugString();
return absl::OkStatus();
}
Status XlaCompiler::GetDeviceToHostChannelHandle(const string& key,
xla::ChannelHandle* channel) {
auto result = channels_.emplace(key, xla::ChannelHandle());
if (result.second) {
TF_ASSIGN_OR_RETURN(result.first->second,
client()->CreateDeviceToHostChannelHandle());
}
*channel = result.first->second;
VLOG(1) << "Device to host channel: " << key << " " << channel->DebugString();
return absl::OkStatus();
}
namespace {
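// Fills `transfer` with `key` and one TensorMetadata entry per (type, shape)
// pair.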
void SetTransfer(const string& key, absl::Span<const DataType> types,
absl::Span<const TensorShape> shapes,
tf2xla::HostTransferMetadata* transfer) {
transfer->set_key(key);
CHECK(types.size() == shapes.size());
for (int i = 0, end = types.size(); i < end; ++i) {
tf2xla::TensorMetadata* metadata = transfer->add_metadata();
metadata->set_type(types[i]);
shapes[i].AsProto(metadata->mutable_shape());
}
}
}
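// Registers device-to-host transfer metadata for `key`. A duplicate
// registration with identical metadata is a no-op; conflicting metadata is an
// error.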
Status XlaCompiler::SetDeviceToHostMetadata(
const string& key, absl::Span<const DataType> types,
absl::Span<const TensorShape> shapes) {
if (host_compute_sends_.find(key) != host_compute_sends_.end()) {
tf2xla::HostTransferMetadata& existing_transfer = host_compute_sends_[key];
tf2xla::HostTransferMetadata new_transfer;
SetTransfer(key, types, shapes, &new_transfer);
if (xla::protobuf_util::ProtobufEquals(existing_transfer, new_transfer)) {
return absl::OkStatus();
} else {
return errors::InvalidArgument(
"Duplicate calls to SetDeviceToHostMetadata with key ", key);
}
}
tf2xla::HostTransferMetadata& transfer = host_compute_sends_[key];
SetTransfer(key, types, shapes, &transfer);
return absl::OkStatus();
}
Status XlaCompiler::GetDeviceToHostShapes(
const string& key, std::vector<TensorShape>* shapes) const {
const auto iter = host_compute_sends_.find(key);
if (iter == host_compute_sends_.end()) {
return errors::InvalidArgument(
"No host compute send shapes registered for key ", key);
}
shapes->clear();
for (int i = 0; i < iter->second.metadata_size(); ++i) {
TensorShape shape(iter->second.metadata(i).shape());
shapes->push_back(shape);
}
return absl::OkStatus();
}
Status XlaCompiler::SetHostToDeviceMetadata(
const string& key, absl::Span<const DataType> types,
absl::Span<const TensorShape> shapes) {
if (host_compute_recvs_.find(key) != host_compute_recvs_.end()) {
tf2xla::HostTransferMetadata& existing_transfer = host_compute_recvs_[key];
tf2xla::HostTransferMetadata new_transfer;
SetTransfer(key, types, shapes, &new_transfer);
if (xla::protobuf_util::ProtobufEquals(existing_transfer, new_transfer)) {
return absl::OkStatus();
} else {
return errors::InvalidArgument(
"Duplicate calls to SetHostToDeviceMetadata with key ", key);
}
}
tf2xla::HostTransferMetadata& transfer = host_compute_recvs_[key];
SetTransfer(key, types, shapes, &transfer);
return absl::OkStatus();
}
Status XlaCompiler::GetHostComputeControlDependency(
const string& host_compute_name, xla::XlaOp* handle) {
const auto iter = host_compute_control_output_.find(host_compute_name);
if (iter == host_compute_control_output_.end()) {
return errors::InvalidArgument(
"No registered control handle for host compute Op '", host_compute_name,
"'");
} else {
*handle = iter->second;
}
return absl::OkStatus();
}
Status XlaCompiler::SetHostComputeControlDependency(
const string& host_compute_name, const xla::XlaOp handle) {
if (host_compute_control_output_.find(host_compute_name) !=
host_compute_control_output_.end()) {
return errors::InvalidArgument(
"Duplicate control handles registered for host compute Op ",
host_compute_name);
}
host_compute_control_output_[host_compute_name] = handle;
return absl::OkStatus();
}
void XlaCompiler::PushNodeTokenMapping() {
node_token_mapping_stack_.emplace(std::map<string, xla::XlaOp>{});
}
Status XlaCompiler::PopNodeTokenMapping() {
if (node_token_mapping_stack_.empty()) {
return errors::FailedPrecondition(
"Calling PopNodeTokenMapping() when node_token_mapping_stack_ is "
"empty.");
}
node_token_mapping_stack_.pop();
return absl::OkStatus();
}
Status XlaCompiler::SetNodeToken(const string& node_name, const xla::XlaOp op) {
if (node_token_mapping_stack_.empty()) {
return errors::FailedPrecondition(
"Calling SetNodeToken() when node_token_mapping_stack_ is "
"empty.");
}
auto insert_result = node_token_mapping_stack_.top().insert({node_name, op});
if (!insert_result.second) {
return errors::FailedPrecondition("Token mapping already exists for node ",
node_name);
}
return absl::OkStatus();
}
absl::StatusOr<xla::XlaOp> XlaCompiler::GetNodeToken(const string& node_name) {
if (node_token_mapping_stack_.empty()) {
return errors::FailedPrecondition(
"Calling GetNodeToken() when node_token_mapping_stack_ is "
"empty.");
}
auto iter = node_token_mapping_stack_.top().find(node_name);
if (iter == node_token_mapping_stack_.top().end()) {
return errors::FailedPrecondition("Cannot find token mapping for node ",
node_name);
}
return iter->second;
}
} | #include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2xla/literal_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/literal_test_util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
class XlaCompilerTest : public ::testing::Test {
protected:
void SetUp() override {
client_ = xla::ClientLibrary::LocalClientOrDie();
XlaOpRegistry::RegisterCompilationKernels();
FunctionDefLibrary flib;
flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib));
}
XlaCompiler::Options DefaultOptions() {
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
options.client = client_;
options.flib_def = flib_def_.get();
return options;
}
FunctionLibraryDefinition* LocalFlibDef(XlaCompiler* compiler) {
return compiler->local_flib_def_.get();
}
xla::Client* client_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
};
namespace {
class DummyResourceForTest : public ResourceBase {
public:
string DebugString() const override { return "dummy"; }
void Increment() { ++value_; }
int Get() { return value_; }
private:
int value_ = 0;
};
class DummyReadResourceOp : public XlaOpKernel {
public:
explicit DummyReadResourceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ResourceMgr* rm = ctx->op_kernel_context()->resource_manager();
OP_REQUIRES(ctx, rm, errors::Internal("No resource manager."));
DummyResourceForTest* dummy;
OP_REQUIRES_OK(ctx, rm->Lookup<DummyResourceForTest>(
rm->default_container(), "dummy", &dummy));
dummy->Increment();
dummy->Unref();
ctx->SetOutput(0, ctx->Input(0));
ctx->SetOutput(1, ctx->Input(0));
}
};
class DummyReadResourceCC {
public:
DummyReadResourceCC(const Scope& scope, const Input& value) {
if (!scope.ok()) return;
auto _value = ops::AsNodeOut(scope, value);
if (!scope.ok()) return;
Node* ret;
const auto unique_name = scope.GetUniqueNameForOp("DummyReadResource");
auto builder = NodeBuilder(unique_name, "DummyReadResource").Input(_value);
scope.UpdateBuilder(&builder);
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
if (!scope.ok()) return;
scope.UpdateStatus(scope.DoShapeInference(ret));
if (!scope.ok()) return;
this->output1_ = Output(ret, 0);
this->output2_ = Output(ret, 1);
}
Output output1_;
Output output2_;
};
REGISTER_OP("DummyReadResource")
.Input("input: int32")
.Output("output1: int32")
.Output("output2: int32")
.SetShapeFn(shape_inference::UnknownShape)
.Doc(R"doc(
A dummy Op.
input: dummy input.
output1: dummy output.
output2: dummy output.
)doc");
REGISTER_XLA_OP(Name("DummyReadResource"), DummyReadResourceOp);
class DummyDuplicateOp : public XlaOpKernel {
public:
explicit DummyDuplicateOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
ctx->SetOutput(0, ctx->Input(0));
}
};
REGISTER_OP("DummyDuplicateOp")
.Input("input: int32")
.Output("output: int32")
.Doc(R"doc(
A dummy Op.
input: dummy input.
output: dummy output.
)doc");
REGISTER_XLA_OP(Name("DummyDuplicateOp").Device(DEVICE_CPU_XLA_JIT),
DummyDuplicateOp);
REGISTER_XLA_OP(Name("DummyDuplicateOp").Device(DEVICE_GPU_XLA_JIT),
DummyDuplicateOp);
TEST_F(XlaCompilerTest, EmptyReturnValues) {
XlaCompiler compiler(DefaultOptions());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph),
{}, &result));
TF_ASSERT_OK(client_->Execute(*result.computation, {}).status());
}
TEST_F(XlaCompilerTest, Simple) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
xla::Literal param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42});
xla::Literal param1_literal = xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(param0_literal).value();
std::unique_ptr<xla::GlobalData> param1_data =
client_->TransferToServer(param1_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client_
->Execute(*result.computation, {param0_data.get(), param1_data.get()})
.value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal expected0 = xla::LiteralUtil::CreateR1<int32>({4, 143});
xla::Literal expected_literal = xla::LiteralUtil::MakeTuple({&expected0});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
absl::StatusOr<std::unique_ptr<xla::HloModule>> LoadModuleFromHloProto(
const xla::HloModuleProto& module_proto) {
TF_ASSIGN_OR_RETURN(auto module_config,
xla::HloModule::CreateModuleConfigFromProto(
module_proto, xla::GetDebugOptionsFromFlags()));
return xla::CreateModuleFromProto(module_proto, module_config);
}
TEST_F(XlaCompilerTest, SimpleDynamicShapeParameter) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
  args[0].shape = xla::ShapeUtil::MakeShape(xla::S32, {2}, {true});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
auto hlo = result.computation->proto();
TF_ASSERT_OK_AND_ASSIGN(auto module, LoadModuleFromHloProto(hlo));
EXPECT_EQ(module->computation_count(), 1);
EXPECT_TRUE(module->mutable_computation(0)
->parameter_instruction(0)
->shape()
.is_dynamic());
}
TEST_F(XlaCompilerTest, OutOfOrderGraph) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto d = ops::_Retval(scope.WithOpName("D"), a, 0);
auto c = ops::Add(scope.WithOpName("C"), a, b);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompileOptions compile_options;
compile_options.always_return_tuple = false;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
xla::Literal param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42});
xla::Literal param1_literal = xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(param0_literal).value();
std::unique_ptr<xla::GlobalData> param1_data =
client_->TransferToServer(param1_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client_
->Execute(*result.computation, {param0_data.get(), param1_data.get()})
.value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
EXPECT_TRUE(xla::LiteralTestUtil::Equal(param0_literal, actual_literal));
}
TEST_F(XlaCompilerTest, HonorShapeRepresentationFnForUnwrittenResource) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 0);
auto d = ops::_Retval(scope.WithOpName("D"), var, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kVariable;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 3});
auto options = DefaultOptions();
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[](const TensorShape& shape, DataType dt, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(dt, shape, &xla_shape));
*xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1});
return xla_shape;
};
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompilationResult result;
XlaCompiler::CompileOptions compile_options;
compile_options.return_updated_values_for_all_resources = true;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
xla::Shape transposed =
xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {0, 1});
EXPECT_EQ(result.xla_output_shape,
xla::ShapeUtil::MakeTupleShape({transposed}));
}
TEST_F(XlaCompilerTest, HonorShapeRepresentationFnForFastMemVar) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 0);
auto d = ops::_Retval(scope.WithOpName("D"), var, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kVariable;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 3});
args[0].fast_mem = true;
auto options = DefaultOptions();
int fast_mem_arg_count = 0;
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[&fast_mem_arg_count](
const TensorShape& shape, DataType dt, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(dt, shape, &xla_shape));
*xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1});
if (use_fast_memory) {
fast_mem_arg_count++;
}
return xla_shape;
};
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompilationResult result;
XlaCompiler::CompileOptions compile_options;
compile_options.return_updated_values_for_all_resources = true;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
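  // The representation fn should see use_fast_memory twice: once for the
  // argument and once for the updated resource returned from the computation.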
EXPECT_EQ(fast_mem_arg_count, 2);
}
TEST_F(XlaCompilerTest, HonorShapeRepresentationFnForRetVal) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1);
auto identity = ops::Identity(scope.WithOpName("VIdentity"), var);
auto write = ops::AssignAddVariableOp(scope, identity, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1));
auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 3});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2, 3});
auto options = DefaultOptions();
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[](const TensorShape& shape, DataType dt, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(dt, shape, &xla_shape));
*xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1});
return xla_shape;
};
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
xla::Shape transposed =
xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {0, 1});
EXPECT_EQ(result.xla_output_shape,
xla::ShapeUtil::MakeTupleShape({transposed, transposed}));
EXPECT_EQ(result.computation->GetProgramShape().value().result(),
xla::ShapeUtil::MakeTupleShape({transposed, transposed}));
}
TEST_F(XlaCompilerTest, TransposeVariables) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1);
auto identity = ops::Identity(scope.WithOpName("VIdentity"), var);
auto write = ops::AssignAddVariableOp(scope, identity, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto transposed_read = ops::Transpose(scope, read, {1, 0});
auto reshape = ops::Reshape(scope, transposed_read, {2, 3});
auto d = ops::_Retval(scope.WithOpName("D"), reshape, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 3});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2, 3});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "transpose",
std::move(graph), args, &result));
xla::Shape transposed =
xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {1, 0});
EXPECT_EQ(result.xla_output_shape,
xla::ShapeUtil::MakeTupleShape({transposed, transposed}));
}
TEST_F(XlaCompilerTest, UnrankedFakeParam) {
Scope scope = Scope::NewRootScope().ExitOnError();
PartialTensorShape shape;
auto a = ops::FakeParam(scope, DT_INT32, shape);
auto ret = ops::_Retval(scope.WithOpName("D"), a, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "compile",
std::move(graph), {}, &result));
EXPECT_EQ(result.xla_output_shape,
xla::ShapeUtil::MakeTupleShape(
{xla::ShapeUtil::MakeShape(xla::S32, {0})}));
}
TEST_F(XlaCompilerTest, MixedOrderArguments) {
for (bool swap_order : {false, true}) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto var =
ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, swap_order ? 0 : 1);
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, swap_order ? 1 : 0);
auto identity = ops::Identity(scope.WithOpName("VIdentity"), var);
auto write = ops::AssignAddVariableOp(scope, identity, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1));
auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
if (swap_order) {
std::swap(args[0], args[1]);
}
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompileOptions compile_options;
compile_options.always_return_tuple = false;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
EXPECT_THAT(result.input_mapping, ::testing::ElementsAre(0, 1));
}
}
TEST_F(XlaCompilerTest, HasSaneErrorOnNonCompileTimeConstantInputToReshape) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
auto c = ops::Reshape(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
Status status =
compiler.CompileGraph(XlaCompiler::CompileOptions(), "reshape",
std::move(graph), args, &result);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(), "depends on a parameter"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node C}}"))
<< status.message();
EXPECT_TRUE(
absl::StrContains(status.message(), "must be a compile-time constant"))
<< status.message();
}
TEST_F(XlaCompilerTest, ConstantOutputs) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::Const<int32>(scope.WithOpName("B"), 7);
auto c = ops::Neg(scope.WithOpName("C"), a);
auto d = ops::_Retval(scope.WithOpName("D"), b, 0);
auto e = ops::_Retval(scope.WithOpName("E"), c, 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
XlaCompiler::Options options = DefaultOptions();
XlaCompiler compiler(options);
{
std::unique_ptr<Graph> graph_copy(new Graph(OpRegistry::Global()));
CopyGraph(*graph, graph_copy.get());
XlaCompiler::CompileOptions compile_options;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "constants",
std::move(graph_copy), args, &result));
ASSERT_EQ(2, result.outputs.size());
EXPECT_FALSE(result.outputs[0].is_constant);
EXPECT_FALSE(result.outputs[1].is_constant);
xla::Literal param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(param0_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client_->Execute(*result.computation, {param0_data.get()}).value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal expected0 = xla::LiteralUtil::CreateR0<int32>(7);
xla::Literal expected1 = xla::LiteralUtil::CreateR1<int32>({-7, -42});
xla::Literal expected =
xla::LiteralUtil::MakeTuple({&expected0, &expected1});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected, actual_literal));
}
}
TEST_F(XlaCompilerTest, ConstantOutputsOfFunctionalNode) {
const Tensor seven = test::AsScalar<int>(7);
FunctionDef fdef = FunctionDefHelper::Create(
"foo", {"a_0:int32"}, {"const:int32", "a:int32"}, {},
{
{{"Const"}, "Const", {}, {{"dtype", DT_INT32}, {"value", seven}}},
},
{{"a", "a_0"}, {"const", "Const:output:0"}});
(*fdef.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary fdef_lib;
*(fdef_lib.add_function()) = fdef;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
Scope scope = Scope::NewRootScope().ExitOnError();
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
auto arg = ops::_Arg(scope.WithOpName("input_arg"), DT_INT32, 0);
NodeDef foo;
foo.set_name("foo");
foo.set_op("foo");
*foo.add_input() = "input_arg";
Status status;
scope.graph()->AddNode(foo, &status);
TF_ASSERT_OK(status);
NodeDef retval_1;
retval_1.set_name("retval_0");
retval_1.set_op(FunctionLibraryDefinition::kRetOp);
*retval_1.add_input() = "foo";
(*retval_1.mutable_attr())["T"].set_type(DT_INT32);
(*retval_1.mutable_attr())["index"].set_i(0);
scope.graph()->AddNode(retval_1, &status);
TF_ASSERT_OK(status);
NodeDef retval_2;
retval_2.set_name("retval_1");
retval_2.set_op(FunctionLibraryDefinition::kRetOp);
*retval_2.add_input() = "foo:1";
(*retval_2.mutable_attr())["T"].set_type(DT_INT32);
(*retval_2.mutable_attr())["index"].set_i(1);
scope.graph()->AddNode(retval_2, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
}
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1});
XlaCompiler::Options options = DefaultOptions();
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
options.flib_def = &flib_def;
XlaCompiler compiler(options);
XlaCompiler::CompileOptions compile_options;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "constants",
std::move(graph), args, &result));
ASSERT_EQ(2, result.outputs.size());
EXPECT_FALSE(result.outputs[1].is_constant);
}
TEST_F(XlaCompilerTest, ResourceManager) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = DummyReadResourceCC(scope.WithOpName("B"), a);
auto c = ops::Add(scope.WithOpName("C"), b.output2_, b.output1_);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
DummyResourceForTest* resource = new DummyResourceForTest();
auto options = DefaultOptions();
std::function<Status(ResourceMgr*)> populate_function =
[resource](ResourceMgr* rm) {
resource->Ref();
return rm->Create(rm->default_container(), "dummy", resource);
};
options.populate_resource_manager = &populate_function;
XlaCompiler compiler(options);
EXPECT_EQ(0, resource->Get());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "dummy",
std::move(graph), args, &result));
EXPECT_EQ(1, resource->Get());
resource->Unref();
}
TEST_F(XlaCompilerTest, DeterministicCompilation) {
const int64_t test_count = 2;
std::vector<XlaCompiler::CompilationResult> results(test_count);
for (int64_t i = 0; i < test_count; ++i) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::Neg(scope.WithOpName("B"), a);
auto c = ops::Neg(scope.WithOpName("C"), a);
auto d = ops::Add(scope.WithOpName("D"), b, c);
auto e = ops::_Retval(scope.WithOpName("E"), d, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
auto options = DefaultOptions();
XlaCompiler compiler(options);
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "dummy",
std::move(graph), args, &results[i]));
}
for (int64_t i = 1; i < test_count; ++i) {
const auto& m1 = results[i - 1].computation->proto();
const auto& m2 = results[i].computation->proto();
ASSERT_EQ(m1.computations_size(), m2.computations_size());
for (int k = 0; k < m1.computations_size(); k++) {
const auto& c1 = m1.computations(k);
const auto& c2 = m2.computations(k);
ASSERT_EQ(c1.instructions_size(), c2.instructions_size());
for (int j = 0; j < c1.instructions_size(); j++) {
auto instr1 = c1.instructions(j);
auto instr2 = c2.instructions(j);
instr1.clear_name();
instr1.clear_id();
instr1.clear_operand_ids();
instr2.clear_name();
instr2.clear_id();
instr2.clear_operand_ids();
string str1, str2;
LOG(INFO) << "instr1 = " << instr1.DebugString();
LOG(INFO) << "instr2 = " << instr2.DebugString();
instr1.AppendPartialToString(&str1);
instr2.AppendPartialToString(&str2);
EXPECT_EQ(str1, str2);
}
}
}
}
TEST_F(XlaCompilerTest, CanPassTensorArraysToAndFromComputation) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto flow = ops::Const<float>(scope, {});
auto grad1 = ops::TensorArrayGrad(scope, arg, flow, "grad1");
auto grad2 = ops::TensorArrayGrad(scope, arg, grad1.flow_out, "grad2");
auto index = ops::Const<int32>(scope, 1);
auto write = ops::TensorArrayWrite(scope, grad1.grad_handle, index, index,
grad2.flow_out);
auto read = ops::TensorArrayRead(scope, arg, index, write.flow_out, DT_INT32);
auto retval = ops::_Retval(scope.WithOpName("retval"), read, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kTensorArray;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({});
args[0].max_array_size = 2;
args[0].tensor_array_gradients = {"grad2"};
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
ASSERT_EQ(1, result.resource_updates.size());
const XlaCompiler::ResourceUpdate& update = result.resource_updates[0];
EXPECT_EQ(0, update.input_index);
EXPECT_EQ(DT_INT32, update.type);
EXPECT_EQ((std::set<string>{"grad1", "grad2"}),
update.tensor_array_gradients_accessed);
xla::Literal input_base = xla::LiteralUtil::CreateR1<int32>({7, 42});
xla::Literal input_grad2 = xla::LiteralUtil::CreateR1<int32>({-3, 101});
xla::Literal input = xla::LiteralUtil::MakeTuple({&input_base, &input_grad2});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(input).value();
std::unique_ptr<xla::GlobalData> actual =
client_->Execute(*result.computation, {param0_data.get()}).value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal output_read = xla::LiteralUtil::CreateR0<int32>(42);
xla::Literal output_base = xla::LiteralUtil::CreateR1<int32>({7, 42});
xla::Literal output_grad1 = xla::LiteralUtil::CreateR1<int32>({0, 1});
xla::Literal output_grad2 = xla::LiteralUtil::CreateR1<int32>({-3, 101});
xla::Literal output_resource =
xla::LiteralUtil::MakeTuple({&output_base, &output_grad1, &output_grad2});
xla::Literal expected_literal =
xla::LiteralUtil::MakeTuple({&output_read, &output_resource});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
TEST_F(XlaCompilerTest, UnwrittenTensorArrayGradientsAreNotComputationOutputs) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto flow = ops::Const<float>(scope, {});
auto grad1 = ops::TensorArrayGrad(scope, arg, flow, "grad1");
auto index = ops::Const<int32>(scope, 1);
auto read = ops::TensorArrayRead(scope, arg, index, grad1.flow_out, DT_INT32);
auto retval = ops::_Retval(scope.WithOpName("retval"), read, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kTensorArray;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({});
args[0].max_array_size = 2;
args[0].tensor_array_gradients = {"grad1"};
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
EXPECT_EQ(0, result.resource_updates.size());
}
TEST_F(XlaCompilerTest, NewTensorArrayGradientsAreComputationOutputs) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto flow = ops::Const<float>(scope, {});
auto grad1 = ops::TensorArrayGrad(scope, arg, flow, "grad2");
auto index = ops::Const<int32>(scope, 1);
auto read = ops::TensorArrayRead(scope, arg, index, grad1.flow_out, DT_INT32);
auto retval = ops::_Retval(scope.WithOpName("retval"), read, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kTensorArray;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({});
args[0].max_array_size = 2;
args[0].tensor_array_gradients = {"grad1"};
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
EXPECT_EQ(1, result.resource_updates.size());
}
TEST_F(XlaCompilerTest, UndefinedFunctionFails) {
XlaCompiler compiler(DefaultOptions());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
XlaCompiler::CompilationResult result;
NameAttrList name_attr;
name_attr.set_name("Function_NotDefined_");
Status status =
compiler.CompileFunction(XlaCompiler::CompileOptions(), name_attr,
{}, &result);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(), "is not defined."))
<< status.message();
}
FunctionDef FillFn() {
return FunctionDefHelper::Define(
"FillFn",
{"x: T", "dims: int32"},
{"y: T"},
{"T: {float, double, int32, int64}"},
{{{"y"}, "Fill", {"dims", "x"}, {{"T", "$T"}}}});
}
TEST_F(XlaCompilerTest, FunctionCallWithConstants) {
XlaCompiler compiler(DefaultOptions());
FunctionDefLibrary flib;
*flib.add_function() = FillFn();
TF_ASSERT_OK(flib_def_->AddFunctionDef(FillFn()));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto value = ops::Const<int32>(scope.WithOpName("value"), 1, {});
auto shape = ops::Const<int32>(scope.WithOpName("shape"), {5}, {1});
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib));
NodeDef def;
TF_ASSERT_OK(NodeDefBuilder("fill", "FillFn", flib_def_.get())
.Input(value.name(), 0, DT_INT32)
.Input(shape.name(), 1, DT_INT32)
.Finalize(&def));
Status status;
Node* fill = scope.graph()->AddNode(def, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(scope.DoShapeInference(fill));
scope.graph()->AddEdge(value.node(), 0, fill, 0);
scope.graph()->AddEdge(shape.node(), 0, fill, 1);
auto retval = ops::_Retval(scope.WithOpName("retval"), Output(fill), 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "fill",
std::move(graph), args, &result));
}
TEST_F(XlaCompilerTest, LocalFunctionWithWrongArgumentsFail) {
XlaCompiler compiler(DefaultOptions());
auto local_flib_def = LocalFlibDef(&compiler);
TF_ASSERT_OK(local_flib_def->AddFunctionDef(test::function::XTimesTwo()));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
XlaCompiler::CompilationResult result;
NameAttrList name_attr;
name_attr.set_name("XTimesTwo");
Status status =
compiler.CompileFunction(XlaCompiler::CompileOptions(), name_attr,
{}, &result);
ASSERT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(), "is not defined."))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "Attr T is not found"))
<< status.message();
}
FunctionDef SliceFn() {
return FunctionDefHelper::Define(
"SliceFn",
{"x: T", "begin: Index", "size: Index"},
{"y: T"},
{"T: {float, double, int32, int64}", "Index: {int32,int64}"},
{{{"y"},
"Slice",
{"x", "begin", "size"},
{{"T", "$T"}, {"Index", "$Index"}}}});
}
TEST_F(XlaCompilerTest, SliceWithDynamicBegins) {
XlaCompiler compiler(DefaultOptions());
FunctionDefLibrary flib;
*flib.add_function() = SliceFn();
TF_ASSERT_OK(flib_def_->AddFunctionDef(SliceFn()));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto value = ops::Const<int32>(scope.WithOpName("shape"), {5}, {1});
auto begin = ops::_Arg(scope.WithOpName("arg"), DT_INT32, 0);
auto size = ops::Const<int32>(scope.WithOpName("value"), {1}, {1});
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib));
NodeDef def;
TF_ASSERT_OK(NodeDefBuilder("slice", "SliceFn", flib_def_.get())
.Input(value.name(), 0, DT_INT32)
.Input(begin.node()->name(), 1, DT_INT32)
.Input(size.name(), 2, DT_INT32)
.Finalize(&def));
Status status;
Node* slice = scope.graph()->AddNode(def, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(scope.DoShapeInference(slice));
scope.graph()->AddEdge(value.node(), 0, slice, 0);
scope.graph()->AddEdge(begin.node(), 0, slice, 1);
scope.graph()->AddEdge(size.node(), 0, slice, 2);
auto retval = ops::_Retval(scope.WithOpName("retval"), Output(slice), 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({1});
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "slice",
std::move(graph), args, &result));
}
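// Runs `result` with argument {7, 42} and variable value {-3, 101}, and
// checks that the output tuple contains the computed retval and the updated
// variable value.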
void RunAndCheckVariablesComputation(
xla::Client* client, const XlaCompiler::CompilationResult& result) {
xla::Literal param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42});
xla::Literal param1_literal = xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::GlobalData> param0_data =
client->TransferToServer(param0_literal).value();
std::unique_ptr<xla::GlobalData> param1_data =
client->TransferToServer(param1_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client
->Execute(*result.computation, {param0_data.get(), param1_data.get()})
.value();
xla::Literal actual_literal = client->Transfer(*actual).value();
xla::Literal expected0 = xla::LiteralUtil::CreateR1<int32>({5, 144});
xla::Literal expected1 = xla::LiteralUtil::CreateR1<int32>({4, 143});
xla::Literal expected_literal =
xla::LiteralUtil::MakeTuple({&expected0, &expected1});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
TEST_F(XlaCompilerTest, Variables) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1);
auto identity = ops::Identity(scope.WithOpName("VIdentity"), var);
auto write = ops::AssignAddVariableOp(scope, identity, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1));
auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
RunAndCheckVariablesComputation(client_, result);
}
TEST_F(XlaCompilerTest, ResultLayoutSingle) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Retval(scope.WithOpName("RET"), a, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 3});
auto options = DefaultOptions();
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[](const TensorShape& shape, DataType type, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(type, shape, &xla_shape));
*xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1});
return xla_shape;
};
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompilationResult result;
auto compile_options = XlaCompiler::CompileOptions();
compile_options.always_return_tuple = false;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "id", std::move(graph),
args, &result));
EXPECT_TRUE(xla::ShapeUtil::Equal(
result.xla_output_shape,
xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {0, 1})));
EXPECT_EQ(result.computation->GetProgramShape().value().result(),
result.xla_output_shape);
}
TEST_F(XlaCompilerTest, ResultLayoutMultiple) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Retval(scope.WithOpName("RET1"), a, 0);
auto c = ops::_Retval(scope.WithOpName("RET2"), a, 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 3});
auto options = DefaultOptions();
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[](const TensorShape& shape, DataType type, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_RETURN_IF_ERROR(TensorShapeToXLAShape(type, shape, &xla_shape));
*xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1});
return xla_shape;
};
shape_determination_fns.layout_preference_fn = UseNoPreferenceLayoutFn();
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "id",
std::move(graph), args, &result));
xla::Shape result_shape =
xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {0, 1});
EXPECT_TRUE(xla::ShapeUtil::Equal(
result.xla_output_shape,
xla::ShapeUtil::MakeTupleShape({result_shape, result_shape})));
EXPECT_EQ(result.computation->GetProgramShape().value().result(),
result.xla_output_shape);
}
TEST_F(XlaCompilerTest, ReturnResourceHandleOnly) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 0);
auto d = ops::_Retval(scope.WithOpName("D"), var, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kVariable;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
xla::Literal param1_literal = xla::LiteralUtil::CreateR1<int32>({-3, 101});
std::unique_ptr<xla::GlobalData> param1_data =
client_->TransferToServer(param1_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client_->Execute(*result.computation, {param1_data.get()}).value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal expected_literal = xla::LiteralUtil::MakeTuple({});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
TEST_F(XlaCompilerTest, ReturnResourceHandle) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1);
auto identity = ops::Identity(scope.WithOpName("VIdentity"), var);
auto write = ops::AssignAddVariableOp(scope, identity, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1));
auto r = ops::_Retval(scope.WithOpName("R"), var, 0);
auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
RunAndCheckVariablesComputation(client_, result);
}
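// Builds a small test graph that adds parameter A into resource variable V,
// reads the updated value back, and returns read + 1. Shared by the
// shape-representation-function tests below.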
absl::StatusOr<std::unique_ptr<Graph>> BuildTestGraph() {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1);
auto write = ops::AssignAddVariableOp(scope, var, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1));
auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
return std::move(graph);
}
TEST_F(XlaCompilerTest, VariableRepresentationShapeFunction) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> graph, BuildTestGraph());
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 2});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2, 2});
XlaCompiler::Options options = DefaultOptions();
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[](const TensorShape& shape, DataType type, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::PrimitiveType ptype;
TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(type, &ptype));
return xla::ShapeUtil::MakeShape(ptype, {shape.num_elements()});
};
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = false;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::ProgramShape> program_shape,
client_->GetComputationShape(*result.computation));
ASSERT_EQ(program_shape->parameters_size(), 2);
EXPECT_TRUE(
xla::ShapeUtil::Compatible(program_shape->parameters(0),
xla::ShapeUtil::MakeShape(xla::S32, {2, 2})));
EXPECT_TRUE(xla::ShapeUtil::Compatible(
program_shape->parameters(1), xla::ShapeUtil::MakeShape(xla::S32, {4})));
EXPECT_TRUE(xla::ShapeUtil::Compatible(
program_shape->result(),
xla::ShapeUtil::MakeTupleShape(
{xla::ShapeUtil::MakeShape(xla::S32, {2, 2}),
xla::ShapeUtil::MakeShape(xla::S32, {4})})));
xla::Literal param0_literal =
xla::LiteralUtil::CreateR2<int32>({{4, 55}, {1, -3}});
xla::Literal param1_literal =
xla::LiteralUtil::CreateR1<int32>({22, 11, 33, 404});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(param0_literal).value();
std::unique_ptr<xla::GlobalData> param1_data =
client_->TransferToServer(param1_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client_
->Execute(*result.computation, {param0_data.get(), param1_data.get()})
.value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal expected0 =
xla::LiteralUtil::CreateR2<int32>({{27, 67}, {35, 402}});
xla::Literal expected1 = xla::LiteralUtil::CreateR1<int32>({26, 66, 34, 401});
xla::Literal expected_literal =
xla::LiteralUtil::MakeTuple({&expected0, &expected1});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
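// Same graph as the previous test, but compiled as an entry computation, so
// the flattening shape_representation_fn is applied to the parameter and the
// return value as well as to the variable.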
TEST_F(XlaCompilerTest, ArgRetvalShapeRepresentationFunction) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Graph> graph, BuildTestGraph());
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 2});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2, 2});
XlaCompiler::Options options = DefaultOptions();
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
shape_determination_fns.shape_representation_fn =
[](const TensorShape& shape, DataType type, bool use_fast_memory,
XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> {
xla::PrimitiveType ptype;
TF_RETURN_IF_ERROR(DataTypeToPrimitiveType(type, &ptype));
return xla::ShapeUtil::MakeShape(ptype, {shape.num_elements()});
};
options.shape_determination_fns = shape_determination_fns;
XlaCompiler compiler(options);
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = true;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::ProgramShape> program_shape,
client_->GetComputationShape(*result.computation));
ASSERT_EQ(program_shape->parameters_size(), 2);
EXPECT_TRUE(xla::ShapeUtil::Compatible(
program_shape->parameters(0), xla::ShapeUtil::MakeShape(xla::S32, {4})));
EXPECT_TRUE(xla::ShapeUtil::Compatible(
program_shape->parameters(1), xla::ShapeUtil::MakeShape(xla::S32, {4})));
EXPECT_TRUE(xla::ShapeUtil::Compatible(
program_shape->result(),
xla::ShapeUtil::MakeTupleShape(
{xla::ShapeUtil::MakeShape(xla::S32, {4}),
xla::ShapeUtil::MakeShape(xla::S32, {4})})));
xla::Literal param0_literal =
xla::LiteralUtil::CreateR1<int32>({4, 55, 1, -3});
xla::Literal param1_literal =
xla::LiteralUtil::CreateR1<int32>({22, 11, 33, 404});
std::unique_ptr<xla::GlobalData> param0_data =
client_->TransferToServer(param0_literal).value();
std::unique_ptr<xla::GlobalData> param1_data =
client_->TransferToServer(param1_literal).value();
std::unique_ptr<xla::GlobalData> actual =
client_
->Execute(*result.computation, {param0_data.get(), param1_data.get()})
.value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal expected0 = xla::LiteralUtil::CreateR1<int32>({27, 67, 35, 402});
xla::Literal expected1 = xla::LiteralUtil::CreateR1<int32>({26, 66, 34, 401});
xla::Literal expected_literal =
xla::LiteralUtil::MakeTuple({&expected0, &expected1});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
TEST_F(XlaCompilerTest, FunctionWithInvalidOp) {
XlaCompiler compiler(DefaultOptions());
FunctionDefLibrary flib;
FunctionDef fn = FillFn();
NodeDef* node = fn.add_node_def();
node->set_name("Invalid");
node->set_op("InvalidOp");
node = fn.add_node_def();
node->set_name("Switch");
node->set_op("Switch");
*flib.add_function() = fn;
TF_ASSERT_OK(flib_def_->AddFunctionDef(fn));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto value = ops::Const<int32>(scope.WithOpName("value"), 1, {});
auto shape = ops::Const<int32>(scope.WithOpName("shape"), {5}, {1});
TF_ASSERT_OK(scope.graph()->AddFunctionLibrary(flib));
NodeDef def;
TF_ASSERT_OK(NodeDefBuilder("fill_fn", "FillFn", flib_def_.get())
.Input(value.name(), 0, DT_INT32)
.Input(shape.name(), 1, DT_INT32)
.Finalize(&def));
Status status;
Node* fill = scope.graph()->AddNode(def, &status);
TF_ASSERT_OK(status);
TF_ASSERT_OK(scope.DoShapeInference(fill));
scope.graph()->AddEdge(value.node(), 0, fill, 0);
scope.graph()->AddEdge(shape.node(), 0, fill, 1);
auto retval = ops::_Retval(scope.WithOpName("retval"), Output(fill), 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args;
XlaCompiler::CompilationResult result;
status = compiler.CompileGraph(XlaCompiler::CompileOptions(), "fill",
std::move(graph), args, &result);
ASSERT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(), "InvalidOp"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node fill_fn}}"))
<< status.message();
}
TEST_F(XlaCompilerTest, NodeWithInvalidDataType) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
NodeDef shape;
shape.set_name("Shape");
shape.set_op("Shape");
(*shape.mutable_attr())["T"].set_type(DT_INT32);
(*shape.mutable_attr())["out_type"].set_type(DT_BOOL);
Status status;
Node* shape_node = graph->AddNode(shape, &status);
TF_ASSERT_OK(status);
graph->AddControlEdge(graph->source_node(), shape_node);
std::vector<XlaCompiler::Argument> args;
XlaCompiler::CompilationResult result;
XlaCompiler compiler(DefaultOptions());
status = compiler.CompileGraph(XlaCompiler::CompileOptions(), "invalid_type",
std::move(graph), args, &result);
ASSERT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.message(),
"is not in the list of allowed values"))
<< status.message();
EXPECT_TRUE(absl::StrContains(status.message(), "{{node Shape}}"))
<< status.message();
}
TEST_F(XlaCompilerTest, SingleOpWithoutInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
NodeDef no_op;
no_op.set_name("NoOp");
no_op.set_op("NoOp");
Status status;
graph->AddNode(no_op, &status);
TF_ASSERT_OK(status);
std::vector<XlaCompiler::Argument> args;
XlaCompiler compiler(DefaultOptions());
{
std::unique_ptr<Graph> graph_copy(new Graph(OpRegistry::Global()));
CopyGraph(*graph, graph_copy.get());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "NoOp",
std::move(graph_copy), args, &result));
}
}
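// Minimal side-effecting XLA op used by the TokenInputAndOutput test below:
// its Compile method simply records a fresh token for the node via
// SetNodeToken.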
class DummySideEffectingOp : public XlaOpKernel {
public:
explicit DummySideEffectingOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
OP_REQUIRES_OK(ctx, ctx->compiler()->SetNodeToken(
name(), xla::CreateToken(ctx->builder())));
}
};
REGISTER_OP("DummySideEffectingOp");
REGISTER_XLA_OP(Name("DummySideEffectingOp"), DummySideEffectingOp);
TEST_F(XlaCompilerTest, TokenInputAndOutput) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
NodeDef side_effecting_op;
side_effecting_op.set_name("DummySideEffectingOp");
side_effecting_op.set_op("DummySideEffectingOp");
AddNodeAttr(kXlaTokenInputNodesAttrName,
std::vector<string>{kXlaTokenArgNodeName}, &side_effecting_op);
AddNodeAttr(kXlaOriginalOutsideCompilationNodeName, side_effecting_op.name(),
&side_effecting_op);
Status status;
graph->AddNode(side_effecting_op, &status);
TF_ASSERT_OK(status);
EXPECT_TRUE(FixupSourceAndSinkEdges(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kResource;
args[0].resource_kind = XlaResource::kVariable;
args[0].initialized = true;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2, 2});
{
XlaCompiler::CompileOptions options;
options.is_entry_computation = true;
options.add_token_input_output = false;
options.return_updated_values_for_all_resources = true;
XlaCompiler compiler(DefaultOptions());
std::unique_ptr<Graph> graph_copy(new Graph(OpRegistry::Global()));
CopyGraph(*graph, graph_copy.get());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(options, "NoOp", std::move(graph_copy),
args, &result));
EXPECT_EQ(result.xla_input_shapes.size(), 1);
EXPECT_TRUE(result.xla_output_shape.IsTuple());
EXPECT_EQ(xla::ShapeUtil::TupleElementCount(result.xla_output_shape), 1);
}
{
XlaCompiler::CompileOptions options;
options.is_entry_computation = false;
options.add_token_input_output = true;
options.return_updated_values_for_all_resources = true;
XlaCompiler compiler(DefaultOptions());
std::unique_ptr<Graph> graph_copy(new Graph(OpRegistry::Global()));
CopyGraph(*graph, graph_copy.get());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(options, "NoOp", std::move(graph_copy),
args, &result));
EXPECT_EQ(result.xla_input_shapes.size(), 2);
EXPECT_TRUE(result.xla_input_shapes[1].IsToken());
EXPECT_TRUE(result.xla_output_shape.IsTuple());
EXPECT_EQ(xla::ShapeUtil::TupleElementCount(result.xla_output_shape), 2);
EXPECT_TRUE(xla::ShapeUtil::GetTupleElementShape(result.xla_output_shape, 1)
.IsToken());
}
}
TEST_F(XlaCompilerTest, OpsWithTensorListInput) {
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
{
Scope scope = Scope::NewRootScope().ExitOnError();
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0);
auto result = ops::Const<bool>(scope, {true}, {});
ops::_Retval(scope.WithOpName("ret"), result, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(*graph, "cond", &fdef));
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0);
ops::_Retval(scope.WithOpName("ret"), arg, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(*graph, "body", &fdef));
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
Scope scope = Scope::NewRootScope().ExitOnError();
auto element_shape = ops::Const<int32>(scope, {1}, {1});
auto max_elements = ops::Const<int32>(scope, {10}, {});
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0);
std::initializer_list<Output> out = {arg, arg};
auto add_n = ops::AddN(scope, out);
NameAttrList cond_fn, body_fn;
cond_fn.set_name("cond");
body_fn.set_name("body");
auto while_op =
ops::While(scope, std::initializer_list<Input>{arg}, cond_fn, body_fn);
auto ret0 = ops::_Retval(scope.WithOpName("ret0"), add_n, 0);
auto ret1 = ops::_Retval(scope.WithOpName("ret1"), while_op.output[0], 1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kTensorList;
xla::Shape tensor_list_element_shape;
TF_ASSERT_OK(TensorShapeToXLAShape(DT_INT32, TensorShape{1},
&tensor_list_element_shape));
xla::Shape index_shape;
TF_ASSERT_OK(TensorShapeToXLAShape(DT_INT32, TensorShape{}, &index_shape));
std::vector<xla::Shape> shapes{tensor_list_element_shape, index_shape};
xla::Shape arg_shape = xla::ShapeUtil::MakeTupleShape(shapes);
args[0].shape = arg_shape;
XlaCompiler::Options options = DefaultOptions();
options.flib_def = &flib_def;
XlaCompiler compiler(options);
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add",
std::move(graph), args, &result));
ASSERT_EQ(result.outputs.size(), 2);
const XlaCompiler::OutputDescription& output0 = result.outputs[0];
ASSERT_TRUE(output0.is_tensor_list);
const XlaCompiler::OutputDescription& output1 = result.outputs[1];
ASSERT_TRUE(output1.is_tensor_list);
}
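// Runs a While loop that counts arg0 up to 10 while only reading two resource
// variables, then checks that the untouched variables surface as pass-through
// outputs (input_index 1 and 2) and that execution yields (10, 2, 1).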
TEST_F(XlaCompilerTest, WhileWithResources) {
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
{
Scope scope = Scope::NewRootScope().ExitOnError();
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_RESOURCE, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_RESOURCE, 2);
auto less = ops::Less(scope, arg0, ops::Const<int32>(scope, 10));
(void)ops::_Retval(scope.WithOpName("ret"), less, 0);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(*graph, "cond", &fdef));
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
{
Scope scope = Scope::NewRootScope().ExitOnError();
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_RESOURCE, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_RESOURCE, 2);
auto read1 = ops::ReadVariableOp(scope.WithOpName("read1"), arg1, DT_INT32);
auto plus_read1 = ops::Add(scope, arg0, read1);
auto read2 = ops::ReadVariableOp(scope.WithOpName("read2"), arg2, DT_INT32);
auto minus_read2 = ops::Sub(scope, plus_read1, read2);
(void)ops::_Retval(scope.WithOpName("ret0"), minus_read2, 0);
(void)ops::_Retval(scope.WithOpName("ret1"), arg1, 1);
(void)ops::_Retval(scope.WithOpName("ret2"), arg2, 2);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(*graph, "body", &fdef));
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_RESOURCE, 1);
auto arg2 = ops::_Arg(scope.WithOpName("arg2"), DT_RESOURCE, 2);
NameAttrList cond_fn, body_fn;
cond_fn.set_name("cond");
body_fn.set_name("body");
auto while_op = ops::While(
scope, std::initializer_list<Input>{arg0, arg1, arg2}, cond_fn, body_fn);
(void)ops::_Retval(scope.WithOpName("ret0"), while_op.output[0], 0);
(void)ops::_Retval(scope.WithOpName("ret1"), while_op.output[1], 1);
(void)ops::_Retval(scope.WithOpName("ret2"), while_op.output[2], 2);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(3);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({});
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({});
args[2].kind = XlaCompiler::Argument::kResource;
args[2].resource_kind = XlaResource::kVariable;
args[2].initialized = true;
args[2].type = DT_INT32;
args[2].shape = TensorShape({});
XlaCompiler::Options options = DefaultOptions();
options.flib_def = &flib_def;
XlaCompiler compiler(options);
XlaCompiler::CompileOptions compile_options = XlaCompiler::CompileOptions();
compile_options.return_updated_values_for_all_resources = true;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "tested_while_with_vars",
std::move(graph), args, &result));
ASSERT_EQ(result.outputs.size(), 3);
const XlaCompiler::OutputDescription& output1 = result.outputs[1];
ASSERT_EQ(output1.input_index, 1);
const XlaCompiler::OutputDescription& output2 = result.outputs[2];
ASSERT_EQ(output2.input_index, 2);
xla::Literal literal0 = xla::LiteralUtil::CreateR0<int32>(0);
xla::Literal literal1 = xla::LiteralUtil::CreateR0<int32>(2);
xla::Literal literal2 = xla::LiteralUtil::CreateR0<int32>(1);
std::unique_ptr<xla::GlobalData> data0 =
client_->TransferToServer(literal0).value();
std::unique_ptr<xla::GlobalData> data1 =
client_->TransferToServer(literal1).value();
std::unique_ptr<xla::GlobalData> data2 =
client_->TransferToServer(literal2).value();
std::unique_ptr<xla::GlobalData> actual =
client_
->Execute(*result.computation,
{data0.get(), data1.get(), data2.get()})
.value();
xla::Literal actual_literal = client_->Transfer(*actual).value();
xla::Literal expected0 = xla::LiteralUtil::CreateR0<int32>(10);
xla::Literal expected1 = xla::LiteralUtil::CreateR0<int32>(2);
xla::Literal expected2 = xla::LiteralUtil::CreateR0<int32>(1);
xla::Literal expected_literal =
xla::LiteralUtil::MakeTuple({&expected0, &expected1, &expected2});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
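// Verifies that an _XlaSharding attribute on a _Retval node is propagated to
// the root instruction of the compiled computation as a tuple sharding.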
TEST_F(XlaCompilerTest, SetShardingForReturnedTuple) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Retval(scope.WithOpName("B"), a, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
auto node_name_index = graph->BuildNodeNameIndex();
Node* ret_node = node_name_index["B"];
ASSERT_NE(ret_node, nullptr);
xla::Array<int64_t> tile_assignment({2});
tile_assignment.FillIota(0);
xla::HloSharding sharding = xla::HloSharding::Tile(tile_assignment);
ret_node->AddAttr("_XlaSharding", sharding.ToProto().SerializeAsString());
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "test",
std::move(graph), args, &result));
const auto& hlo_module_proto = result.computation->proto();
ASSERT_EQ(hlo_module_proto.computations_size(), 1);
const auto& hlo_computation_proto = hlo_module_proto.computations(0);
std::optional<xla::HloInstructionProto> root_instruction_proto;
for (const auto& inst : hlo_computation_proto.instructions()) {
if (inst.id() == hlo_computation_proto.root_id()) {
root_instruction_proto = inst;
break;
}
}
ASSERT_TRUE(root_instruction_proto);
xla::Shape tuple_shape = xla::ShapeUtil::MakeTupleShape(
{xla::ShapeUtil::MakeShape(xla::S32, {2})});
xla::HloSharding tuple_sharding = xla::HloSharding::Tuple(
tuple_shape, std::vector<xla::HloSharding>{sharding});
EXPECT_EQ(root_instruction_proto->sharding().SerializeAsString(),
tuple_sharding.ToProto().SerializeAsString());
}
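// With alias_resource_update enabled, the updated variable should appear in
// the computation's input/output alias table; it maps to parameter 0 here
// because the constant argument is compiled in as a constant rather than a
// runtime parameter.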
TEST_F(XlaCompilerTest, AliasResourceUpdates) {
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Const<int32>(scope.WithOpName("A"), {1, 2});
auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1);
auto write = ops::AssignAddVariableOp(scope, var, a);
auto read = ops::ReadVariableOp(
scope.WithControlDependencies(std::vector<Operation>{write}), var,
DT_INT32);
auto d = ops::_Retval(scope.WithOpName("D"), read, 0);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[0].constant_value = Tensor(DT_INT32, {1, 1});
args[0].initialized = true;
args[1].kind = XlaCompiler::Argument::kResource;
args[1].resource_kind = XlaResource::kVariable;
args[1].initialized = true;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultOptions());
XlaCompiler::CompileOptions compile_options;
compile_options.alias_resource_update = true;
XlaCompiler::CompilationResult result;
TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph),
args, &result));
const xla::HloInputOutputAliasProto& alias =
result.computation->proto().input_output_alias();
EXPECT_EQ(alias.entries_size(), 1);
EXPECT_EQ(alias.entries(0).parameter_number(), 0);
}
TEST_F(XlaCompilerTest, SetDeviceToHostMetadataExactDuplicate) {
XlaCompiler compiler(DefaultOptions());
const string& key = "comm_key";
std::vector<DataType> types{DT_INT32};
std::vector<TensorShape> shapes{TensorShape({2})};
TF_ASSERT_OK(compiler.SetDeviceToHostMetadata(key, types, shapes));
TF_ASSERT_OK(compiler.SetDeviceToHostMetadata(key, types, shapes));
}
TEST_F(XlaCompilerTest, SetDeviceToHostMetadataMismatchedDuplicate) {
XlaCompiler compiler(DefaultOptions());
const string& key = "comm_key";
std::vector<DataType> types{DT_INT32};
std::vector<TensorShape> shapes{TensorShape({2})};
std::vector<DataType> types2{DT_FLOAT};
std::vector<TensorShape> shapes2{TensorShape({1})};
TF_ASSERT_OK(compiler.SetDeviceToHostMetadata(key, types, shapes));
Status status = compiler.SetDeviceToHostMetadata(key, types2, shapes2);
EXPECT_EQ(status.code(), error::Code::INVALID_ARGUMENT);
}
TEST_F(XlaCompilerTest, SetHostToDeviceMetadataExactDuplicate) {
XlaCompiler compiler(DefaultOptions());
const string& key = "comm_key";
std::vector<DataType> types{DT_INT32};
std::vector<TensorShape> shapes{TensorShape({2})};
TF_ASSERT_OK(compiler.SetHostToDeviceMetadata(key, types, shapes));
TF_ASSERT_OK(compiler.SetHostToDeviceMetadata(key, types, shapes));
}
TEST_F(XlaCompilerTest, SetHostToDeviceMetadataMismatchedDuplicate) {
XlaCompiler compiler(DefaultOptions());
const string& key = "comm_key";
std::vector<DataType> types{DT_INT32};
std::vector<TensorShape> shapes{TensorShape({2})};
std::vector<DataType> types2{DT_FLOAT};
std::vector<TensorShape> shapes2{TensorShape({1})};
TF_ASSERT_OK(compiler.SetHostToDeviceMetadata(key, types, shapes));
Status status = compiler.SetHostToDeviceMetadata(key, types2, shapes2);
EXPECT_EQ(status.code(), error::Code::INVALID_ARGUMENT);
}
TEST_F(OpsTestBase, BuildSingleOpCompileArgument) {
TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2}), {0, 1});
TF_EXPECT_OK(RunOpKernel());
XlaCompiler::SingleOpCompileArgument arg(*context_);
EXPECT_THAT(arg.output_dtypes, ::testing::ElementsAreArray({DT_FLOAT}));
EXPECT_EQ(arg.node_def.SerializeAsString(),
context_->op_kernel().def().SerializeAsString());
EXPECT_EQ(arg.config_proto.ByteSizeLong(), 0);
}
TEST_F(OpsTestBase, CompileSingleOp) {
TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2}), {6.9, 4.2});
TF_EXPECT_OK(RunOpKernel());
XlaCompiler::SingleOpCompileArgument single_op_arg(*context_);
xla::Client* client = xla::ClientLibrary::LocalClientOrDie();
XlaOpRegistry::RegisterCompilationKernels();
FunctionDefLibrary flib;
std::unique_ptr<FunctionLibraryDefinition> flib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), flib));
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
options.client = client;
options.flib_def = flib_def.get();
XlaCompiler compiler(options);
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({1, 2});
args[0].constant_value = GetInput(0);
args[0].initialized = true;
XlaCompiler::CompilationResult result;
TF_EXPECT_OK(compiler.CompileSingleOp(XlaCompiler::CompileOptions(),
single_op_arg, args, &result));
std::unique_ptr<xla::GlobalData> actual =
client->Execute(*result.computation, {}).value();
xla::Literal actual_literal = client->Transfer(*actual).value();
xla::Literal expected0 = xla::LiteralUtil::CreateR2<float>({{6.9, 4.2}});
xla::Literal expected_literal = xla::LiteralUtil::MakeTuple({&expected0});
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85d220d5-9ed3-477a-8730-5b2484f55ad5 | cpp | tensorflow/tensorflow | logging | tensorflow/c/logging.cc | third_party/xla/third_party/tsl/tsl/platform/logging_test.cc | #include "tensorflow/c/logging.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stringprintf.h"
static ::tensorflow::string BuildMessage(const char* fmt, va_list args) {
::tensorflow::string message;
::tensorflow::strings::Appendv(&message, fmt, args);
return message;
}
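// C API entry point: forwards the formatted message to the LOG() macro that
// matches `level`, ignoring values outside [TF_INFO, TF_FATAL].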
void TF_Log(TF_LogLevel level, const char* fmt, ...) {
if (level < TF_INFO || level > TF_FATAL) return;
va_list args;
va_start(args, fmt);
auto message = BuildMessage(fmt, args);
va_end(args);
switch (level) {
case TF_INFO:
LOG(INFO) << message;
break;
case TF_WARNING:
LOG(WARNING) << message;
break;
case TF_ERROR:
LOG(ERROR) << message;
break;
case TF_FATAL:
LOG(FATAL) << message;
break;
}
}
void TF_VLog(int level, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
auto message = BuildMessage(fmt, args);
va_end(args);
VLOG(level) << message;
}
void TF_DVLog(int level, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
auto message = BuildMessage(fmt, args);
va_end(args);
DVLOG(level) << message;
} | #include "tsl/platform/logging.h"
#include <cerrno>
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <sstream>
#include <vector>
#include "absl/base/log_severity.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/path.h"
#include "tsl/platform/stacktrace_handler.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#ifdef PLATFORM_WINDOWS
#define popen _popen
#define pclose _pclose
#endif
static char* program_name;
namespace tsl {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
TEST(Logging, Log) {
LOG(INFO) << "Hello";
LOG(INFO) << "Another log message";
LOG(ERROR) << "Error message";
VLOG(1) << "A VLOG message";
VLOG(2) << "A higher VLOG message";
DVLOG(1) << "A DVLOG message";
DVLOG(2) << "A higher DVLOG message";
}
TEST(Logging, CheckChecks) {
CHECK(true);
CHECK(7 > 5);
string a("abc");
string b("xyz");
CHECK_EQ(a, a);
CHECK_NE(a, b);
CHECK_EQ(3, 3);
CHECK_NE(4, 3);
CHECK_GT(4, 3);
CHECK_GE(3, 3);
CHECK_LT(2, 3);
CHECK_LE(2, 3);
DCHECK(true);
DCHECK(7 > 5);
DCHECK_EQ(a, a);
DCHECK_NE(a, b);
DCHECK_EQ(3, 3);
DCHECK_NE(4, 3);
DCHECK_GT(4, 3);
DCHECK_GE(3, 3);
DCHECK_LT(2, 3);
DCHECK_LE(2, 3);
}
TEST(LoggingDeathTest, FailedChecks) {
string a("abc");
string b("xyz");
const char* p_const = "hello there";
const char* p_null_const = nullptr;
char mybuf[10];
char* p_non_const = mybuf;
char* p_null = nullptr;
CHECK_NOTNULL(p_const);
CHECK_NOTNULL(p_non_const);
ASSERT_DEATH(CHECK(false), "false");
ASSERT_DEATH(CHECK(9 < 7), "9 < 7");
ASSERT_DEATH(CHECK_EQ(a, b), "a == b");
ASSERT_DEATH(CHECK_EQ(3, 4), "3 == 4");
ASSERT_DEATH(CHECK_NE(3, 3), "3 != 3");
ASSERT_DEATH(CHECK_GT(2, 3), "2 > 3");
ASSERT_DEATH(CHECK_GE(2, 3), "2 >= 3");
ASSERT_DEATH(CHECK_LT(3, 2), "3 < 2");
ASSERT_DEATH(CHECK_LE(3, 2), "3 <= 2");
ASSERT_DEATH(CHECK(false), "false");
ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null)), "Must be non NULL");
ASSERT_DEATH(printf("%s", CHECK_NOTNULL(p_null_const)), "Must be non NULL");
#ifndef NDEBUG
ASSERT_DEATH(DCHECK(9 < 7), "9 < 7");
ASSERT_DEATH(DCHECK(9 < 7), "9 < 7");
ASSERT_DEATH(DCHECK_EQ(a, b), "a == b");
ASSERT_DEATH(DCHECK_EQ(3, 4), "3 == 4");
ASSERT_DEATH(DCHECK_NE(3, 3), "3 != 3");
ASSERT_DEATH(DCHECK_GT(2, 3), "2 > 3");
ASSERT_DEATH(DCHECK_GE(2, 3), "2 >= 3");
ASSERT_DEATH(DCHECK_LT(3, 2), "3 < 2");
ASSERT_DEATH(DCHECK_LE(3, 2), "3 <= 2");
#endif
}
TEST(InternalLogString, Basic) {
internal::LogString(__FILE__, __LINE__, absl::LogSeverity::kInfo,
"Hello there");
}
class TestSink : public TFLogSink {
public:
void Send(const TFLogEntry& entry) override {
ss_ << entry.text_message() << std::endl;
}
std::string Get() const { return ss_.str(); }
private:
std::stringstream ss_;
};
TEST(LogSinkTest, testLogSinks) {
const int sinks_initial_size = TFGetLogSinks().size();
TestSink sink;
TFAddLogSink(&sink);
EXPECT_EQ(TFGetLogSinks().size(), sinks_initial_size + 1);
LOG(INFO) << "Foo";
LOG(INFO) << "Bar";
EXPECT_EQ(sink.Get(), "Foo\nBar\n");
TFRemoveLogSink(&sink);
EXPECT_EQ(TFGetLogSinks().size(), sinks_initial_size);
}
std::string ReadFromFilePointer(FILE* fp) {
std::string result;
while (!feof(fp)) {
char buf[512];
size_t len = fread(buf, sizeof(buf[0]), 512, fp);
result.append(buf, len);
}
return result;
}
absl::StatusOr<std::string> ReadFromFile(const std::string& filename) {
std::shared_ptr<FILE> fp(fopen(filename.c_str(), "r"), fclose);
if (fp == nullptr) {
return absl::ErrnoToStatus(errno,
absl::StrFormat("Cannot fopen '%s'", filename));
}
return ReadFromFilePointer(fp.get());
}
class SubcommandTest : public ::testing::Test {
public:
static constexpr absl::string_view kLogVLog = "log_and_vlog";
static bool IsSubcommand(absl::string_view subcommand) {
return subcommand == kLogVLog;
}
static int Run(absl::string_view subcommand) {
CHECK_EQ(subcommand, kLogVLog);
LOG(INFO) << "LOG INFO";
LOG(WARNING) << "LOG WARNING";
LOG(ERROR) << "LOG ERROR";
LOG(INFO) << absl::StrFormat("VLOG_IS_ON(1)? %d", VLOG_IS_ON(1));
LOG(INFO) << absl::StrFormat("VLOG_IS_ON(2)? %d", VLOG_IS_ON(2));
LOG(INFO) << absl::StrFormat("VLOG_IS_ON(3)? %d", VLOG_IS_ON(3));
VLOG(1) << "VLevel 1";
VLOG(2) << "VLevel 2";
VLOG(3) << "VLevel 3";
return EXIT_SUCCESS;
}
protected:
absl::StatusOr<std::string> CaptureOutput(const char* invocation) {
std::shared_ptr<FILE> fp(popen(invocation, "r"), pclose);
if (fp == nullptr) {
return absl::ErrnoToStatus(
errno, absl::StrFormat("Cannot popen '%s'", invocation));
}
return ReadFromFilePointer(fp.get());
}
};
TEST_F(SubcommandTest, LogDefaultTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --alsologtostderr";
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, HasSubstr("LOG INFO"));
EXPECT_THAT(out, HasSubstr("LOG WARNING"));
EXPECT_THAT(out, HasSubstr("LOG ERROR"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 0"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 0"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0"));
}
TEST_F(SubcommandTest, MinLogLevelTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --minloglevel=1 --alsologtostderr";
#elif defined(PLATFORM_WINDOWS)
command = absl::StrFormat("set TF_CPP_MIN_LOG_LEVEL=1 && %s", command);
#else
command = absl::StrFormat("TF_CPP_MIN_LOG_LEVEL=1 %s", command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, Not(HasSubstr("LOG INFO")));
EXPECT_THAT(out, HasSubstr("LOG WARNING"));
EXPECT_THAT(out, HasSubstr("LOG ERROR"));
}
TEST_F(SubcommandTest, VLogDefaultTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --alsologtostderr";
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, Not(HasSubstr("VLevel 1")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 2")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
}
TEST_F(SubcommandTest, MaxVLogLevelTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --v=2 --alsologtostderr";
#elif defined(PLATFORM_WINDOWS)
command = absl::StrFormat("set TF_CPP_MAX_VLOG_LEVEL=2 && %s", command);
#else
command = absl::StrFormat("TF_CPP_MAX_VLOG_LEVEL=2 %s", command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, HasSubstr("VLevel 1"));
EXPECT_THAT(out, HasSubstr("VLevel 2"));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0"));
}
TEST_F(SubcommandTest, VModuleTest) {
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
#if defined(PLATFORM_GOOGLE)
command += " --vmodule=logging_test=2,shoobadooba=3 --alsologtostderr";
#elif defined(PLATFORM_WINDOWS)
command = absl::StrFormat(
"set TF_CPP_VMODULE=logging_test=2,shoobadooba=3 && %s", command);
#else
command = absl::StrFormat("TF_CPP_VMODULE=logging_test=2,shoobadooba=3 %s",
command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, HasSubstr("VLevel 1"));
EXPECT_THAT(out, HasSubstr("VLevel 2"));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(1)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(2)? 1"));
EXPECT_THAT(out, HasSubstr("VLOG_IS_ON(3)? 0"));
}
TEST_F(SubcommandTest, VLogFilenameTest) {
#if defined(PLATFORM_GOOGLE)
constexpr bool kVLogFilenameEnvVarIsSupported = false;
#else
constexpr bool kVLogFilenameEnvVarIsSupported = true;
#endif
if (!kVLogFilenameEnvVarIsSupported) {
GTEST_SKIP() << "Not supported on this platform";
}
std::string command = absl::StrFormat("%s %s", program_name, kLogVLog);
std::string filename = io::GetTempFilename("logging_test");
#if defined(PLATFORM_WINDOWS)
command = absl::StrFormat(
"set TF_CPP_VLOG_FILENAME=%s && set TF_CPP_MAX_VLOG_LEVEL=1 && %s",
filename, command);
#else
command = absl::StrFormat(
"TF_CPP_VLOG_FILENAME=%s TF_CPP_MAX_VLOG_LEVEL=1 %s", filename, command);
#endif
command += " 2>&1";
TF_ASSERT_OK_AND_ASSIGN(std::string out, CaptureOutput(command.c_str()));
EXPECT_THAT(out, Not(HasSubstr("LOG INFO")));
EXPECT_THAT(out, Not(HasSubstr("LOG WARNING")));
EXPECT_THAT(out, Not(HasSubstr("LOG ERROR")));
EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(1)?")));
EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(2)?")));
EXPECT_THAT(out, Not(HasSubstr("VLOG_IS_ON(3)?")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 1")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 2")));
EXPECT_THAT(out, Not(HasSubstr("VLevel 3")));
TF_ASSERT_OK_AND_ASSIGN(std::string log_file, ReadFromFile(filename));
EXPECT_THAT(log_file, HasSubstr("LOG INFO"));
EXPECT_THAT(log_file, HasSubstr("LOG WARNING"));
EXPECT_THAT(log_file, HasSubstr("LOG ERROR"));
EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(1)"));
EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(2)"));
EXPECT_THAT(log_file, HasSubstr("VLOG_IS_ON(3)"));
EXPECT_THAT(log_file, HasSubstr("VLevel 1"));
EXPECT_THAT(log_file, Not(HasSubstr("VLevel 2")));
EXPECT_THAT(log_file, Not(HasSubstr("VLevel 3")));
}
}
}
GTEST_API_ int main(int argc, char** argv) {
tsl::testing::InstallStacktraceHandler();
testing::InitGoogleTest(&argc, argv);
program_name = argv[0];
if (argc >= 2 && tsl::SubcommandTest::IsSubcommand(argv[1])) {
return tsl::SubcommandTest::Run(argv[1]);
}
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/logging.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/logging_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
131efe9b-b444-418c-9737-d0c1475dd8de | cpp | tensorflow/tensorflow | pjrt_attribute_map_util | third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util.cc | third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util_test.cc | #include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
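// Converts a PjRt attribute map (string -> PjRtValueType variant) into an
// IFRT AttributeMap, mapping each variant alternative onto the corresponding
// AttributeMap value wrapper.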
AttributeMap FromPjRtAttributeMap(
absl::flat_hash_map<std::string, xla::PjRtValueType> attributes) {
AttributeMap::Map result;
result.reserve(attributes.size());
for (auto& item : attributes) {
std::visit(
[&](auto& value) {
using T = std::decay_t<decltype(value)>;
const auto& key = item.first;
if constexpr (std::is_same_v<T, std::string>) {
result.insert({key, AttributeMap::StringValue(std::move(value))});
} else if constexpr (std::is_same_v<T, bool>) {
result.insert({key, AttributeMap::BoolValue(value)});
} else if constexpr (std::is_same_v<T, int64_t>) {
result.insert({key, AttributeMap::Int64Value(value)});
} else if constexpr (std::is_same_v<T, std::vector<int64_t>>) {
result.insert(
{key, AttributeMap::Int64ListValue(std::move(value))});
} else if constexpr (std::is_same_v<T, float>) {
result.insert({key, AttributeMap::FloatValue(value)});
}
},
item.second);
}
return AttributeMap(std::move(result));
}
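// Inverse of FromPjRtAttributeMap: unwraps each AttributeMap value back into
// the matching PjRtValueType alternative.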
absl::flat_hash_map<std::string, xla::PjRtValueType> ToPjRtAttributeMap(
AttributeMap attributes) {
absl::flat_hash_map<std::string, xla::PjRtValueType> result;
result.reserve(attributes.map().size());
for (auto& item : attributes.map()) {
std::visit(
[&](auto& value) {
using T = std::decay_t<decltype(value)>;
const auto& key = item.first;
if constexpr (std::is_same_v<T, AttributeMap::StringValue>) {
result.insert({key, std::move(value.value)});
} else if constexpr (std::is_same_v<T, AttributeMap::BoolValue>) {
result.insert({key, value.value});
} else if constexpr (std::is_same_v<T, AttributeMap::Int64Value>) {
result.insert({key, value.value});
} else if constexpr (std::is_same_v<T,
AttributeMap::Int64ListValue>) {
result.insert({key, std::move(value.value)});
} else if constexpr (std::is_same_v<T, AttributeMap::FloatValue>) {
result.insert({key, value.value});
}
},
item.second);
}
return result;
}
}
} | #include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
namespace {
TEST(PjRtAttributeMapUtilTest, FromPjRtAttributeMap) {
absl::flat_hash_map<std::string, PjRtValueType> pjrt_map({
{"string", xla::PjRtValueType(std::string("value"))},
{"bool", xla::PjRtValueType(true)},
{"int64", xla::PjRtValueType(int64_t{123})},
{"int64_list",
xla::PjRtValueType(std::vector<int64_t>({int64_t{1}, int64_t{2}}))},
{"float", xla::PjRtValueType(1.23f)},
});
EXPECT_EQ(FromPjRtAttributeMap(pjrt_map).map(),
AttributeMap::Map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list",
AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
}));
}
TEST(PjRtAttributeMapUtilTest, ToPjRtAttributeMap) {
AttributeMap map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list", AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
});
EXPECT_EQ(
ToPjRtAttributeMap(map),
(absl::flat_hash_map<std::string, xla::PjRtValueType>({
{"string", xla::PjRtValueType(std::string("value"))},
{"bool", xla::PjRtValueType(true)},
{"int64", xla::PjRtValueType(int64_t{123})},
{"int64_list",
xla::PjRtValueType(std::vector<int64_t>({int64_t{1}, int64_t{2}}))},
{"float", xla::PjRtValueType(1.23f)},
})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cea0912d-1692-4ae8-ae3b-f70f75185623 | cpp | tensorflow/tensorflow | basic_string_array | third_party/xla/xla/python/pjrt_ifrt/basic_string_array.cc | third_party/xla/xla/python/pjrt_ifrt/basic_string_array_test.cc | #include "xla/python/pjrt_ifrt/basic_string_array.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
std::string BasicStringArrayLayout::Serialize() const {
return std::string();
}
std::string BasicStringArrayLayout::ToString() const {
return "BasicStringArrayLayout: Dense, major-to-minor.";
}
bool BasicStringArrayLayout::operator==(const PjRtLayout& other) const {
auto* other_basic_string_array_layout =
dynamic_cast<const xla::ifrt::BasicStringArrayLayout*>(&other);
if (other_basic_string_array_layout == nullptr) {
return false;
}
return true;
}
void BasicStringArrayLayout::Hash(absl::HashState state) const {
}
char BasicStringArray::ID = 0;
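// Creates a BasicStringArray whose buffers become available asynchronously.
// The returned array's ready future resolves once `buffers` does, after
// checking that the number of per-device buffers matches the sharding.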
absl::StatusOr<tsl::RCReference<BasicStringArray>> BasicStringArray::Create(
Client* client, Shape shape, std::shared_ptr<const Sharding> sharding,
Future<Buffers> buffers, OnDoneWithBuffer on_done_with_buffer) {
if (!buffers.IsValid()) {
return absl::InvalidArgumentError("Got buffers_ future is invalid");
}
auto buffers_promise = Future<Buffers>::CreatePromise();
auto buffers_future = Future<Buffers>(buffers_promise);
auto ready_promise = Future<>::CreatePromise();
auto ready_future = Future<>(ready_promise);
auto buffer_validator =
[buffers_promise = std::move(buffers_promise),
ready_promise = std::move(ready_promise),
sharding = sharding](absl::StatusOr<Buffers> buffers) mutable {
if (!buffers.ok()) {
buffers_promise.Set(buffers.status());
ready_promise.Set(buffers.status());
return;
}
if (sharding->devices()->size() != (*buffers).size()) {
auto error = absl::FailedPreconditionError(absl::StrCat(
"Number of buffers: ", (*buffers).size(),
" does not match the number of devices in sharding: ",
sharding->devices()->size()));
buffers_promise.Set(error);
ready_promise.Set(error);
return;
}
buffers_promise.Set(std::move(buffers));
ready_promise.Set(absl::OkStatus());
};
buffers.OnReady(std::move(buffer_validator));
return tsl::MakeRef<BasicStringArray>(
client, std::move(shape), std::move(sharding), std::move(buffers_future),
std::move(ready_future), std::move(on_done_with_buffer));
}
BasicStringArray::BasicStringArray(Client* client, Shape shape,
std::shared_ptr<const Sharding> sharding,
Future<Buffers> buffers,
Future<> ready_future,
OnDoneWithBuffer on_done_with_buffer)
: client_(client),
shape_(std::move(shape)),
sharding_(std::move(sharding)),
buffers_(std::move(buffers)),
ready_future_(std::move(ready_future)),
on_done_with_buffer_(std::move(on_done_with_buffer)) {}
BasicStringArray::~BasicStringArray() { DeleteInternal(); }
Future<> BasicStringArray::Delete() {
DeleteInternal();
return Future<>(absl::OkStatus());
}
bool BasicStringArray::IsDeleted() const {
absl::MutexLock lock(&mu_);
return is_deleted_;
}
void BasicStringArray::DeleteInternal() {
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return;
}
if (on_done_with_buffer_) {
std::move(on_done_with_buffer_)();
}
is_deleted_ = true;
}
Future<> BasicStringArray::GetReadyFuture() const {
DCHECK(this);
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return Future<>(
absl::FailedPreconditionError("Array has already been deleted"));
}
return ready_future_;
}
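// Splits this array into one single-device array per shard. Each shard gets
// its own backing store that deep-copies the shard's strings once the source
// buffers become ready, so the pieces outlive the original buffers.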
absl::StatusOr<std::vector<tsl::RCReference<Array>>>
BasicStringArray::DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics semantics) {
DCHECK(this);
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
int num_shards = sharding_->devices()->size();
std::vector<Promise<Buffers>> buffer_promises;
buffer_promises.reserve(num_shards);
std::vector<Future<Buffers>> buffer_futures;
buffer_futures.reserve(num_shards);
struct PerShardBufferBackingStore {
void CopyFrom(absl::Span<const absl::string_view> input_buffer) {
strings.reserve(input_buffer.size());
string_views.reserve(input_buffer.size());
for (absl::string_view buf : input_buffer) {
strings.push_back(std::string(buf.data(), buf.size()));
string_views.push_back(strings.back());
}
}
std::vector<std::string> strings;
std::vector<absl::string_view> string_views;
};
std::vector<std::shared_ptr<PerShardBufferBackingStore>>
per_shard_buffer_backing_stores;
per_shard_buffer_backing_stores.reserve(num_shards);
std::vector<OnDoneWithBuffer> on_done_with_buffer_callbacks;
on_done_with_buffer_callbacks.reserve(num_shards);
for (int i = 0; i < num_shards; ++i) {
buffer_promises.push_back(Future<Buffers>::CreatePromise());
buffer_futures.push_back(Future<Buffers>(buffer_promises.back()));
auto backing_store = std::make_shared<PerShardBufferBackingStore>();
per_shard_buffer_backing_stores.push_back(backing_store);
on_done_with_buffer_callbacks.push_back(
[backing_store = std::move(backing_store)]() {});
}
buffers_.OnReady([buffer_promises = std::move(buffer_promises),
per_shard_buffer_backing_stores =
std::move(per_shard_buffer_backing_stores)](
absl::StatusOr<Buffers> buffers) mutable {
if (!buffers.ok()) {
for (auto& promise : buffer_promises) {
promise.Set(buffers.status());
}
per_shard_buffer_backing_stores.clear();
return;
}
auto num_shards = buffers->size();
for (int i = 0; i < num_shards; ++i) {
per_shard_buffer_backing_stores[i]->CopyFrom((*buffers)[i]);
Buffers buffers;
buffers.push_back(per_shard_buffer_backing_stores[i]->string_views);
buffer_promises[i].Set(std::move(buffers));
}
});
  TF_ASSIGN_OR_RETURN(auto shapes_and_shardings,
                      sharding_->Disassemble(shape_));
  std::vector<tsl::RCReference<Array>> arrays;
  arrays.reserve(num_shards);
  for (int i = 0; i < num_shards; ++i) {
    TF_ASSIGN_OR_RETURN(auto array,
                        BasicStringArray::Create(
                            client_, std::move(shapes_and_shardings[i].first),
                            std::move(shapes_and_shardings[i].second),
std::move(buffer_futures[i]),
std::move(on_done_with_buffer_callbacks[i])));
arrays.push_back(array);
}
return arrays;
}
Future<> BasicStringArray::CopyToHostBuffer(
void* data, std::optional<absl::Span<const int64_t>> byte_strides,
ArrayCopySemantics semantics) {
DCHECK(this);
return Future<>(absl::UnimplementedError("Not implemented"));
}
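// Deep-copies the string buffers onto a new sharding with the same number of
// devices; the copy's buffers are filled in asynchronously once the source
// buffers are ready.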
absl::StatusOr<tsl::RCReference<Array>> BasicStringArray::Copy(
std::optional<tsl::RCReference<xla::ifrt::DeviceList>> devices,
std::optional<xla::ifrt::MemoryKind> memory_kind,
ArrayCopySemantics semantics) {
DCHECK(this);
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
TF_ASSIGN_OR_RETURN(auto new_sharding, sharding().WithDeviceAssignment(
std::move(devices), memory_kind));
if (new_sharding->devices()->size() != sharding_->devices()->size()) {
return absl::InvalidArgumentError(absl::StrCat(
"Number of devices in new sharding: ", new_sharding->devices()->size(),
" does not match the number of devices in the current sharding: ",
sharding_->devices()->size()));
}
struct BufferBackingStore {
void AddShardData(absl::Span<const absl::string_view> input_buffer) {
auto& shard_strings = strings.emplace_back();
shard_strings.reserve(input_buffer.size());
auto& shard_string_views = string_views.emplace_back();
shard_string_views.reserve(input_buffer.size());
for (absl::string_view buf : input_buffer) {
shard_strings.push_back(std::string(buf.data(), buf.size()));
shard_string_views.push_back(shard_strings.back());
}
}
std::vector<std::vector<std::string>> strings;
std::vector<std::vector<absl::string_view>> string_views;
};
auto backing_store = std::make_shared<BufferBackingStore>();
auto on_done_with_buffer = [backing_store]() {};
auto buffers_promise = Future<Buffers>::CreatePromise();
auto buffers_future = Future<Buffers>(buffers_promise);
auto copier = [backing_store = std::move(backing_store),
buffers_promise = std::move(buffers_promise)](
absl::StatusOr<Buffers> input_buffers) mutable {
if (!input_buffers.ok()) {
buffers_promise.Set(input_buffers.status());
return;
}
Buffers buffers;
buffers.reserve(input_buffers->size());
for (auto& input_buffer : *input_buffers) {
backing_store->AddShardData(input_buffer);
buffers.push_back(backing_store->string_views.back());
}
buffers_promise.Set(std::move(buffers));
};
buffers_.OnReady(std::move(copier));
return BasicStringArray::Create(client_, shape_, std::move(new_sharding),
std::move(buffers_future),
std::move(on_done_with_buffer));
}
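// For a fully replicated array, returns a single-device array backed by a
// deep copy of shard 0's strings.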
absl::StatusOr<tsl::RCReference<Array>> BasicStringArray::FullyReplicatedShard(
ArrayCopySemantics semantics) {
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
if (!sharding_->IsFullyReplicated()) {
return absl::FailedPreconditionError("This array is not fully replicated");
}
struct BufferBackingStore {
void CopyFrom(absl::Span<const absl::string_view> input_buffer) {
strings.reserve(input_buffer.size());
string_views.reserve(input_buffer.size());
for (absl::string_view buf : input_buffer) {
strings.push_back(std::string(buf.data(), buf.size()));
string_views.push_back(strings.back());
}
}
std::vector<std::string> strings;
std::vector<absl::string_view> string_views;
};
auto backing_store = std::make_shared<BufferBackingStore>();
auto on_done_with_buffer = [backing_store]() {};
auto buffers_promise = Future<Buffers>::CreatePromise();
auto buffers_future = Future<Buffers>(buffers_promise);
auto copier = [backing_store = std::move(backing_store),
buffers_promise = std::move(buffers_promise)](
absl::StatusOr<Buffers> input_buffers) mutable {
if (!input_buffers.ok()) {
buffers_promise.Set(input_buffers.status());
return;
}
auto& input_buffer = (*input_buffers)[0];
backing_store->CopyFrom(input_buffer);
Buffers buffers;
buffers.push_back(backing_store->string_views);
buffers_promise.Set(std::move(buffers));
};
buffers_.OnReady(std::move(copier));
return BasicStringArray::Create(
client_, shape_,
SingleDeviceSharding::Create(sharding_->devices()->devices().front(),
MemoryKind()),
std::move(buffers_future), std::move(on_done_with_buffer));
}
absl::StatusOr<std::unique_ptr<PjRtLayout>> BasicStringArray::layout() const {
absl::MutexLock lock(&mu_);
if (is_deleted_) {
return absl::FailedPreconditionError("Array has already been deleted");
}
return std::make_unique<BasicStringArrayLayout>();
}
std::string BasicStringArray::DebugString() const {
DCHECK(this);
return absl::StrFormat(
"BasicStringArray(shape=%s; sharding=%s; layout=major-to-minor-dense)",
shape_.DebugString(), sharding_->DebugString());
}
}
} | #include "xla/python/pjrt_ifrt/basic_string_array.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
absl::StatusOr<tsl::RCReference<BasicStringArray>> CreateTestArray(
Client* client, Future<BasicStringArray::Buffers> buffers,
BasicStringArray::OnDoneWithBuffer on_done_with_buffer) {
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
return BasicStringArray::Create(client, shape, sharding, std::move(buffers),
std::move(on_done_with_buffer));
}
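// Test helper: copies the input strings into heap-allocated storage, returns
// Buffers viewing that storage plus an on-done callback that keeps the
// storage alive until the array is done with it.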
std::pair<BasicStringArray::Buffers, BasicStringArray::OnDoneWithBuffer>
MakeBuffersAndOnDoneWithBuffer(
absl::Span<const absl::string_view> input_strings) {
BasicStringArray::Buffers buffers;
auto string_holder = std::make_shared<std::vector<std::string>>();
string_holder->reserve(input_strings.size());
auto string_view_holder = std::make_shared<std::vector<absl::string_view>>();
string_view_holder->reserve(input_strings.size());
for (const auto str : input_strings) {
string_holder->push_back(std::string(str));
}
for (const auto& str : *string_holder) {
string_view_holder->push_back(absl::string_view(str));
}
buffers.push_back(*string_view_holder);
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[string_holder = std::move(string_holder),
string_view_holder = std::move(string_view_holder)]() {};
return std::make_pair(std::move(buffers), std::move(on_done_with_buffer));
}
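// Test helper: makes a single-device array whose buffers future is controlled
// by the returned promise, so tests can delay or fail buffer production.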
absl::StatusOr<std::pair<tsl::RCReference<BasicStringArray>,
Promise<BasicStringArray::Buffers>>>
CreateNonReadyTestArray(
Client* client, Device* const device,
BasicStringArray::OnDoneWithBuffer on_done_with_buffer) {
auto buffers_promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(buffers_promise);
Shape shape({1});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
TF_ASSIGN_OR_RETURN(auto array,
BasicStringArray::Create(client, shape, sharding,
std::move(buffers_future),
std::move(on_done_with_buffer)));
return std::make_pair(std::move(array), std::move(buffers_promise));
}
TEST(BasicStringArrayLayoutTest, Serialize) {
BasicStringArrayLayout layout;
EXPECT_TRUE(layout.Serialize().empty());
}
TEST(BasicStringArrayLayoutTest, ToString) {
BasicStringArrayLayout layout;
auto output_str = layout.ToString();
EXPECT_THAT(output_str, HasSubstr("major-to-minor"));
}
TEST(BasicStringArrayLayoutTest, Equality) {
BasicStringArrayLayout layout_1;
BasicStringArrayLayout layout_2;
const PjRtLayout& layout_3 = layout_2;
EXPECT_EQ(layout_1, layout_3);
xla::PjRtXlaLayout layout_6((xla::Layout()));
const PjRtLayout& layout_7 = layout_6;
EXPECT_FALSE(layout_7 == layout_1);
}
TEST(BasicStringArrayTest, CreateSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
TF_EXPECT_OK(CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(buffers),
nullptr));
}
TEST(BasicStringArrayTest, CreateFailureWithInvalidFuture) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
EXPECT_THAT(CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(),
nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(BasicStringArrayTest, Destruction) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
absl::Notification on_done_with_buffer_called;
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[&on_done_with_buffer_called]() { on_done_with_buffer_called.Notify(); };
auto array_creation_status_promise = PjRtFuture<>::CreatePromise();
tsl::Env::Default()->SchedClosure(([&]() {
auto array = CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer));
array_creation_status_promise.Set(array.status());
}));
TF_ASSERT_OK(Future<>(array_creation_status_promise).Await());
on_done_with_buffer_called.WaitForNotification();
}
TEST(BasicStringArrayTest, InvalidBuffersAreHandledCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 1);
auto shard0_data = std::make_shared<std::vector<absl::string_view>>();
shard0_data->push_back("abc");
auto shard1_data = std::make_shared<std::vector<absl::string_view>>();
shard1_data->push_back("def");
BasicStringArray::Buffers buffers;
buffers.push_back(*shard0_data);
buffers.push_back(*shard1_data);
auto on_done_with_buffer = [shard0_data = std::move(shard0_data),
shard1_data = std::move(shard1_data)]() {};
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
auto array = ret.first;
auto promise = ret.second;
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
tsl::Env::Default()->SchedClosure([&]() { promise.Set(buffers); });
EXPECT_THAT(basic_string_array->GetReadyFuture().Await(),
StatusIs(absl::StatusCode::kFailedPrecondition));
EXPECT_THAT(basic_string_array->buffers().Await(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(BasicStringArrayTest, Delete) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
absl::Notification on_done_with_buffer_called;
BasicStringArray::OnDoneWithBuffer on_done_with_buffer =
[&on_done_with_buffer_called]() { on_done_with_buffer_called.Notify(); };
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
tsl::Env::Default()->SchedClosure([&]() { array->Delete(); });
on_done_with_buffer_called.WaitForNotification();
EXPECT_TRUE(array->IsDeleted());
}
TEST(GetReadyFutureTest, SuccessCase) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(promise);
TF_ASSERT_OK_AND_ASSIGN(auto array,
CreateTestArray(client.get(), buffers_future,
nullptr));
auto ready_future = array->GetReadyFuture();
EXPECT_FALSE(ready_future.IsKnownReady());
BasicStringArray::Buffers buffers;
buffers.push_back({"abc", "def"});
tsl::Env::Default()->SchedClosure([&]() { promise.Set(buffers); });
TF_EXPECT_OK(ready_future.Await());
}
TEST(GetReadyFutureTest, FailureCases) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto promise = Future<BasicStringArray::Buffers>::CreatePromise();
auto buffers_future = Future<BasicStringArray::Buffers>(promise);
TF_ASSERT_OK_AND_ASSIGN(auto array,
CreateTestArray(client.get(), buffers_future,
nullptr));
auto ready_future = array->GetReadyFuture();
EXPECT_FALSE(ready_future.IsKnownReady());
tsl::Env::Default()->SchedClosure(
[&]() { promise.Set(absl::InternalError("injected error")); });
EXPECT_THAT(ready_future.Await(), StatusIs(absl::StatusCode::kInternal));
}
TEST(MakeArrayFromHostBufferTest, SuccessCase) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->push_back("abc");
string_views->push_back("def");
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
TF_ASSERT_OK(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, std::move(sharding),
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
std::move(on_done_with_host_buffer)));
}
TEST(MakeArrayFromHostBufferTest, FailureCases) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
Shape shape({1});
Device* device = client->addressable_devices().at(0);
std::shared_ptr<const Sharding> single_device_sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
string_views->push_back("abc");
string_views->push_back("def");
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
EXPECT_THAT(
client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::optional<absl::Span<const int64_t>>({8}),
single_device_sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
std::shared_ptr<const Sharding> opaque_sharding =
OpaqueSharding::Create(BasicDeviceList::Create({device}), MemoryKind());
EXPECT_THAT(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, opaque_sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
for (Client::HostBufferSemantics host_buffer_semantics :
{Client::HostBufferSemantics::kImmutableUntilTransferCompletes,
Client::HostBufferSemantics::kImmutableZeroCopy,
Client::HostBufferSemantics::kMutableZeroCopy}) {
SCOPED_TRACE(
absl::StrCat("host_buffer_semantics: ", host_buffer_semantics));
EXPECT_THAT(client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, single_device_sharding,
host_buffer_semantics, on_done_with_host_buffer),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
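// Makes a single-device string array of shape {1} on `device` via
// MakeArrayFromHostBuffer.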
absl::StatusOr<tsl::RCReference<Array>> MakeSingleDeviceStringTestArray(
absl::Span<const std::string> contents, Client* client,
Device* const device) {
Shape shape({1});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
auto string_views = std::make_shared<std::vector<absl::string_view>>();
for (const auto& content : contents) {
string_views->push_back(content);
}
const void* data = string_views->data();
auto on_done_with_host_buffer = [string_views = std::move(string_views)]() {};
return client->MakeArrayFromHostBuffer(
data, DType(DType::kString), shape,
std::nullopt, std::move(sharding),
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
std::move(on_done_with_host_buffer));
}
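// Makes a single-device F32 array of shape {2, 3} filled with 0..5.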
absl::StatusOr<tsl::RCReference<Array>> MakeSingleDeviceFloatTestArray(
Client* client, Device* const device) {
DType dtype(DType::kF32);
Shape shape({2, 3});
auto data = std::make_unique<std::vector<float>>(6);
std::iota(data->begin(), data->end(), 0);
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device, MemoryKind());
return client->MakeArrayFromHostBuffer(
data->data(), dtype, shape,
std::nullopt, sharding,
Client::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr);
}
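// Assembles a two-shard string array, one single-device shard per device,
// using a ConcreteEvenSharding over the first two addressable devices.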
absl::StatusOr<tsl::RCReference<Array>> MakeShardedStringTestArray(
Client* client, absl::Span<const std::string> data,
bool is_fully_replicated) {
if (data.size() < 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Input data has too few strings. Need at least 2. got: ", data.size()));
}
auto devices = client->addressable_devices();
if (devices.size() < 2) {
return absl::InvalidArgumentError(absl::StrCat(
"Test client has too few devices. Need 2, got:", devices.size()));
}
std::shared_ptr<const Sharding> sharding = ConcreteEvenSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind(),
Shape({2, 1}), Shape({1}), is_fully_replicated);
std::vector<tsl::RCReference<Array>> arrays;
for (int i = 0; i < 2; ++i) {
TF_ASSIGN_OR_RETURN(auto array, MakeSingleDeviceStringTestArray(
{data[i]}, client, devices[i]));
arrays.push_back(std::move(array));
}
return client->AssembleArrayFromSingleDeviceArrays(
Shape({2, 1}), std::move(sharding), absl::MakeSpan(arrays),
ArrayCopySemantics::kAlwaysCopy);
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
SuccessWithReadySingleDeviceArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"shard 0", "shard 1"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto buffers, basic_string_array->buffers().Await());
EXPECT_EQ(buffers.size(), 2);
for (int i = 0; i < buffers.size(); ++i) {
SCOPED_TRACE(absl::StrCat("buffer #", i));
auto buffer = buffers[i];
EXPECT_THAT(buffer, testing::ElementsAre(per_shard_contents[i]));
}
}
TEST(AssembleArrayFromSingleDeviceArraysTest, FailsWithNonStringArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays(2);
TF_ASSERT_OK_AND_ASSIGN(
arrays[0], MakeSingleDeviceFloatTestArray(client.get(), devices[0]));
TF_ASSERT_OK_AND_ASSIGN(
arrays[1], MakeSingleDeviceStringTestArray({"string_array_contents"},
client.get(), devices[1]));
EXPECT_THAT(client->AssembleArrayFromSingleDeviceArrays(
Shape({2}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FailsWithNonSingleDeviceStringArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays(2);
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
arrays[0], MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(
arrays[1], MakeSingleDeviceStringTestArray({"string_array_contents"},
client.get(), devices[1]));
EXPECT_THAT(client->AssembleArrayFromSingleDeviceArrays(
Shape({2}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FromNonReadySingleDeviceArraysSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays;
std::vector<Promise<BasicStringArray::Buffers>> promises;
arrays.reserve(2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers0 = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer0 = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer0)));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"def"});
auto buffers1 = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer1 = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
ret, CreateNonReadyTestArray(client.get(), devices[1],
std::move(on_done_with_buffer1)));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
auto array, client->AssembleArrayFromSingleDeviceArrays(
Shape({1}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy));
tsl::Env::Default()->SchedClosure(([&]() mutable {
promises[0].Set(buffers0);
promises[1].Set(buffers1);
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
TF_ASSERT_OK_AND_ASSIGN(auto buffers, buffers_future.Await());
EXPECT_EQ(buffers.size(), 2);
EXPECT_THAT(buffers[0], testing::ElementsAre("abc"));
EXPECT_THAT(buffers[1], testing::ElementsAre("def"));
}
TEST(AssembleArrayFromSingleDeviceArraysTest,
FromNonReadySingleDeviceArraysFailure) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
std::shared_ptr<const Sharding> opaque_sharding = OpaqueSharding::Create(
BasicDeviceList::Create({devices[0], devices[1]}), MemoryKind());
std::vector<tsl::RCReference<Array>> arrays;
std::vector<Promise<BasicStringArray::Buffers>> promises;
arrays.reserve(2);
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
nullptr));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
ret, CreateNonReadyTestArray(client.get(), devices[1],
nullptr));
arrays.push_back(std::move(ret.first));
promises.push_back(std::move(ret.second));
TF_ASSERT_OK_AND_ASSIGN(
auto array, client->AssembleArrayFromSingleDeviceArrays(
Shape({1}), std::move(opaque_sharding),
absl::MakeSpan(arrays), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promises[0].Set(absl::InternalError("injected from the test"));
promises[1].Set(absl::InternalError("injected from the test"));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(array.get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
EXPECT_THAT(buffers_future.Await(),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("injected from the test")));
done_readying_single_device_arrays.WaitForNotification();
}
TEST(DisassembleArrayIntoSingleDeviceArrays,
SingleDeviceArrayDisassembleSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
array->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 1);
auto basic_string_array =
llvm::dyn_cast<BasicStringArray>(disassembled_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
basic_string_array->buffers().Await());
  ASSERT_EQ(new_buffers.size(), 1);
  EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
}
TEST(DisassembleArrayIntoSingleDeviceArrays, ShardedArrayDisassembleSuccess) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled_arrays,
array->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(disassembled_arrays.size(), 2);
for (int i = 0; i < disassembled_arrays.size(); ++i) {
SCOPED_TRACE(absl::StrCat("dissembled array: ", i));
auto basic_string_array =
llvm::dyn_cast<BasicStringArray>(disassembled_arrays[i].get());
TF_ASSERT_OK_AND_ASSIGN(auto buffer, basic_string_array->buffers().Await());
ASSERT_EQ(buffer.size(), 1);
EXPECT_THAT(buffer[0], testing::ElementsAre(per_shard_contents[i]));
}
}
TEST(DisassembleArrayIntoSingleDeviceArrays, FailsIfTheArrayHasBeenDeleted) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(
array->DisassembleIntoSingleDeviceArrays(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(CopyTest, SuccessSingleDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(
auto new_arrays,
client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[1]}), MemoryKind(),
ArrayCopySemantics::kAlwaysCopy));
auto new_basic_string_array =
llvm::dyn_cast<BasicStringArray>(new_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
new_basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 1);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
}
TEST(CopyTest, SuccessMultiDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 4);
const std::vector<std::string> per_shard_contents({"shard 0", "shard 1"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
TF_ASSERT_OK_AND_ASSIGN(
auto new_arrays,
client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[2], devices[3]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
auto new_basic_string_array =
llvm::dyn_cast<BasicStringArray>(new_arrays[0].get());
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
new_basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 2);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("shard 0"));
EXPECT_THAT(new_buffers[1], testing::ElementsAre("shard 1"));
}
TEST(CopyTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
arrays[0]->Delete();
EXPECT_THAT(client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(CopyTest, FailsWithDifferentNumbersDevices) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto [buffers, on_done_with_buffer] = MakeBuffersAndOnDoneWithBuffer({"abc"});
std::vector<tsl::RCReference<Array>> arrays;
TF_ASSERT_OK_AND_ASSIGN(
arrays.emplace_back(),
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
EXPECT_THAT(
client->CopyArrays(absl::MakeSpan(arrays),
BasicDeviceList::Create({devices[0], devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(CopyTest, NonReadySourceArraySuccessfullyBecomesReadyAfterCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
std::vector<tsl::RCReference<Array>> arrays;
arrays.push_back(std::move(ret.first));
auto promise = std::move(ret.second);
TF_ASSERT_OK(client->CopyArrays(
absl::MakeSpan(arrays), BasicDeviceList::Create({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promise.Set(std::move(buffers));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(arrays[0].get());
ASSERT_NE(basic_string_array, nullptr);
TF_ASSERT_OK_AND_ASSIGN(auto new_buffers,
basic_string_array->buffers().Await());
ASSERT_EQ(new_buffers.size(), 1);
EXPECT_THAT(new_buffers[0], testing::ElementsAre("abc"));
done_readying_single_device_arrays.WaitForNotification();
}
TEST(CopyTest, NonReadySourceArrayFailsToBecomeReadyAfterCopy) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
auto devices = client->addressable_devices();
ASSERT_GE(devices.size(), 2);
auto buf_and_on_done_with_buffer = MakeBuffersAndOnDoneWithBuffer({"abc"});
auto buffers = buf_and_on_done_with_buffer.first;
auto on_done_with_buffer = buf_and_on_done_with_buffer.second;
TF_ASSERT_OK_AND_ASSIGN(
auto ret, CreateNonReadyTestArray(client.get(), devices[0],
std::move(on_done_with_buffer)));
std::vector<tsl::RCReference<Array>> arrays;
arrays.push_back(std::move(ret.first));
auto promise = std::move(ret.second);
TF_ASSERT_OK(client->CopyArrays(
absl::MakeSpan(arrays), BasicDeviceList::Create({devices[1]}),
MemoryKind(), ArrayCopySemantics::kAlwaysCopy));
absl::Notification done_readying_single_device_arrays;
tsl::Env::Default()->SchedClosure(([&]() mutable {
promise.Set(absl::InternalError("injected from the test"));
done_readying_single_device_arrays.Notify();
}));
auto basic_string_array = llvm::dyn_cast<BasicStringArray>(arrays[0].get());
ASSERT_NE(basic_string_array, nullptr);
auto buffers_future = basic_string_array->buffers();
EXPECT_THAT(buffers_future.Await(),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("injected from the test")));
done_readying_single_device_arrays.WaitForNotification();
}
TEST(FullyReplicatedShardTest, SuccessSingleDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(
      auto replicated_shard,
      array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy));
  auto replicated_basic_string_array =
      llvm::dyn_cast<BasicStringArray>(replicated_shard.get());
TF_ASSERT_OK_AND_ASSIGN(auto replicated_buffers,
replicated_basic_string_array->buffers().Await());
ASSERT_EQ(replicated_buffers.size(), 1);
EXPECT_THAT(replicated_buffers[0], testing::ElementsAre(kContents));
}
TEST(FullyReplicatedShardTest, SuccessMultiDeviceShardedArray) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kReplicatedContents[] = "abc";
const std::vector<std::string> per_shard_contents(
{kReplicatedContents, kReplicatedContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
true));
TF_ASSERT_OK_AND_ASSIGN(
auto replicated_shard,
array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy));
auto replicated_basic_string_array =
llvm::dyn_cast<BasicStringArray>(replicated_shard.get());
TF_ASSERT_OK_AND_ASSIGN(auto replicated_buffers,
replicated_basic_string_array->buffers().Await());
ASSERT_EQ(replicated_buffers.size(), 1);
EXPECT_THAT(replicated_buffers[0], testing::ElementsAre(kReplicatedContents));
}
TEST(FullyReplicatedShardTest, FailsWithNonFullyReplicatedArrays) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
const std::vector<std::string> per_shard_contents({"abc", "def"});
TF_ASSERT_OK_AND_ASSIGN(
auto array, MakeShardedStringTestArray(client.get(), per_shard_contents,
false));
EXPECT_THAT(array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(FullyReplicatedShardTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(array->FullyReplicatedShard(ArrayCopySemantics::kAlwaysCopy),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(LayoutTest, Success) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(),
Future<BasicStringArray::Buffers>(std::move(buffers)),
std::move(on_done_with_buffer)));
TF_ASSERT_OK_AND_ASSIGN(auto layout, array->layout());
EXPECT_TRUE(layout->Serialize().empty());
}
TEST(LayoutTest, FailsAfterDeletion) {
TF_ASSERT_OK_AND_ASSIGN(auto client, test_util::GetClient());
constexpr char kContents[] = "abc";
auto [buffers, on_done_with_buffer] =
MakeBuffersAndOnDoneWithBuffer({kContents});
TF_ASSERT_OK_AND_ASSIGN(
auto array,
CreateTestArray(client.get(), Future<BasicStringArray::Buffers>(buffers),
std::move(on_done_with_buffer)));
array->Delete();
EXPECT_THAT(array->layout(), StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/basic_string_array.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/basic_string_array_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d37fb101-53be-457f-b13d-5254be7b4fa6 | cpp | tensorflow/tensorflow | xla_sharding_serdes | third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes.cc | third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include "xla/python/pjrt_ifrt/xla_sharding.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
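// SerDes for xla::ifrt::HloSharding: round-trips the device list, the
// optional memory kind, and the wrapped xla::HloSharding as a proto.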
class HloShardingSerDes : public llvm::RTTIExtends<HloSharding, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::HloSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const HloSharding& sharding = llvm::cast<HloSharding>(serializable);
HloShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
*proto.mutable_xla_op_sharding() = sharding.xla_hlo_sharding().ToProto();
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
HloShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized HloSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
TF_ASSIGN_OR_RETURN(auto xla_hlo_sharding,
xla::HloSharding::FromProto(proto.xla_op_sharding()));
return HloSharding::Create(std::move(devices), memory_kind,
std::move(xla_hlo_sharding));
}
static char ID;
};
[[maybe_unused]] char HloShardingSerDes::ID = 0;
bool register_hlo_sharding_serdes = ([] {
RegisterSerDes<HloSharding>(
std::make_unique<HloShardingSerDes>());
}(), true);
}
}
} | #include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
class XlaShardingSerDesTest : public test_util::DeviceTest {};
TEST_P(XlaShardingSerDesTest, HloShardingRoundTrip) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
auto sharding = HloSharding::Create(device_list, MemoryKind("abc"),
xla_hlo_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<HloSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_EQ(out_sharding->xla_hlo_sharding(), sharding->xla_hlo_sharding());
}
INSTANTIATE_TEST_SUITE_P(NumDevices, XlaShardingSerDesTest,
testing::Values(test_util::DeviceTestParam{
.num_devices = 2, .num_addressable_devices = 2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/xla_sharding_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
134c7f20-751e-43a1-9c82-a54fe7d13ce3 | cpp | tensorflow/tensorflow | xla_sharding | third_party/xla/xla/python/pjrt_ifrt/xla_sharding.cc | third_party/xla/xla/python/pjrt_ifrt/xla_sharding_test.cc | #include "xla/python/pjrt_ifrt/xla_sharding.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/shape_util.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
char XlaCompatibleSharding::ID = 0;
char HloSharding::ID = 0;
namespace {
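// Advances `index` to the next position within `limit` in row-major order.
// Returns false once the index has wrapped around past the last position.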
bool NextIndex(Index::Elements* index, absl::Span<const int64_t> limit) {
DCHECK_LE(index->size(), limit.size());
for (int64_t i = index->size() - 1; i >= 0; --i) {
++(*index)[i];
if ((*index)[i] < limit[i]) {
return true;
}
(*index)[i] = 0;
}
return false;
}
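// Fallback that asks the XLA HloSharding for each device's tile offset and
// limit individually; this does not scale to large device counts, hence the
// warning below.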
std::vector<IndexDomain> IndexDomainsSlowPath(
const xla::HloSharding& hlo_sharding,
const tsl::RCReference<DeviceList>& devices, const Shape& shape) {
auto xla_shape = xla::ShapeUtil::MakeShapeWithDescendingLayout(
xla::PrimitiveType::S32, shape.dims());
if (devices->size() > 8) {
LOG_FIRST_N(WARNING, 1)
<< "Taking a slow path for HloSharding::IndexDomains(). This will not "
"scale for a large number of devices.";
}
std::vector<IndexDomain> result;
result.reserve(devices->size());
Index::Elements origin(shape.dims().size());
Shape::Dimensions shard_shape(shape.dims().size());
for (int device_idx = 0; device_idx < devices->size(); ++device_idx) {
auto tile_offset = hlo_sharding.TileOffsetForDevice(xla_shape, device_idx);
auto tile_limit = hlo_sharding.TileLimitForDevice(xla_shape, device_idx);
for (int i = 0; i < shape.dims().size(); ++i) {
origin[i] = tile_offset[i];
shard_shape[i] = tile_limit[i] - tile_offset[i];
}
result.push_back(IndexDomain(Index(origin), Shape(shard_shape)));
}
return result;
}
MemoryKind CanonicalizeMemoryKindWithDevices(
const MemoryKind& memory_kind,
const tsl::RCReference<DeviceList>& devices) {
CHECK(devices != nullptr);
CHECK(!devices->devices().empty());
return CanonicalizeMemoryKind(memory_kind, devices->devices().front());
}
}
std::unique_ptr<HloSharding> HloSharding::Create(
tsl::RCReference<DeviceList> devices, MemoryKind memory_kind,
xla::HloSharding xla_hlo_sharding) {
memory_kind = CanonicalizeMemoryKindWithDevices(memory_kind, devices);
return std::unique_ptr<HloSharding>(new HloSharding(
std::move(devices), memory_kind, std::move(xla_hlo_sharding)));
}
HloSharding::HloSharding(tsl::RCReference<DeviceList> devices,
MemoryKind memory_kind,
xla::HloSharding xla_hlo_sharding)
: llvm::RTTIExtends<HloSharding, XlaCompatibleSharding>(
std::move(devices), memory_kind, xla_hlo_sharding.IsReplicated()),
xla_hlo_sharding_(std::move(xla_hlo_sharding)) {}
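// Returns the per-shard shape: the global shape for maximal/manual/unknown
// shardings, otherwise ceil(dim / tile_count) along each tiled dimension.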
absl::StatusOr<Shape> HloSharding::GetShardShape(const Shape& shape) const {
if (xla_hlo_sharding_.IsTileMaximal() || xla_hlo_sharding_.IsManual() ||
xla_hlo_sharding_.IsUnknown()) {
return shape;
}
if (xla_hlo_sharding_.TotalNumTiles() != devices_->size()) {
return absl::InvalidArgumentError(
absl::StrFormat("sharding's tile count and device count does not "
"match: %d vs. %d; shape=%s, sharding=%s",
xla_hlo_sharding_.TotalNumTiles(), devices_->size(),
shape.DebugString(), xla_hlo_sharding_.ToString()));
}
if (shape.dims().size() != xla_hlo_sharding_.TiledDataRank()) {
return InvalidArgument(
"Numbers of dimensions don't match. From Shape %d vs from "
"HloSharding %d",
shape.dims().size(), xla_hlo_sharding_.TiledDataRank());
}
const absl::Span<const int64_t> tile_assignment_dims =
xla_hlo_sharding_.tile_assignment().dimensions();
Shape::Dimensions tile_shape;
tile_shape.reserve(shape.dims().size());
for (int64_t i = 0; i < shape.dims().size(); ++i) {
tile_shape.push_back(
xla::CeilOfRatio(shape.dims()[i], tile_assignment_dims[i]));
}
return Shape(std::move(tile_shape));
}
bool HloSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
if (devices()->size() != other.devices()->size()) {
return false;
}
const auto* other_hlo_sharding = llvm::dyn_cast<HloSharding>(&other);
if (!other_hlo_sharding) {
return false;
}
return xla_hlo_sharding_ == other_hlo_sharding->xla_hlo_sharding_;
}
absl::StatusOr<std::unique_ptr<Sharding>> HloSharding::WithDeviceAssignment(
std::optional<tsl::RCReference<DeviceList>> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && (*devices)->size() != devices_->size()) {
return InvalidArgument(
"HloSharding should have the same number of devices as the current "
"sharding, but was asked to have %d devices",
(*devices)->size());
}
return Create(devices.value_or(devices_), memory_kind.value_or(memory_kind_),
xla_hlo_sharding_);
}
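// Disassembles `shape` into one (shard shape, single-device sharding) pair
// per device. Evenly tiled (and replicated/maximal/manual) shardings share a
// common shard shape; uneven tilings fall back to per-device IndexDomains.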
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
HloSharding::Disassemble(const Shape& shape) const {
bool is_even_sharding = false;
if (xla_hlo_sharding_.IsReplicated() || xla_hlo_sharding_.IsTileMaximal()) {
is_even_sharding = true;
} else if (xla_hlo_sharding_.IsTiled()) {
const int64_t tiled_data_rank = xla_hlo_sharding_.TiledDataRank();
if (shape.dims().size() != tiled_data_rank) {
return absl::InvalidArgumentError(absl::StrFormat(
"shape must have %d dimensions, but has %d dimensions: "
"shape=%s, sharding=%s",
tiled_data_rank, shape.dims().size(), shape.DebugString(),
xla_hlo_sharding_.ToString()));
}
is_even_sharding = true;
for (int i = 0; i < tiled_data_rank; ++i) {
if (shape.dims()[i] % xla_hlo_sharding_.tile_assignment().dim(i) != 0) {
is_even_sharding = false;
break;
}
}
} else if (xla_hlo_sharding_.IsManual()) {
is_even_sharding = true;
}
const absl::Span<Device* const> devices = devices_->devices();
if (is_even_sharding) {
TF_ASSIGN_OR_RETURN(xla::ifrt::Shape shard_shape, GetShardShape(shape));
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
result.reserve(devices_->size());
for (int i = 0; i < devices_->size(); ++i) {
result.push_back({
shard_shape,
SingleDeviceSharding::Create(devices[i], memory_kind_),
});
}
return result;
} else {
TF_ASSIGN_OR_RETURN(std::vector<IndexDomain> index_domains,
IndexDomains(shape));
CHECK_EQ(index_domains.size(), devices_->size());
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
result.reserve(index_domains.size());
for (int i = 0; i < index_domains.size(); ++i) {
result.push_back({
index_domains[i].shape(),
SingleDeviceSharding::Create(devices[i], memory_kind_),
});
}
return result;
}
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
HloSharding::Disassemble(const DynamicShape& dynamic_shape) const {
return InvalidArgument(
"HloSharding can only disassemble static shape, but was asked "
"to disassemble dynamic shape %s",
dynamic_shape.DebugString());
}
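// Computes per-device index domains. Manual shardings are rejected;
// replicated/tile-maximal shardings map every device to the full shape; tiled
// shardings with at most REPLICATED subgroups use the fast path below; all
// other cases fall back to IndexDomainsSlowPath.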
absl::StatusOr<std::vector<IndexDomain>> HloSharding::IndexDomains(
const Shape& shape) const {
std::vector<IndexDomain> result;
const int num_devices = devices_->size();
if (xla_hlo_sharding_.IsManual()) {
return absl::InvalidArgumentError(
"Manual sharding does not support IndexDomains");
}
if (xla_hlo_sharding_.IsReplicated() || xla_hlo_sharding_.IsTileMaximal()) {
IndexDomain element(shape);
result.resize(num_devices, element);
return result;
}
if (!xla_hlo_sharding_.IsTiled()) {
return IndexDomainsSlowPath(xla_hlo_sharding_, devices_, shape);
}
for (const xla::OpSharding::Type subgroup_type :
xla_hlo_sharding_.subgroup_types()) {
if (subgroup_type != xla::OpSharding::REPLICATED) {
return IndexDomainsSlowPath(xla_hlo_sharding_, devices_, shape);
}
}
if (xla_hlo_sharding_.tile_assignment().num_elements() != num_devices) {
return absl::InvalidArgumentError(absl::StrFormat(
"sharding's tile_assignment_devices and device count does not "
"match: %d vs. %d; shape=%s, sharding=%s",
xla_hlo_sharding_.tile_assignment().num_elements(), num_devices,
shape.DebugString(), DebugString()));
}
const int64_t tiled_data_rank = xla_hlo_sharding_.TiledDataRank();
if (shape.dims().size() != tiled_data_rank) {
return absl::InvalidArgumentError(
absl::StrFormat("shape must have %d dimensions, but has %d dimensions: "
"shape=%s, sharding=%s",
tiled_data_rank, shape.dims().size(),
shape.DebugString(), xla_hlo_sharding_.ToString()));
}
TF_ASSIGN_OR_RETURN(Shape tile_shape, GetShardShape(shape));
const absl::Span<const int64_t> tile_shape_dims = tile_shape.dims();
const absl::Span<const int64_t> tile_assignment_dims =
xla_hlo_sharding_.tile_assignment().dimensions();
const int64_t replication_dim = xla_hlo_sharding_.SubgroupReplicationDim();
int64_t num_replicas;
if (replication_dim == -1) {
num_replicas = 1;
} else {
num_replicas = tile_assignment_dims[replication_dim];
}
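  // Enumerate every unique tile coordinate; the `num_replicas` consecutive
  // entries of the flattened tile assignment at each coordinate are devices
  // that share that tile's origin.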
Index::Elements unique_tile_index(shape.dims().size());
std::vector<Index::Elements> origins(num_devices);
Index::Elements origin(shape.dims().size());
int64_t device_assignment_index = 0;
do {
for (int64_t i = 0; i < shape.dims().size(); ++i) {
origin[i] =
std::min(tile_shape_dims[i] * unique_tile_index[i], shape.dims()[i]);
}
for (int64_t i = 0; i < num_replicas; ++i) {
CHECK_LT(device_assignment_index, num_devices);
const int64_t device_id = xla_hlo_sharding_.tile_assignment()
.array()
.data()[device_assignment_index];
if (device_id < 0 || device_id >= num_devices) {
return absl::InvalidArgumentError(
absl::StrFormat("Out of range device id in device_assignment: %d; "
"valid range: [0, %d)",
device_id, num_devices));
}
origins[device_id] = origin;
++device_assignment_index;
}
} while (NextIndex(&unique_tile_index, tile_assignment_dims));
result.reserve(num_devices);
for (int device_idx = 0; device_idx < num_devices; ++device_idx) {
Shape::Dimensions actual_tile_shape;
actual_tile_shape.reserve(tile_shape_dims.size());
for (int i = 0; i < tile_shape_dims.size(); ++i) {
actual_tile_shape.push_back(std::min(
tile_shape_dims[i], shape.dims()[i] - origins[device_idx][i]));
}
result.push_back(IndexDomain(Index(origins[device_idx]),
Shape(std::move(actual_tile_shape))));
}
return result;
}
std::string HloSharding::DebugString() const {
return absl::StrFormat("HloSharding(memory_kind: %v, hlo_sharding: %s)",
memory_kind_, xla_hlo_sharding_.ToString());
}
std::vector<IndexDomain> TEST_HloShardingIndexDomainsSlowPath(
const HloSharding& hlo_sharding, const Shape& shape) {
return IndexDomainsSlowPath(hlo_sharding.xla_hlo_sharding(),
hlo_sharding.devices(), shape);
}
}
} | #include "xla/python/pjrt_ifrt/xla_sharding.h"
#include <memory>
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
class HloShardingTest : public test_util::DeviceTest {};
TEST_P(HloShardingTest, CreateWithBadDeviceList) {
auto xla_hlo_sharding = xla::HloSharding::Replicate();
EXPECT_DEATH(HloSharding::Create(tsl::RCReference<DeviceList>(), MemoryKind(),
xla_hlo_sharding),
"");
EXPECT_DEATH(HloSharding::Create(BasicDeviceList::Create({}), MemoryKind(),
xla_hlo_sharding),
"");
}
TEST_P(HloShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
{
auto xla_hlo_sharding = xla::HloSharding::Replicate();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_TRUE(sharding->IsFullyReplicated());
}
{
auto xla_hlo_sharding = xla::HloSharding::IotaTile({1, 6});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
{
auto xla_hlo_sharding = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
}
TEST_P(HloShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6})),
IsOkAndHolds(Shape({3, 2})));
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6, 6})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Numbers of dimensions don't match. From "
"Shape 3 vs from HloSharding 2")));
}
TEST_P(HloShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding0 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding0 =
HloSharding::Create(device_list0, MemoryKind(), xla_hlo_sharding0);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({3, 1});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({3, 2});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({0, 1, 2});
std::shared_ptr<const HloSharding> hlo_sharding0 = HloSharding::Create(
device_list0, MemoryKind(), xla::HloSharding::Replicate());
std::shared_ptr<const HloSharding> hlo_sharding1 = HloSharding::Create(
device_list1, MemoryKind(), xla::HloSharding::Replicate());
EXPECT_FALSE(hlo_sharding0->HasSamePartitioning(*hlo_sharding1));
}
}
TEST_P(HloShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding0 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding0 =
HloSharding::Create(device_list0, MemoryKind(), xla_hlo_sharding0);
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("HloSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 3 devices")));
}
}
TEST_P(HloShardingTest, IndexDomainsWithReplication) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Replicate();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(shape), IndexDomain(shape)));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithReplication) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Replicate();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({10, 20}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithUnevenTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({11, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({6, 20})),
IndexDomain(Index({6, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithUnevenTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({11, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
if (i == 0) {
EXPECT_EQ(shape, Shape({6, 20}));
} else {
EXPECT_EQ(shape, Shape({5, 20}));
}
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithPartialTile) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding =
xla::HloSharding::PartialTile(xla::TileAssignment({2, 1, 3}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithPartialTile) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding =
xla::HloSharding::PartialTile(xla::TileAssignment({2, 1, 3}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithSubgroupReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::REPLICATED});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithSubgroupReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::REPLICATED});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithSubgroupMaximalSlowPath) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::MAXIMAL});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithSubgroupMaximalSlowPath) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::MAXIMAL});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithManual) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Manual();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
EXPECT_THAT(
sharding->IndexDomains(shape).status(),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Manual sharding does not support IndexDomains")));
}
TEST_P(HloShardingTest, DisassembleWithManual) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Manual();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({10, 20}));
EXPECT_EQ(*sharding, *SingleDeviceSharding::Create(
device_list->devices()[i], MemoryKind()));
}
}
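// Error-path tests: Disassemble must fail when the device count does not
// match the sharding's tile count, when the shape rank disagrees with the
// sharding, or when the shape is dynamic.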
TEST_P(HloShardingTest, DisassembleFailsWithInvalidDeviceCount) {
auto device_list = GetDevices({0});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
EXPECT_THAT(
sharding->Disassemble(shape),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("sharding's tile count and device count does not match")));
}
TEST_P(HloShardingTest, DisassembleFailsWithMismatchingShapeDimsSize) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10});
EXPECT_THAT(
sharding->Disassemble(shape),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("shape must have 2 dimensions, but has 1 dimensions")));
}
TEST_P(HloShardingTest, DisassembleFailsWithDynamicShape) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(xla::TileAssignment({2}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10}), BoundedDynamicShapeTag({true})));
EXPECT_THAT(sharding->Disassemble(dynamic_shape),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("can only disassemble static shape")));
}
INSTANTIATE_TEST_SUITE_P(NumDevices, HloShardingTest,
testing::Values(test_util::DeviceTestParam{
.num_devices = 6, .num_addressable_devices = 4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/xla_sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/xla_sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc72ca28-1a60-4ddb-be48-f5dd209aa2ef | cpp | tensorflow/tensorflow | ifrt_backend | third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend.cc | third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend_test.cc | #include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "xla/layout.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/remap_plan.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/value.h"
#include "xla/python/ifrt_proxy/common/array_util.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/proto_util.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace proxy {
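// Constructs the per-session backend state. Compilation requests run on a
// dedicated thread pool configured with an explicit stack size.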
IfrtBackend::IfrtBackend(IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store)
: version_(std::move(version)),
session_id_(session_id),
client_(std::move(ifrt_client)),
host_buffer_store_(std::move(host_buffer_store)),
compile_thread_pool_(
tsl::Env::Default(),
[]() {
tsl::ThreadOptions options;
options.stack_size = 240 * 1024;
return options;
}(),
"IfrtBackend",
32) {}
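// Factory method. Rejects a null IFRT client and protocol versions outside
// the supported [kServerMinVersion, kServerMaxVersion] range.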
absl::StatusOr<std::unique_ptr<IfrtBackend>> IfrtBackend::Create(
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<xla::ifrt::Client> ifrt_client,
std::shared_ptr<HostBufferStore> host_buffer_store) {
if (ifrt_client == nullptr) {
return absl::InvalidArgumentError("ifrt_client cannot be a nullptr.");
}
if (version.protocol_version() < kServerMinVersion ||
version.protocol_version() > kServerMaxVersion) {
return absl::FailedPreconditionError(absl::StrCat(
"Protocol version ", version.protocol_version(),
" is unsupported by IFRT Proxy server; supported versions: [",
kServerMinVersion, ",", kServerMaxVersion, "]"));
}
return absl::WrapUnique<IfrtBackend>(
new IfrtBackend(std::move(version), session_id, std::move(ifrt_client),
std::move(host_buffer_store)));
}
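// Closes all host callback queues, cancels pending host callback executions,
// and blocks until all in-flight asynchronous handlers have finished.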
IfrtBackend::~IfrtBackend() {
{
absl::MutexLock lock(&host_callback_queues_mutex_);
for (const auto& [key, queue] : host_callback_queues_) {
queue->Close();
}
}
absl::flat_hash_map<uint64_t, RemoteLoadedHostCallbackQueue::ExecutionRequest>
host_callback_executions;
{
absl::MutexLock lock(&host_callback_executions_mutex_);
host_callback_executions.swap(host_callback_executions_);
}
for (auto& [handle, execution_request] : host_callback_executions) {
std::move(execution_request)
.status.Set(absl::CancelledError("IFRT backend has shut down"));
}
{
auto done = [this]() ABSL_SHARED_LOCKS_REQUIRED(in_flight_count_mutex_) {
return in_flight_count_ == 0;
};
absl::MutexLock lock(&in_flight_count_mutex_, absl::Condition(&done));
}
}
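// Dispatches an incoming IfrtRequest to the matching handler. Handlers return
// either an immediately available response or a future that resolves once the
// underlying IFRT operation completes.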
Future<BackendInterface::Response> IfrtBackend::Process(
std::unique_ptr<IfrtRequest> request) {
switch (request->request_case()) {
case IfrtRequest::RequestCase::kInitRequest:
return Future<Response>(HandleInit(std::move(request)));
case IfrtRequest::RequestCase::kCheckFutureRequest:
return HandleCheckFutureRequest(std::move(request));
case IfrtRequest::RequestCase::kMakeArrayFromHostBufferRequest:
return Future<Response>(
HandleMakeArrayFromHostBufferRequest(std::move(request)));
case IfrtRequest::RequestCase::kAssembleArrayFromSingleDeviceArraysRequest:
return Future<Response>(
HandleAssembleArrayFromSingleDeviceArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kRemapArraysRequest:
return Future<Response>(HandleRemapArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kCopyToHostBufferRequest:
return HandleCopyToHostBufferRequest(std::move(request));
case IfrtRequest::RequestCase::kDisassembleIntoSingleDeviceArraysRequest:
return Future<Response>(
HandleDisassembleIntoSingleDeviceArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kCheckValueReadyRequest:
return Future<Response>(HandleCheckValueReadyRequest(std::move(request)));
case IfrtRequest::RequestCase::kCopyArraysRequest:
return Future<Response>(HandleCopyArraysRequest(std::move(request)));
case IfrtRequest::RequestCase::kReshardRequest:
return Future<Response>(HandleReshardRequest(std::move(request)));
case IfrtRequest::RequestCase::kFullyReplicatedShardRequest:
return Future<Response>(
HandleFullyReplicatedShardRequest(std::move(request)));
case IfrtRequest::RequestCase::kDeleteArrayRequest:
return Future<Response>(HandleDeleteArrayRequest(std::move(request)));
case IfrtRequest::RequestCase::kIsArrayDeletedRequest:
return Future<Response>(HandleIsArrayDeletedRequest(std::move(request)));
case IfrtRequest::RequestCase::kDestructArrayRequest:
return Future<Response>(HandleDestructArrayRequest(std::move(request)));
case IfrtRequest::RequestCase::kCompileRequest:
return Future<Response>(HandleCompileRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableMetadataRequest:
return HandleLoadedExecutableMetadataRequest(std::move(request));
case IfrtRequest::RequestCase::kLoadedExecutableExecuteRequest:
return Future<Response>(
HandleLoadedExecutableExecuteRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableDeleteRequest:
return Future<Response>(
HandleLoadedExecutableDeleteRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableIsDeletedRequest:
return Future<Response>(
HandleLoadedExecutableIsDeletedRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedExecutableDestructRequest:
return Future<Response>(
HandleLoadedExecutableDestructRequest(std::move(request)));
case IfrtRequest::RequestCase::kLoadedHostCallbackPollRequest:
return HandleLoadedHostCallbackPollRequest(std::move(request));
case IfrtRequest::RequestCase::kLoadedHostCallbackReturnRequest:
return Future<Response>(
HandleLoadedHostCallbackReturnRequest(std::move(request)));
case IfrtRequest::RequestCase::kGetDefaultDeviceAssignmentRequest:
return Future<Response>(
HandleGetDefaultDeviceAssignmentRequest(std::move(request)));
default:
LOG(ERROR) << "Got unimplemented request type: "
<< request->DebugString();
return Future<Response>(absl::UnimplementedError(absl::StrCat(
"Got unimplemented request type: ", request->request_case())));
}
}
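// Hands out monotonically increasing handles that identify arrays, futures,
// executables, and host callback state across the proxy protocol.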
uint64_t IfrtBackend::HandleGenerator::New() {
absl::MutexLock lock(&mu_);
return current_++;
}
void IfrtBackend::HandleGenerator::BulkNew(absl::Span<uint64_t> handles) {
absl::MutexLock lock(&mu_);
std::iota(handles.begin(), handles.end(), current_);
current_ += handles.size();
}
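// Runs `handle_fn` asynchronously, on `thread_pool` if provided or on the
// default Env scheduler otherwise. The in-flight counter lets the destructor
// wait for all scheduled work to drain before returning.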
Future<BackendInterface::Response> IfrtBackend::AsyncExecute(
std::function<absl::StatusOr<Response>()> handle_fn,
tsl::thread::ThreadPool* thread_pool) {
{
absl::MutexLock lock(&in_flight_count_mutex_);
++in_flight_count_;
}
auto promise = Future<Response>::CreatePromise();
auto f = [this, promise, handle_fn = std::move(handle_fn)]() mutable {
promise.Set(handle_fn());
{
absl::MutexLock lock(&in_flight_count_mutex_);
--in_flight_count_;
}
};
if (thread_pool != nullptr) {
thread_pool->Schedule(std::move(f));
} else {
tsl::Env::Default()->SchedClosure(std::move(f));
}
return Future<Response>(std::move(promise));
}
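// Builds the InitResponse: platform metadata, devices with their memories and
// attributes, and the de-duplicated set of memories. Clients on protocol
// version <= 3 receive the deprecated per-device attribute encoding.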
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleInit(
std::unique_ptr<IfrtRequest> request) {
std::unique_ptr<IfrtResponse> response =
NewIfrtResponse(request->request_metadata().op_id());
auto* init_resp = response->mutable_init_response();
init_resp->set_session_id(session_id_);
init_resp->set_platform_name(AsProtoStringData(client_->platform_name()));
init_resp->set_platform_version(
AsProtoStringData(client_->platform_version()));
init_resp->set_platform_id(client_->platform_id());
init_resp->set_runtime_type(AsProtoStringData(client_->runtime_type()));
init_resp->set_process_index(client_->process_index());
for (auto* device : client_->devices()) {
InitResponse::Device* d = init_resp->add_devices();
d->set_id(device->Id().value());
d->set_device_kind(AsProtoStringData(device->Kind()));
if (auto default_memory = device->DefaultMemory(); default_memory.ok()) {
d->set_default_memory_id((*default_memory)->Id().value());
}
for (const auto* memory : device->Memories()) {
d->add_memory_ids(memory->Id().value());
}
d->set_debug_string(AsProtoStringData(device->DebugString()));
d->set_to_string(AsProtoStringData(device->ToString()));
if (version_.protocol_version() <= 3) {
for (const auto& [name, attr] : device->Attributes().map()) {
TF_ASSIGN_OR_RETURN(
(*d->mutable_deprecated_attributes())[name],
std::visit(
[&](const auto& attr) { return ToVariantProto(attr.value); },
attr));
}
} else {
*d->mutable_attributes() = device->Attributes().ToProto();
}
}
for (auto* addressable_device : client_->addressable_devices()) {
init_resp->add_addressable_device_ids(addressable_device->Id().value());
}
absl::flat_hash_map<int, xla::ifrt::Memory*> memories;
for (auto* device : client_->devices()) {
for (xla::ifrt::Memory* memory : device->Memories()) {
const auto [it, inserted] =
memories.insert({memory->Id().value(), memory});
if (!inserted && it->second != memory) {
return absl::FailedPreconditionError(absl::StrCat(
"Two memories cannot have the same id: ", memory->ToString(),
" vs. ", it->second->ToString()));
}
}
}
for (const auto& [id, memory] : memories) {
auto* m = init_resp->add_memories();
m->set_id(id);
m->set_memory_space_kind(AsProtoStringData(*memory->Kind().memory_kind()));
for (const auto* device : memory->Devices()) {
m->add_device_ids(device->Id().value());
}
m->set_debug_string(AsProtoStringData(memory->DebugString()));
m->set_to_string(AsProtoStringData(memory->ToString()));
}
return response;
}
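// Looks up a previously registered future, removes it from the futures table,
// and responds once the future becomes ready.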
Future<BackendInterface::Response> IfrtBackend::HandleCheckFutureRequest(
std::unique_ptr<IfrtRequest> request) {
const CheckFutureRequest& check_request = request->check_future_request();
Future<> future;
{
absl::MutexLock lock(&futures_mutex_);
const auto it = futures_.find(check_request.future_handle());
if (it == futures_.end()) {
return Future<Response>(absl::NotFoundError(absl::StrCat(
"Unknown future handle: ", check_request.future_handle())));
}
future = std::move(it->second);
futures_.erase(it);
}
auto promise = Future<BackendInterface::Response>::CreatePromise();
future.OnReady([op_id = request->request_metadata().op_id(), promise,
hold = future](absl::Status status) mutable {
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
auto ifrt_resp = NewIfrtResponse(op_id);
ifrt_resp->mutable_check_future_response();
promise.Set(std::move(ifrt_resp));
});
return Future<BackendInterface::Response>(std::move(promise));
}
Future<BackendInterface::Response> IfrtBackend::HandleCheckValueReadyRequest(
std::unique_ptr<IfrtRequest> request) {
std::vector<tsl::RCReference<xla::ifrt::Value>> values;
values.reserve(request->check_value_ready_request().value_handles_size());
for (const auto& value_handle :
request->check_value_ready_request().value_handles()) {
auto array = GetArray(value_handle);
if (!array.ok()) {
return Future<Response>(array.status());
}
values.push_back(*std::move(array));
}
auto ifrt_response_promise =
Future<BackendInterface::Response>::CreatePromise();
Future<BackendInterface::Response> ifrt_response_future(
ifrt_response_promise);
client_->GetReadyFuture(values).OnReady(
[op_id = request->request_metadata().op_id(),
promise = std::move(ifrt_response_promise)](
absl::Status status) mutable -> void {
if (!status.ok()) {
promise.Set(std::move(status));
return;
}
auto ifrt_response = NewIfrtResponse(op_id);
ifrt_response->mutable_check_value_ready_response();
promise.Set(std::move(ifrt_response));
});
return ifrt_response_future;
}
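// Rebuilds dtype, shape, byte strides, and sharding from the request, pulls
// the staged host buffer out of the host buffer store, creates the array, and
// returns a newly minted array handle.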
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleMakeArrayFromHostBufferRequest(
std::unique_ptr<IfrtRequest> request) {
if (!request->has_make_array_from_host_buffer_request()) {
return absl::InternalError(
"MakeArrayFromHostBuffer got an IfrtRequest with no "
"MakeArrayFromHostBufferRequest in it.");
}
auto* make_array_request =
request->mutable_make_array_from_host_buffer_request();
TF_ASSIGN_OR_RETURN(
auto sharding, Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
make_array_request->sharding()));
const auto byte_strides = [&]() -> std::optional<std::vector<int64_t>> {
if (!make_array_request->has_byte_strides()) return std::nullopt;
return FromByteStridesProto(make_array_request->byte_strides());
}();
TF_ASSIGN_OR_RETURN(const auto shape,
Shape::FromProto(make_array_request->shape()));
TF_ASSIGN_OR_RETURN(const auto dtype,
DType::FromProto(make_array_request->dtype()));
const uint64_t host_buffer_handle = make_array_request->host_buffer_handle();
absl::Cleanup cleanup = [&] {
CHECK_OK(host_buffer_store_->Delete(host_buffer_handle));
};
TF_ASSIGN_OR_RETURN(std::shared_ptr<const std::string> host_buffer,
host_buffer_store_->Lookup(host_buffer_handle));
std::move(cleanup).Invoke();
TF_ASSIGN_OR_RETURN(const auto mem_region,
ArrayMemRegion::FromMinimalMemRegion(
*host_buffer, dtype, shape, byte_strides));
TF_ASSIGN_OR_RETURN(
auto array,
client_->MakeArrayFromHostBuffer(
mem_region.zeroth_element(), dtype, std::move(shape),
std::move(byte_strides), std::move(sharding),
xla::ifrt::Client::HostBufferSemantics::
kImmutableUntilTransferCompletes,
[hold = std::move(host_buffer)]() mutable { hold.reset(); }));
uint64_t handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({handle, std::move(array)});
}
std::unique_ptr<IfrtResponse> response =
NewIfrtResponse(request->request_metadata().op_id());
auto* make_array_resp =
response->mutable_make_array_from_host_buffer_response();
make_array_resp->set_array_handle(handle);
return response;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleAssembleArrayFromSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& assemble_request =
request->assemble_array_from_single_device_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle :
assemble_request.single_device_array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(Shape shape, Shape::FromProto(assemble_request.shape()));
TF_ASSIGN_OR_RETURN(
auto sharding, Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
assemble_request.sharding()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
assemble_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(auto array, client_->AssembleArrayFromSingleDeviceArrays(
std::move(shape), std::move(sharding),
absl::MakeSpan(arrays), semantics));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
uint64_t handle = handle_generator_.New();
ifrt_resp->mutable_assemble_array_from_single_device_arrays_response()
->set_array_handle(handle);
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({handle, std::move(array)});
}
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleRemapArraysRequest(std::unique_ptr<IfrtRequest> request) {
const auto& remap_request = request->remap_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle : remap_request.array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(
RemapPlan plan,
RemapPlan::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
remap_request.plan()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
remap_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(
auto out_arrays,
client_->RemapArrays(plan, absl::MakeSpan(arrays), semantics));
int64_t num_arrays = out_arrays.size();
auto response = NewIfrtResponse(request->request_metadata().op_id());
auto* handles =
response->mutable_remap_arrays_response()->mutable_array_handles();
handles->Reserve(num_arrays);
uint64_t* handles_buf = handles->AddNAlreadyReserved(num_arrays);
handle_generator_.BulkNew(absl::MakeSpan(handles_buf, num_arrays));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < num_arrays; ++i) {
arrays_.insert({handles_buf[i], out_arrays[i]});
}
}
return response;
}
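// Copies the array's contents into a newly allocated host buffer and, once
// the copy completes, stores it under the client-provided host buffer handle.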
Future<BackendInterface::Response> IfrtBackend::HandleCopyToHostBufferRequest(
std::unique_ptr<IfrtRequest> request) {
const CopyToHostBufferRequest& copy_to_host =
request->copy_to_host_buffer_request();
auto array = GetArray(copy_to_host.array_handle());
if (!array.ok()) {
return Future<Response>(array.status());
}
std::optional<int> element_size = (*array)->dtype().byte_size();
if (element_size == std::nullopt) {
return Future<Response>(
absl::InternalError("Array element size is unknown."));
}
int64_t host_buffer_size =
(*array)->shape().num_elements() * element_size.value();
auto host_buffer = std::make_unique<std::string>();
host_buffer->resize(host_buffer_size);
const auto byte_strides = [&]() -> std::optional<std::vector<int64_t>> {
if (!copy_to_host.has_byte_strides()) {
return std::nullopt;
}
return FromByteStridesProto(copy_to_host.byte_strides());
}();
const auto mem_region = ArrayMemRegion::FromMinimalMemRegion(
absl::string_view(*host_buffer), (*array)->dtype(), (*array)->shape(),
byte_strides);
if (!mem_region.ok()) {
return Future<Response>(mem_region.status());
}
Future<> copy_status =
(*array)->CopyToHostBuffer(mem_region->zeroth_element(), byte_strides,
ArrayCopySemantics::kAlwaysCopy);
auto resp_promise = Future<BackendInterface::Response>::CreatePromise();
Future<BackendInterface::Response> resp_future(resp_promise);
auto on_ready = [this, op_id = request->request_metadata().op_id(),
host_buffer = std::move(host_buffer),
host_buffer_handle = copy_to_host.host_buffer_handle()](
absl::Status status) mutable
-> absl::StatusOr<std::unique_ptr<IfrtResponse>> {
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(
host_buffer_store_->Store(host_buffer_handle, *std::move(host_buffer)));
std::unique_ptr<IfrtResponse> response = NewIfrtResponse(op_id);
response->mutable_copy_to_host_buffer_response();
return response;
};
copy_status.OnReady(
[promise = std::move(resp_promise), on_ready = std::move(on_ready)](
absl::Status status) mutable { promise.Set(on_ready(status)); });
return resp_future;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDisassembleIntoSingleDeviceArraysRequest(
std::unique_ptr<IfrtRequest> request) {
TF_ASSIGN_OR_RETURN(
auto array,
GetArray(request->disassemble_into_single_device_arrays_request()
.array_handle()));
TF_ASSIGN_OR_RETURN(auto single_device_arrays,
array->DisassembleIntoSingleDeviceArrays(
xla::ifrt::ArrayCopySemantics::kAlwaysCopy));
int64_t num_arrays = single_device_arrays.size();
auto response = NewIfrtResponse(request->request_metadata().op_id());
auto* handles =
response->mutable_disassemble_into_single_device_arrays_response()
->mutable_single_device_array_handles();
handles->Reserve(num_arrays);
uint64_t* handles_buf = handles->AddNAlreadyReserved(num_arrays);
handle_generator_.BulkNew(absl::MakeSpan(handles_buf, num_arrays));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < num_arrays; ++i) {
arrays_.insert({handles_buf[i], single_device_arrays[i]});
}
}
return response;
}
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleCopyArraysRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& copy_arrays_request = request->copy_arrays_request();
std::vector<tsl::RCReference<xla::ifrt::Array>> arrays;
arrays.reserve(copy_arrays_request.array_handles_size());
for (const auto& handle : copy_arrays_request.array_handles()) {
TF_ASSIGN_OR_RETURN(arrays.emplace_back(), GetArray(handle));
}
std::optional<tsl::RCReference<DeviceList>> devices;
if (!copy_arrays_request.device_ids().empty()) {
BasicDeviceList::Devices ds;
for (const auto& device_id : copy_arrays_request.device_ids()) {
TF_ASSIGN_OR_RETURN(ds.emplace_back(),
client_->LookupDevice(DeviceId(device_id)));
}
devices.emplace(BasicDeviceList::Create(std::move(ds)));
}
std::optional<MemoryKind> memory_kind;
if (copy_arrays_request.has_memory_kind()) {
if (const absl::string_view m = copy_arrays_request.memory_kind();
!m.empty()) {
memory_kind.emplace(MemoryKind(m));
} else {
memory_kind.emplace(MemoryKind());
}
}
TF_ASSIGN_OR_RETURN(
auto semantics,
FromArrayCopySemanticsProto(copy_arrays_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(
auto new_arrays,
client_->CopyArrays(absl::MakeSpan(arrays), std::move(devices),
memory_kind, semantics));
std::unique_ptr<IfrtResponse> ifrt_resp =
NewIfrtResponse(request->request_metadata().op_id());
auto* const copy_arrays_resp = ifrt_resp->mutable_copy_arrays_response();
std::vector<uint64_t> new_handles(new_arrays.size());
handle_generator_.BulkNew(absl::MakeSpan(new_handles));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < new_arrays.size(); ++i) {
arrays_.insert({new_handles[i], new_arrays[i]});
copy_arrays_resp->add_array_handles(new_handles[i]);
}
}
return ifrt_resp;
}
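// "Resharding" is supported only when the target sharding keeps the same
// partitioning as the source; the data movement itself is delegated to
// CopyArrays.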
absl::StatusOr<BackendInterface::Response> IfrtBackend::HandleReshardRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& reshard_request = request->reshard_request();
TF_ASSIGN_OR_RETURN(auto array, GetArray(reshard_request.array_handle()));
TF_ASSIGN_OR_RETURN(
std::shared_ptr<const Sharding> sharding,
Sharding::FromProto(
absl::bind_front(&Client::LookupDevice, client_.get()),
reshard_request.sharding()));
TF_ASSIGN_OR_RETURN(auto semantics, FromArrayCopySemanticsProto(
reshard_request.copy_semantics()));
if (!array->sharding().HasSamePartitioning(*sharding)) {
return absl::InvalidArgumentError(absl::StrCat(
"IFRT Proxy does not support resharding, but got ",
array->sharding().DebugString(), " as the original sharding and ",
sharding->DebugString(), " as the target sharding"));
}
TF_ASSIGN_OR_RETURN(
auto copied_arrays,
client_->CopyArrays(absl::MakeSpan(&array, 1), sharding->devices(),
sharding->memory_kind(), semantics));
uint64_t resharded_array_handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({resharded_array_handle, std::move(copied_arrays[0])});
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_reshard_response()->set_array_handle(
resharded_array_handle);
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleFullyReplicatedShardRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& fully_replicated_shard_request =
request->fully_replicated_shard_request();
TF_ASSIGN_OR_RETURN(auto array,
GetArray(fully_replicated_shard_request.array_handle()));
TF_ASSIGN_OR_RETURN(auto semantics,
FromArrayCopySemanticsProto(
fully_replicated_shard_request.copy_semantics()));
TF_ASSIGN_OR_RETURN(auto new_array, array->FullyReplicatedShard(semantics));
uint64_t new_array_handle = handle_generator_.New();
{
absl::MutexLock lock(&arrays_mutex_);
arrays_.insert({new_array_handle, std::move(new_array)});
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_fully_replicated_shard_response()->set_array_handle(
new_array_handle);
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDeleteArrayRequest(std::unique_ptr<IfrtRequest> request) {
std::vector<uint64_t> bad_handles;
std::vector<Future<>> deletion_futures;
auto delete_handle = [&](uint64_t handle) {
auto array = GetArray(handle);
if (array.ok()) {
deletion_futures.push_back(array.value()->Delete());
} else {
deletion_futures.push_back(Future<>(array.status()));
}
};
if (request->delete_array_request().has_array_handle_deprecated()) {
delete_handle(request->delete_array_request().array_handle_deprecated());
}
for (auto array_handle : request->delete_array_request().array_handle()) {
delete_handle(array_handle);
}
uint64_t future_handle = handle_generator_.New();
{
absl::MutexLock lock(&futures_mutex_);
futures_.insert({future_handle, JoinFutures(deletion_futures)});
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_delete_array_response()->set_deletion_future_handle(
future_handle);
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleIsArrayDeletedRequest(std::unique_ptr<IfrtRequest> request) {
TF_ASSIGN_OR_RETURN(
auto array, GetArray(request->is_array_deleted_request().array_handle()));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_is_array_deleted_response()->set_deleted(
array->IsDeleted());
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleDestructArrayRequest(std::unique_ptr<IfrtRequest> request) {
std::vector<uint64_t> bad_handles;
{
absl::MutexLock lock(&arrays_mutex_);
for (const uint64_t array_handle :
request->destruct_array_request().array_handle()) {
if (!arrays_.erase(array_handle)) {
bad_handles.push_back(array_handle);
}
}
if (request->destruct_array_request().has_array_handle_deprecated()) {
const uint64_t array_handle =
request->destruct_array_request().array_handle_deprecated();
if (!arrays_.erase(array_handle)) {
bad_handles.push_back(array_handle);
}
}
}
if (!bad_handles.empty()) {
return absl::NotFoundError(absl::StrCat("Unknown array handle(s): ",
absl::StrJoin(bad_handles, ",")));
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_destruct_array_response();
return ifrt_resp;
}
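// Deserializes the program and compile options, wires up any remote loaded
// host callbacks, compiles on the dedicated thread pool, and registers
// handles for the executable, its readiness future, and each callback queue.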
Future<BackendInterface::Response> IfrtBackend::HandleCompileRequest(
std::unique_ptr<IfrtRequest> request) {
auto f = [this, request = std::shared_ptr<IfrtRequest>(
std::move(request))]() -> absl::StatusOr<Response> {
const CompileRequest& compile_request = request->compile_request();
auto deserialize_program_options =
std::make_unique<DeserializeProgramOptions>(
absl::bind_front(&Client::LookupDevice, client_.get()));
TF_ASSIGN_OR_RETURN(
auto program,
Deserialize<xla::ifrt::Program>(
compile_request.program(), std::move(deserialize_program_options)));
TF_ASSIGN_OR_RETURN(auto options, Deserialize<xla::ifrt::CompileOptions>(
compile_request.compile_options(),
nullptr));
std::vector<std::shared_ptr<RemoteLoadedHostCallbackQueue>>
host_callback_queues;
{
std::vector<tsl::RCReference<xla::ifrt::LoadedHostCallback>>
loaded_host_callbacks;
for (int i = 0; i < compile_request.host_callbacks_size(); ++i) {
host_callback_queues.emplace_back(
std::make_shared<RemoteLoadedHostCallbackQueue>());
TF_ASSIGN_OR_RETURN(
loaded_host_callbacks.emplace_back(),
RemoteLoadedHostCallback::CreateFromSerialized(
client_.get(), compile_request.host_callbacks(i),
host_callback_queues.back()));
}
if (!loaded_host_callbacks.empty()) {
if (auto xla_options =
llvm::dyn_cast<xla::ifrt::XlaCompileOptions>(options.get())) {
xla_options->loaded_host_callbacks = std::move(loaded_host_callbacks);
} else {
return absl::UnimplementedError(
"Host callbacks are supported only for XLA-like IFRT "
"implementations using `xla::ifrt::XlaCompileOptions`");
}
}
}
TF_ASSIGN_OR_RETURN(auto executable,
client_->GetDefaultCompiler()->Compile(
std::move(program), std::move(options)));
std::unique_ptr<IfrtResponse> ifrt_resp =
NewIfrtResponse(request->request_metadata().op_id());
auto* compile_resp = ifrt_resp->mutable_compile_response();
uint64_t handle = handle_generator_.New();
compile_resp->set_loaded_executable_handle(handle);
std::vector<uint64_t> host_callback_handles(host_callback_queues.size());
handle_generator_.BulkNew(absl::MakeSpan(host_callback_handles));
compile_resp->mutable_loaded_host_callback_handles()->Add(
host_callback_handles.begin(), host_callback_handles.end());
compile_resp->set_name(AsProtoStringData(executable->name()));
compile_resp->set_num_devices(executable->num_devices());
for (const auto* device : executable->addressable_devices()) {
compile_resp->add_addressable_device_ids(device->Id().value());
}
auto fingerprint = executable->Fingerprint();
if (!fingerprint.ok()) {
*compile_resp->mutable_fingerprint_error() =
tsl::StatusToProto(fingerprint.status());
} else if (fingerprint->has_value()) {
compile_resp->set_fingerprint_value(std::move(fingerprint)->value());
}
{
absl::MutexLock lock(&futures_mutex_);
compile_resp->set_ready_future_handle(handle_generator_.New());
futures_.insert(
{compile_resp->ready_future_handle(), executable->GetReadyFuture()});
}
{
absl::MutexLock lock(&executables_mutex_);
executables_.insert({handle, std::move(executable)});
}
{
absl::MutexLock lock(&host_callback_queues_mutex_);
for (int i = 0; i < host_callback_queues.size(); ++i) {
host_callback_queues_.insert(
{host_callback_handles[i], std::move(host_callback_queues[i])});
}
}
return ifrt_resp;
};
return AsyncExecute(std::move(f), &compile_thread_pool_);
}
Future<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableMetadataRequest(
std::unique_ptr<IfrtRequest> request) {
return AsyncExecute([this, request = std::shared_ptr<IfrtRequest>(std::move(
request))]() -> absl::StatusOr<Response> {
const uint64_t handle = request->loaded_executable_metadata_request()
.loaded_executable_handle();
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(handle));
std::unique_ptr<IfrtResponse> ifrt_resp =
NewIfrtResponse(request->request_metadata().op_id());
auto* metadata_resp =
ifrt_resp->mutable_loaded_executable_metadata_response();
if (auto parameter_shardings = executable->GetParameterShardings();
parameter_shardings.has_value()) {
metadata_resp->mutable_parameter_shardings()->mutable_shardings()->Add(
parameter_shardings->begin(), parameter_shardings->end());
}
if (auto output_shardings = executable->GetOutputShardings();
output_shardings.has_value()) {
metadata_resp->mutable_output_shardings()->mutable_shardings()->Add(
output_shardings->begin(), output_shardings->end());
}
if (auto parameter_layouts = executable->GetParameterLayouts();
parameter_layouts.ok()) {
auto* const layouts =
metadata_resp->mutable_parameter_layouts_list()->mutable_layouts();
for (const std::unique_ptr<xla::PjRtLayout>& parameter_layout :
*parameter_layouts) {
const xla::PjRtXlaLayout* layout =
dynamic_cast<const xla::PjRtXlaLayout*>(parameter_layout.get());
TF_RET_CHECK(layout != nullptr)
<< "IFRT proxy only supports PjRtXlaLayout, got a different "
"subclass";
layouts->Add(layout->xla_layout().ToProto());
}
} else {
*metadata_resp->mutable_parameter_layouts_error() =
tsl::StatusToProto(parameter_layouts.status());
}
if (auto output_layouts = executable->GetOutputLayouts();
output_layouts.ok()) {
auto* const layouts =
metadata_resp->mutable_output_layouts_list()->mutable_layouts();
for (const std::unique_ptr<xla::PjRtLayout>& output_layout :
*output_layouts) {
const xla::PjRtXlaLayout* layout =
dynamic_cast<const xla::PjRtXlaLayout*>(output_layout.get());
TF_RET_CHECK(layout != nullptr)
<< "IFRT proxy only supports PjRtXlaLayout, got a different "
"subclass";
layouts->Add(layout->xla_layout().ToProto());
}
} else {
*metadata_resp->mutable_output_layouts_error() =
tsl::StatusToProto(output_layouts.status());
}
auto output_memory_kinds = executable->GetOutputMemoryKinds();
if (output_memory_kinds.ok()) {
for (const auto& memory_kinds : *output_memory_kinds) {
auto* const list = metadata_resp->mutable_output_memory_kinds()
->add_memory_kind_lists()
->mutable_memory_kinds();
list->Reserve(memory_kinds.size());
list->Add(memory_kinds.begin(), memory_kinds.end());
}
} else {
*metadata_resp->mutable_output_memory_kinds()->mutable_status() =
tsl::StatusToProto(output_memory_kinds.status());
}
return ifrt_resp;
});
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableExecuteRequest(
std::unique_ptr<IfrtRequest> request) {
const LoadedExecutableExecuteRequest& execute =
request->loaded_executable_execute_request();
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(execute.loaded_executable_handle()));
std::vector<tsl::RCReference<xla::ifrt::Array>> args;
args.reserve(execute.args_handles_size());
{
absl::ReaderMutexLock lock(&arrays_mutex_);
for (const uint64_t handle : execute.args_handles()) {
TF_ASSIGN_OR_RETURN(args.emplace_back(), GetArrayLocked(handle));
}
}
TF_ASSIGN_OR_RETURN(auto execute_options,
xla::ifrt::LoadedExecutable::ExecuteOptions::FromProto(
execute.execute_options()));
if (version_.protocol_version() < 6) {
execute_options.fill_status = true;
}
std::optional<tsl::RCReference<DeviceList>> devices;
if (!execute.device_ids().empty()) {
BasicDeviceList::Devices d;
d.reserve(execute.device_ids_size());
for (const int32_t device_id : execute.device_ids()) {
TF_ASSIGN_OR_RETURN(d.emplace_back(),
client_->LookupDevice(DeviceId(device_id)));
}
devices = BasicDeviceList::Create(std::move(d));
}
TF_ASSIGN_OR_RETURN(
xla::ifrt::LoadedExecutable::ExecuteResult result,
executable->Execute(absl::MakeSpan(args), execute_options, devices));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
LoadedExecutableExecuteResponse* execute_response =
ifrt_resp->mutable_loaded_executable_execute_response();
if (version_.protocol_version() < 6 || execute_options.fill_status) {
absl::MutexLock lock(&futures_mutex_);
execute_response->set_status_handle(handle_generator_.New());
futures_.insert(
{execute_response->status_handle(), std::move(result.status)});
}
std::vector<uint64_t> output_handles(result.outputs.size());
handle_generator_.BulkNew(absl::MakeSpan(output_handles));
{
absl::MutexLock lock(&arrays_mutex_);
for (int i = 0; i < result.outputs.size(); ++i) {
tsl::RCReference<xla::ifrt::Array>& array = result.outputs[i];
LoadedExecutableExecuteResponse::Output* output =
execute_response->add_outputs();
*output->mutable_dtype() = array->dtype().ToProto();
*output->mutable_shape() = array->shape().ToProto();
TF_ASSIGN_OR_RETURN(*output->mutable_sharding(),
array->sharding().ToProto());
output->set_array_handle(output_handles[i]);
arrays_.insert({output_handles[i], std::move(array)});
}
}
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableDeleteRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& del = request->loaded_executable_delete_request();
TF_ASSIGN_OR_RETURN(std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(del.loaded_executable_handle()));
Future<> future = executable->Delete();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
auto* del_response = ifrt_resp->mutable_loaded_executable_delete_response();
{
absl::MutexLock lock(&futures_mutex_);
del_response->set_future_handle(handle_generator_.New());
futures_.insert({del_response->future_handle(), std::move(future)});
}
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableIsDeletedRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& is_deleted = request->loaded_executable_is_deleted_request();
TF_ASSIGN_OR_RETURN(
std::shared_ptr<xla::ifrt::LoadedExecutable> executable,
GetLoadedExecutable(is_deleted.loaded_executable_handle()));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
auto* is_deleted_response =
ifrt_resp->mutable_loaded_executable_is_deleted_response();
is_deleted_response->set_is_deleted(executable->IsDeleted());
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedExecutableDestructRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& destruct = request->loaded_executable_destruct_request();
std::shared_ptr<xla::ifrt::LoadedExecutable> executable;
{
absl::MutexLock lock(&executables_mutex_);
const auto it = executables_.find(destruct.loaded_executable_handle());
if (it == executables_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown loaded executable handle: ",
destruct.loaded_executable_handle()));
}
executable = std::move(it->second);
executables_.erase(it);
}
executable.reset();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_loaded_executable_destruct_response();
return ifrt_resp;
}
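// Retrieves the next queued host callback execution, stages its operands in
// the host buffer store, and returns an execution handle that the client uses
// to report results via LoadedHostCallbackReturn.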
Future<BackendInterface::Response>
IfrtBackend::HandleLoadedHostCallbackPollRequest(
std::unique_ptr<IfrtRequest> request) {
return AsyncExecute([this, request = std::shared_ptr<IfrtRequest>(std::move(
request))]() -> absl::StatusOr<Response> {
const auto& poll = request->loaded_host_callback_poll_request();
const uint64_t handle = poll.loaded_host_callback_handle();
std::shared_ptr<RemoteLoadedHostCallbackQueue> queue;
{
absl::MutexLock lock(&host_callback_queues_mutex_);
auto it = host_callback_queues_.find(handle);
if (it == host_callback_queues_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown loaded host callback handle: ", handle));
}
queue = it->second;
}
auto execution_request = queue->Pop();
if (!execution_request.has_value()) {
{
absl::MutexLock lock(&host_callback_queues_mutex_);
host_callback_queues_.erase(handle);
}
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_loaded_host_callback_poll_response();
return ifrt_resp;
}
absl::Cleanup cleanup = [&] {
std::move(execution_request)
->status.Set(absl::UnknownError(
"Unable to enqueue the host callback execution"));
};
{
std::string buffer;
for (const auto& operand : execution_request->operands) {
buffer.append(static_cast<const char*>(operand.data), operand.size);
}
TF_RETURN_IF_ERROR(host_buffer_store_->Store(
poll.operand_host_buffer_handle(), std::move(buffer)));
}
const uint64_t execution_handle = handle_generator_.New();
{
absl::MutexLock lock(&host_callback_executions_mutex_);
host_callback_executions_.insert(
{execution_handle, *std::move(execution_request)});
}
std::move(cleanup).Cancel();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
auto* poll_response =
ifrt_resp->mutable_loaded_host_callback_poll_response();
poll_response->set_host_callback_execution_handle(execution_handle);
return ifrt_resp;
});
}
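// Completes a host callback execution: copies the returned result buffer into
// the callback's result regions (or propagates the reported error) and
// resolves the execution's status promise.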
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleLoadedHostCallbackReturnRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& ret = request->loaded_host_callback_return_request();
RemoteLoadedHostCallbackQueue::ExecutionRequest execution_request;
{
absl::MutexLock lock(&host_callback_executions_mutex_);
const auto it =
host_callback_executions_.find(ret.host_callback_execution_handle());
if (it == host_callback_executions_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown host callback execution: ",
ret.host_callback_execution_handle()));
}
execution_request = std::move(it->second);
host_callback_executions_.erase(it);
}
absl::Cleanup cleanup = [&] {
std::move(execution_request)
.status.Set(absl::UnknownError(
"Unable to process the host callback execution results"));
};
absl::Status status;
if (ret.has_result_host_buffer_handle()) {
TF_ASSIGN_OR_RETURN(
std::shared_ptr<const std::string> buffer,
host_buffer_store_->Lookup(ret.result_host_buffer_handle()));
absl::Cleanup cleanup = [&] {
CHECK_OK(host_buffer_store_->Delete(ret.result_host_buffer_handle()));
};
int64_t offset = 0;
for (const auto& result : execution_request.results) {
if (offset + result.size > buffer->size()) {
return absl::InternalError(
absl::StrCat("Buffer overflow while reading host callback "
"execution results; ",
"range: [", offset, ", ", offset + result.size, "), ",
"buffer size: ", buffer->size()));
}
std::memcpy(result.data, buffer->data() + offset, result.size);
offset += result.size;
}
if (offset != buffer->size()) {
return absl::InternalError(
absl::StrCat("Host callback execution did not consume the entire "
"result buffer; size: ",
buffer->size(), "; consumed: ", offset));
}
} else {
status = tsl::StatusFromProto(ret.error());
}
std::move(execution_request).status.Set(std::move(status));
std::move(cleanup).Cancel();
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
ifrt_resp->mutable_loaded_host_callback_return_response();
return ifrt_resp;
}
absl::StatusOr<BackendInterface::Response>
IfrtBackend::HandleGetDefaultDeviceAssignmentRequest(
std::unique_ptr<IfrtRequest> request) {
const auto& get_default_device_assignment_request =
request->get_default_device_assignment_request();
TF_ASSIGN_OR_RETURN(
auto assignment,
client_->GetDefaultDeviceAssignment(
get_default_device_assignment_request.num_replicas(),
get_default_device_assignment_request.num_partitions()));
auto ifrt_resp = NewIfrtResponse(request->request_metadata().op_id());
assignment.Serialize(
ifrt_resp->mutable_get_default_device_assignment_response()
->mutable_device_assignment());
return ifrt_resp;
}
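// Handle-to-object lookup helpers; they return NotFoundError for handles that
// are not currently registered.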
absl::StatusOr<std::shared_ptr<xla::ifrt::LoadedExecutable>>
IfrtBackend::GetLoadedExecutable(uint64_t handle) {
absl::MutexLock lock(&executables_mutex_);
auto it = executables_.find(handle);
if (it == executables_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown loaded executable handle: ", handle));
}
return it->second;
}
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> IfrtBackend::GetArray(
uint64_t array_handle) {
absl::ReaderMutexLock lock(&arrays_mutex_);
return GetArrayLocked(array_handle);
}
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>> IfrtBackend::GetArrayLocked(
uint64_t array_handle) {
auto it = arrays_.find(array_handle);
if (it == arrays_.end()) {
return absl::NotFoundError(
absl::StrCat("Unknown array handle: ", array_handle));
}
return it->second;
}
}
}
} | #include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include <sys/types.h>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/compiler.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/host_callback.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::_;
using ::testing::ByMove;
using ::testing::DoAll;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::Invoke;
using ::testing::Not;
using ::testing::NotNull;
using ::testing::Optional;
using ::testing::Pointee;
using ::testing::Return;
using ::testing::ReturnRef;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
#if defined(PLATFORM_GOOGLE)
using ::testing::EquivToProto;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
using ::testing::proto::Partially;
#endif
constexpr uint64_t kSessionId = 12345;
class IfrtBackendTest
: public ::testing::TestWithParam<int> {
protected:
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(GetParam());
return version;
}
};
std::unique_ptr<IfrtRequest> NewIfrtRequest(uint64_t op_id) {
auto ifrt_request = std::make_unique<IfrtRequest>();
auto* request_metadata = ifrt_request->mutable_request_metadata();
request_metadata->set_op_id(op_id);
return ifrt_request;
}
TEST_P(IfrtBackendTest, CreationFailsWithNullIfrtClient) {
EXPECT_THAT(IfrtBackend::Create(Version(), kSessionId, nullptr, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_P(IfrtBackendTest, SuccessfulCreation) {
auto ifrt_client = std::make_unique<MockClient>();
ASSERT_THAT(IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()),
IsOk());
}
TEST_P(IfrtBackendTest, ShutdownSucceeds) {
auto ifrt_client = std::make_unique<MockClient>();
TF_ASSERT_OK_AND_ASSIGN(
auto ifrt_backend,
IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()));
}
TEST_P(IfrtBackendTest, ProcessFailsWithNoRequestSet) {
auto ifrt_client = std::make_unique<MockClient>();
TF_ASSERT_OK_AND_ASSIGN(
auto ifrt_backend,
IfrtBackend::Create(Version(), kSessionId, std::move(ifrt_client),
std::make_shared<HostBufferStore>()));
auto request = std::make_unique<IfrtRequest>();
auto process_status = ifrt_backend->Process(std::move(request)).Await();
ASSERT_THAT(process_status, Not(IsOk()));
}
INSTANTIATE_TEST_SUITE_P(
IfrtBackendTestWithAllVersions, IfrtBackendTest,
testing::Range(kServerMinVersion, kServerMaxVersion + 1),
[](const testing::TestParamInfo<IfrtBackendTest::ParamType>& info) {
return absl::StrCat(info.param);
});
struct TestProgram : llvm::RTTIExtends<TestProgram, Program> {
static char ID;
};
[[maybe_unused]] char TestProgram::ID = 0;
class TestProgramSerDes : public llvm::RTTIExtends<TestProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::proxy::TestProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
CHECK(llvm::isa<TestProgram>(serializable));
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
return std::make_unique<TestProgram>();
}
static char ID;
};
[[maybe_unused]] char TestProgramSerDes::ID = 0;
struct TestCompileOptions
: llvm::RTTIExtends<TestCompileOptions, CompileOptions> {
static char ID;
};
[[maybe_unused]] char TestCompileOptions::ID = 0;
class TestCompileOptionsSerDes
: public llvm::RTTIExtends<TestCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::proxy::TestCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
CHECK(llvm::isa<TestCompileOptions>(serializable));
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
return std::make_unique<TestCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char TestCompileOptionsSerDes::ID = 0;
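// Fixture that backs an IfrtBackend with a MockClient exposing two
// MockDevices, a MockCompiler as the default compiler, and an in-process
// HostBufferStore.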
class IfrtBackendHandlerTest : public IfrtBackendTest {
protected:
static void SetUpTestSuite() {
RegisterSerDes<TestProgram>(std::make_unique<TestProgramSerDes>());
RegisterSerDes<TestCompileOptions>(
std::make_unique<TestCompileOptionsSerDes>());
}
void SetUp() override {
auto mock_client = std::make_unique<xla::ifrt::MockClient>();
std::vector<xla::ifrt::Device*> raw_device_ptrs;
for (int i = 0; i < 2; ++i) {
auto mock_device = std::make_unique<xla::ifrt::MockDevice>();
ON_CALL(*mock_device, Id()).WillByDefault(Return(DeviceId(i)));
raw_device_ptrs.push_back(mock_device.get());
mock_devices_.push_back(std::move(mock_device));
}
ON_CALL(*mock_client, devices()).WillByDefault(Return(raw_device_ptrs));
ON_CALL(*mock_client, LookupDevice(_))
.WillByDefault(
Invoke([this](DeviceId id) -> absl::StatusOr<xla::ifrt::Device*> {
if (id.value() < 0 || id.value() >= mock_devices_.size()) {
return absl::NotFoundError(
absl::StrCat("Unknown device id: ", id.value()));
}
return mock_devices_[id.value()].get();
}));
mock_client_ = mock_client.get();
EXPECT_CALL(*mock_client_, GetDefaultCompiler)
.WillRepeatedly(Return(&mock_compiler_));
host_buffer_store_ = std::make_shared<HostBufferStore>();
TF_ASSERT_OK_AND_ASSIGN(
backend_,
IfrtBackend::Create(Version(), kSessionId, std::move(mock_client),
host_buffer_store_));
}
absl::StatusOr<std::shared_ptr<IfrtResponse>> CallBackend(
std::unique_ptr<IfrtRequest> request) {
auto response_future = backend_->Process(std::move(request));
return std::move(response_future).Await();
}
uint64_t NewOpId() {
absl::MutexLock lock(&mu_);
return current_op_id_++;
}
uint64_t NewHostBufferHandle() { return current_host_buffer_handle_++; }
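  // Stages a small host buffer and issues a MakeArrayFromHostBuffer request
  // against the mocked client, returning the resulting array handle.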
absl::StatusOr<uint64_t> MakeTestArray(tsl::RCReference<Array> mock_array) {
EXPECT_CALL(*mock_client_, MakeArrayFromHostBuffer(_, _, _, _, _, _, _))
.WillOnce(Return(std::move(mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
{
const uint64_t host_buffer_handle = NewHostBufferHandle();
TF_RETURN_IF_ERROR(
host_buffer_store_->Store(host_buffer_handle, "01234567"));
auto* make_array =
ifrt_request->mutable_make_array_from_host_buffer_request();
make_array->mutable_dtype()->set_kind(DTypeProto::KIND_S32);
make_array->mutable_shape()->add_dims(2);
make_array->set_host_buffer_handle(host_buffer_handle);
TF_ASSIGN_OR_RETURN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSIGN_OR_RETURN(
*make_array->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
TF_ASSIGN_OR_RETURN(auto make_array_response,
CallBackend(std::move(ifrt_request)));
TF_RETURN_IF_ERROR(tsl::StatusFromProto(
make_array_response->response_metadata().status()));
return make_array_response->make_array_from_host_buffer_response()
.array_handle();
}
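  // Serializes a TestProgram and TestCompileOptions, stubs the mock compiler
  // to return the given executable, and issues a CompileRequest.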
absl::StatusOr<CompileResponse> CompileTestLoadedExecutable(
absl::StatusOr<std::unique_ptr<LoadedExecutable>> loaded_executable) {
auto request = NewIfrtRequest(NewOpId());
CompileRequest* compile_request = request->mutable_compile_request();
TestProgram program;
TF_ASSIGN_OR_RETURN(*compile_request->mutable_program(),
Serialize(program));
TestCompileOptions compile_options;
TF_ASSIGN_OR_RETURN(*compile_request->mutable_compile_options(),
Serialize(compile_options));
EXPECT_CALL(mock_compiler_, Compile(_, _))
.WillOnce(Return(ByMove(std::move(loaded_executable))));
TF_ASSIGN_OR_RETURN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
TF_RET_CHECK(response->has_compile_response());
return response->compile_response();
}
absl::Status CheckFuture(uint64_t handle) {
if (handle == 0) {
return absl::InternalError("Test error, future handle is 0");
}
auto request = NewIfrtRequest(NewOpId());
request->mutable_check_future_request()->set_future_handle(handle);
TF_ASSIGN_OR_RETURN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
return tsl::StatusFromProto(response->response_metadata().status());
}
xla::ifrt::MockClient* mock_client_;
xla::ifrt::MockCompiler mock_compiler_;
std::vector<std::unique_ptr<xla::ifrt::MockDevice>> mock_devices_;
std::shared_ptr<HostBufferStore> host_buffer_store_;
private:
absl::Mutex mu_;
uint64_t current_op_id_ ABSL_GUARDED_BY(mu_) = 1;
uint64_t current_host_buffer_handle_ = 1;
std::unique_ptr<IfrtBackend> backend_;
};
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, Init) {
EXPECT_CALL(*mock_client_, platform_name())
.WillRepeatedly(Return("ifrt_backend"));
EXPECT_CALL(*mock_client_, platform_version()).WillRepeatedly(Return("n/a"));
EXPECT_CALL(*mock_client_, platform_id()).WillRepeatedly(Return(42));
EXPECT_CALL(*mock_client_, process_index()).WillRepeatedly(Return(1));
EXPECT_CALL(*mock_client_, runtime_type())
.WillRepeatedly(Return("ifrt-service"));
std::vector<std::vector<xla::ifrt::Device*>> mock_memory_devices;
mock_memory_devices.reserve(mock_devices_.size());
for (const auto& mock_device : mock_devices_) {
mock_memory_devices.push_back({mock_device.get()});
}
std::vector<MockMemory> mock_memories(mock_devices_.size());
MemoryKind kind("mock");
for (int i = 0; i < mock_memories.size(); ++i) {
MockMemory& memory = mock_memories[i];
EXPECT_CALL(memory, Devices())
.WillRepeatedly(Return(mock_memory_devices[i]));
EXPECT_CALL(memory, Id()).WillRepeatedly(Return(MemoryId(i)));
EXPECT_CALL(memory, Kind()).WillRepeatedly(ReturnRef(kind));
}
std::vector<std::vector<Memory*>> device_memories;
device_memories.reserve(mock_devices_.size());
for (int i = 0; i < mock_devices_.size(); ++i) {
device_memories.push_back({&mock_memories[i]});
}
std::vector<AttributeMap> device_attributes;
device_attributes.reserve(mock_devices_.size());
for (int i = 0; i < mock_devices_.size(); ++i) {
AttributeMap::Map map;
map.insert({"name", AttributeMap::StringValue(absl::StrCat("device", i))});
device_attributes.push_back(AttributeMap(std::move(map)));
MockDevice& mock_device = *mock_devices_[i];
EXPECT_CALL(mock_device, Kind()).WillRepeatedly(Return("mock"));
EXPECT_CALL(mock_device, Memories())
.WillRepeatedly(Return(device_memories[i]));
EXPECT_CALL(mock_device, DefaultMemory())
.WillRepeatedly(Return(&mock_memories[i]));
EXPECT_CALL(mock_device, Attributes())
.WillRepeatedly(ReturnRef(device_attributes[i]));
}
auto request = NewIfrtRequest(NewOpId());
request->mutable_init_request();
if (Version().protocol_version() <= 3) {
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(
Partially(IgnoringRepeatedFieldOrdering(EquivToProto(R"pb(
init_response {
session_id: 12345
platform_name: "ifrt_backend"
platform_version: "n/a"
platform_id: 42
process_index: 1
runtime_type: "ifrt-service"
devices {
id: 0
device_kind: "mock"
default_memory_id: 0
memory_ids: [ 0 ]
deprecated_attributes {
key: "name"
value { string_value: "device0" }
}
}
devices {
id: 1
device_kind: "mock"
default_memory_id: 1
memory_ids: [ 1 ]
deprecated_attributes {
key: "name"
value { string_value: "device1" }
}
}
memories {
id: 0
memory_space_kind: "mock"
device_ids: [ 0 ]
}
memories {
id: 1
memory_space_kind: "mock"
device_ids: [ 1 ]
}
}
)pb"))))));
} else {
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(
Partially(IgnoringRepeatedFieldOrdering(EquivToProto(R"pb(
init_response {
session_id: 12345
platform_name: "ifrt_backend"
platform_version: "n/a"
platform_id: 42
process_index: 1
runtime_type: "ifrt-service"
devices {
id: 0
device_kind: "mock"
default_memory_id: 0
memory_ids: [ 0 ]
attributes {
attributes {
key: "name"
value { string_value: "device0" }
}
}
}
devices {
id: 1
device_kind: "mock"
default_memory_id: 1
memory_ids: [ 1 ]
attributes {
attributes {
key: "name"
value { string_value: "device1" }
}
}
}
memories {
id: 0
memory_space_kind: "mock"
device_ids: [ 0 ]
}
memories {
id: 1
memory_space_kind: "mock"
device_ids: [ 1 ]
}
}
)pb"))))));
}
}
#endif
TEST_P(IfrtBackendHandlerTest, DisassembleIntoSingleDeviceArraysSucceeds) {
std::vector<tsl::RCReference<xla::ifrt::Array>> single_device_arrays;
single_device_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
single_device_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
tsl::RCReference<xla::ifrt::MockArray> source_mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*source_mock_array, DisassembleIntoSingleDeviceArrays(_))
.WillOnce(Return(std::move(single_device_arrays)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(source_mock_array)));
auto disassemble_request = NewIfrtRequest(NewOpId());
disassemble_request->mutable_disassemble_into_single_device_arrays_request()
->set_array_handle(array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto disassemble_response,
CallBackend(std::move(disassemble_request)));
EXPECT_THAT(
disassemble_response->disassemble_into_single_device_arrays_response()
.single_device_array_handles(),
SizeIs(2));
}
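// The dtype, shape, and byte strides from the request proto should be passed
// to MakeArrayFromHostBuffer unchanged, and a non-zero array handle returned.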
TEST_P(IfrtBackendHandlerTest, MakeArrayFromHostBufferSuccess) {
const uint64_t kHostBufferHandle = 1234;
ASSERT_THAT(
host_buffer_store_->Store(kHostBufferHandle, std::string(480, 'a')),
IsOk());
auto ifrt_request = NewIfrtRequest(NewOpId());
{
auto* make_array =
ifrt_request->mutable_make_array_from_host_buffer_request();
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
dtype { kind: KIND_F64 }
shape { dims: [ 5, 3, 4 ] }
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
make_array));
make_array->set_host_buffer_handle(kHostBufferHandle);
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*make_array->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
const Shape expected_shape({5, 3, 4});
const std::vector<int64_t> expected_byte_strides_vec = {8, 40, 120};
const std::optional<absl::Span<const int64_t>> expected_byte_strides =
absl::Span<const int64_t>(expected_byte_strides_vec);
tsl::RCReference<xla::ifrt::MockArray> mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_client_,
MakeArrayFromHostBuffer(_, DType(DType::kF64), expected_shape,
expected_byte_strides, _, _, _))
.WillOnce(Return(std::move(mock_array)));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->make_array_from_host_buffer_response().array_handle(), 0);
}
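// Assembling from two previously registered single-device arrays should call
// AssembleArrayFromSingleDeviceArrays with those same arrays and return a new
// non-zero array handle.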
TEST_P(IfrtBackendHandlerTest, AssembleArrayFromSingleDeviceArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
{
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
shape { dims: [ 2, 2 ] }
copy_semantics: ARRAY_COPY_SEMANTICS_ALWAYS_COPY
)pb",
ifrt_request
->mutable_assemble_array_from_single_device_arrays_request()));
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request
->mutable_assemble_array_from_single_device_arrays_request()
->mutable_sharding(),
SingleDeviceSharding::Create(device, MemoryKind())->ToProto());
}
std::vector<tsl::RCReference<xla::ifrt::MockArray>> single_device_arrays;
for (int i = 0; i < 2; ++i) {
auto array = tsl::MakeRef<xla::ifrt::MockArray>();
single_device_arrays.push_back(array);
TF_ASSERT_OK_AND_ASSIGN(uint64_t array_handle, MakeTestArray(array));
ifrt_request->mutable_assemble_array_from_single_device_arrays_request()
->add_single_device_array_handles(array_handle);
}
tsl::RCReference<xla::ifrt::MockArray> result =
tsl::MakeRef<xla::ifrt::MockArray>();
const Shape expected_shape({2, 2});
EXPECT_CALL(*mock_client_,
AssembleArrayFromSingleDeviceArrays(
expected_shape, _, ElementsAreArray(single_device_arrays), _))
.WillOnce(Return(std::move(result)));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->assemble_array_from_single_device_arrays_response()
.array_handle(),
0);
}
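// Copying a 5x3x4 F64 array to host should pass the requested byte strides
// through and fill the destination host buffer with 480 bytes.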
TEST_P(IfrtBackendHandlerTest, CopyToHostSuccess) {
Shape shape({5, 3, 4});
tsl::RCReference<xla::ifrt::MockArray> array =
tsl::MakeRef<xla::ifrt::MockArray>();
ON_CALL(*array, shape()).WillByDefault(ReturnRef(shape));
ON_CALL(*array, dtype()).WillByDefault(Return(DType(DType::kF64)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle, MakeTestArray(array));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* copy_to_host = ifrt_request->mutable_copy_to_host_buffer_request();
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
copy_to_host));
copy_to_host->set_array_handle(array_handle);
const uint64_t host_buffer_handle = NewHostBufferHandle();
copy_to_host->set_host_buffer_handle(host_buffer_handle);
const std::vector<int64_t> expected_byte_strides_vec = {8, 40, 120};
const std::optional<absl::Span<const int64_t>> expected_byte_strides =
absl::Span<const int64_t>(expected_byte_strides_vec);
EXPECT_CALL(*array, CopyToHostBuffer(_, expected_byte_strides, _))
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(host_buffer_store_->Lookup(host_buffer_handle),
IsOkAndHolds(Pointee(SizeIs(480))));
}
TEST_P(IfrtBackendHandlerTest, CopyToHostFailsWithNonExistentArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
byte_strides { strides: [ 8, 40, 120 ] }
)pb",
ifrt_request->mutable_copy_to_host_buffer_request()));
ifrt_request->mutable_copy_to_host_buffer_request()->set_array_handle(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
DisassembleIntoSingleArrayFailsWhenBackendRuntimeFails) {
constexpr absl::string_view kDisassembleErrorMessage =
"Some test-injected error message that is unlikely to match other error "
"messages - 1234";
tsl::RCReference<xla::ifrt::MockArray> source_mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*source_mock_array, DisassembleIntoSingleDeviceArrays(_))
.WillOnce(Return(absl::UnknownError(kDisassembleErrorMessage)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(source_mock_array)));
auto disassemble_request = NewIfrtRequest(NewOpId());
disassemble_request->mutable_disassemble_into_single_device_arrays_request()
->set_array_handle(array_handle);
ASSERT_THAT(
CallBackend(std::move(disassemble_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq(kDisassembleErrorMessage)));
}
MATCHER_P(EqualsDeviceList, device_list, "") { return *arg == *device_list; }
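// CopyArrays should forward the source arrays, destination devices, memory
// kind, and copy semantics to the client and return one handle per copied
// array.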
TEST_P(IfrtBackendHandlerTest, CopyArrays) {
std::vector<tsl::RCReference<xla::ifrt::Array>> src_arrays;
src_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
std::vector<tsl::RCReference<xla::ifrt::Array>> copied_arrays;
copied_arrays.push_back(tsl::MakeRef<xla::ifrt::MockArray>());
BasicDeviceList::Devices ds;
TF_ASSERT_OK_AND_ASSIGN(ds.emplace_back(),
mock_client_->LookupDevice(DeviceId(1)));
tsl::RCReference<DeviceList> devices = BasicDeviceList::Create(std::move(ds));
MemoryKind memory_kind("device");
EXPECT_CALL(*mock_client_, CopyArrays(ElementsAreArray(src_arrays),
Optional(EqualsDeviceList(devices)),
Optional(memory_kind),
ArrayCopySemantics::kAlwaysCopy))
.WillOnce(Return(
std::vector<tsl::RCReference<xla::ifrt::Array>>(copied_arrays)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* copy_arrays_request = ifrt_request->mutable_copy_arrays_request();
for (const auto& src_array : src_arrays) {
TF_ASSERT_OK_AND_ASSIGN(auto src_array_handle, MakeTestArray(src_array));
copy_arrays_request->add_array_handles(src_array_handle);
}
for (const auto& device : devices->devices()) {
copy_arrays_request->add_device_ids(device->Id().value());
}
copy_arrays_request->set_memory_kind(std::string(*memory_kind.memory_kind()));
copy_arrays_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(response->response_metadata().status()),
IsOk());
EXPECT_THAT(response->copy_arrays_response().array_handles(),
SizeIs(copied_arrays.size()));
}
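// A reshard request is served via the client's CopyArrays; a successful copy
// yields a new non-zero array handle.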
TEST_P(IfrtBackendHandlerTest, ReshardSuccess) {
auto src_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(0)));
auto src_sharding = SingleDeviceSharding::Create(device, MemoryKind());
ON_CALL(*src_mock_array, sharding()).WillByDefault(ReturnRef(*src_sharding));
TF_ASSERT_OK_AND_ASSIGN(auto src_array_handle,
MakeTestArray(std::move(src_mock_array)));
auto copied_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_client_, CopyArrays(_, _, _, _))
.WillOnce(Return(std::vector<tsl::RCReference<xla::ifrt::Array>>(
{copied_mock_array})));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(src_array_handle);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto* new_device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request->mutable_reshard_request()->mutable_sharding(),
SingleDeviceSharding::Create(new_device, MemoryKind())->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(response->response_metadata().status()),
IsOk());
EXPECT_NE(response->reshard_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, ReshardFailsWhenTheBackendFails) {
auto mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto* device,
mock_client_->LookupDevice(DeviceId(1)));
auto sharding = SingleDeviceSharding::Create(device, MemoryKind());
ON_CALL(*mock_array, sharding()).WillByDefault(ReturnRef(*sharding));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
EXPECT_CALL(*mock_client_, CopyArrays(_, _, _, _))
.WillOnce(Return(absl::UnknownError("injected error")));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(array_handle);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto* new_device,
mock_client_->LookupDevice(DeviceId(1)));
TF_ASSERT_OK_AND_ASSIGN(
*ifrt_request->mutable_reshard_request()->mutable_sharding(),
SingleDeviceSharding::Create(new_device, MemoryKind())->ToProto());
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
TEST_P(IfrtBackendHandlerTest, ReshardFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* reshard_request = ifrt_request->mutable_reshard_request();
reshard_request->set_array_handle(0);
reshard_request->set_copy_semantics(proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
reshard_request->mutable_sharding();
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest, FullyReplicatedShardSuccess) {
auto fully_replicated_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
auto resultant_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*fully_replicated_mock_array, FullyReplicatedShard(_))
.WillOnce(Return(std::move(resultant_array)));
TF_ASSERT_OK_AND_ASSIGN(
auto fully_replicated_array_handle,
MakeTestArray(std::move(fully_replicated_mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(
fully_replicated_array_handle);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(ifrt_request)));
EXPECT_NE(response->fully_replicated_shard_response().array_handle(), 0);
}
TEST_P(IfrtBackendHandlerTest, FullyReplicatedShardFailure) {
auto fully_replicated_mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*fully_replicated_mock_array, FullyReplicatedShard(_))
.WillOnce(Return(absl::UnknownError("injected error")));
TF_ASSERT_OK_AND_ASSIGN(
auto fully_replicated_array_handle,
MakeTestArray(std::move(fully_replicated_mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(
fully_replicated_array_handle);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
TEST_P(IfrtBackendHandlerTest,
FullyReplicatedShardFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
auto* fully_replicated_shard_request =
ifrt_request->mutable_fully_replicated_shard_request();
fully_replicated_shard_request->set_array_handle(0);
fully_replicated_shard_request->set_copy_semantics(
proto::ARRAY_COPY_SEMANTICS_ALWAYS_COPY);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
CheckArrayReadyRequestRelaysTheResultFromBackend) {
auto mock_array = tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
EXPECT_CALL(*mock_client_, GetReadyFuture(_))
.WillOnce(Return(Future<>(absl::OkStatus())))
.WillOnce(Return(Future<>(absl::UnknownError("injected error"))));
{
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_check_value_ready_request()->add_value_handles(
array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto ifrt_response,
CallBackend(std::move(ifrt_request)));
EXPECT_THAT(ifrt_response->response_metadata().status().code(),
tensorflow::error::OK);
EXPECT_TRUE(ifrt_response->has_check_value_ready_response());
}
{
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_check_value_ready_request()->add_value_handles(
array_handle);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
}
TEST_P(IfrtBackendHandlerTest,
CheckArrayReadyRequestFailsWithNonExistentArrayHandle) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_check_value_ready_request()->add_value_handles(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
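// Deleting two arrays should invoke Delete() on each and return a deletion
// future that resolves OK.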
TEST_P(IfrtBackendHandlerTest, DeleteArraySuccess) {
auto mock_array1 = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array1, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
auto mock_array2 = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array2, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle1,
MakeTestArray(std::move(mock_array1)));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle2,
MakeTestArray(std::move(mock_array2)));
uint64_t op_id = NewOpId();
auto ifrt_request = NewIfrtRequest(op_id);
ifrt_request->mutable_delete_array_request()->add_array_handle(array_handle1);
ifrt_request->mutable_delete_array_request()->add_array_handle(array_handle2);
TF_ASSERT_OK_AND_ASSIGN(auto resp, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(tsl::StatusFromProto(resp->response_metadata().status()), IsOk());
TF_EXPECT_OK(
CheckFuture(resp->delete_array_response().deletion_future_handle()));
}
TEST_P(IfrtBackendHandlerTest,
DeleteArrayReturnsFutureWithNonExistentArrayHandle) {
auto mock_array1 = tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array1, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
TF_ASSERT_OK_AND_ASSIGN(auto real_handle,
MakeTestArray(std::move(mock_array1)));
constexpr int kBadHandle = 400;
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_delete_array_request()->add_array_handle(real_handle);
ifrt_request->mutable_delete_array_request()->add_array_handle(kBadHandle);
TF_ASSERT_OK_AND_ASSIGN(auto resp, CallBackend(std::move(ifrt_request)));
EXPECT_THAT(
CheckFuture(resp->delete_array_response().deletion_future_handle()),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(IfrtBackendHandlerTest,
IsDeleteRelaysBackTheReturnValueFromBackendRuntime) {
tsl::RCReference<xla::ifrt::MockArray> mock_array =
tsl::MakeRef<xla::ifrt::MockArray>();
EXPECT_CALL(*mock_array, IsDeleted())
.WillOnce(Return(true))
.WillOnce(Return(false));
TF_ASSERT_OK_AND_ASSIGN(auto array_handle,
MakeTestArray(std::move(mock_array)));
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_is_array_deleted_request()->set_array_handle(
array_handle);
TF_ASSERT_OK_AND_ASSIGN(auto resp, CallBackend(std::move(ifrt_request)));
EXPECT_TRUE(resp->is_array_deleted_response().deleted());
ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_is_array_deleted_request()->set_array_handle(
array_handle);
TF_ASSERT_OK_AND_ASSIGN(resp, CallBackend(std::move(ifrt_request)));
EXPECT_FALSE(resp->is_array_deleted_response().deleted());
}
TEST_P(IfrtBackendHandlerTest, IsDeleteFailsForNonExistentArrays) {
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_is_array_deleted_request()->set_array_handle(0);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
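// Destructing an array releases its handle; a second destruct request for the
// same handle fails with NotFound.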
TEST_P(IfrtBackendHandlerTest, DestructArrayTest) {
tsl::RCReference<xla::ifrt::MockArray> mock_array1 =
tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle1,
MakeTestArray(std::move(mock_array1)));
tsl::RCReference<xla::ifrt::MockArray> mock_array2 =
tsl::MakeRef<xla::ifrt::MockArray>();
TF_ASSERT_OK_AND_ASSIGN(auto array_handle2,
MakeTestArray(std::move(mock_array2)));
auto ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_destruct_array_request()->add_array_handle(
array_handle1);
ifrt_request->mutable_destruct_array_request()->add_array_handle(
array_handle2);
TF_ASSERT_OK_AND_ASSIGN(auto ifrt_resp, CallBackend(std::move(ifrt_request)));
EXPECT_TRUE(ifrt_resp->has_destruct_array_response());
ifrt_request = NewIfrtRequest(NewOpId());
ifrt_request->mutable_destruct_array_request()->add_array_handle(
array_handle1);
EXPECT_THAT(CallBackend(std::move(ifrt_request)),
StatusIs(absl::StatusCode::kNotFound));
}
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, CompileSuccess) {
std::vector<MockDevice> devices(4);
for (int i = 0; i < 4; ++i) {
EXPECT_CALL(devices[i], Id()).WillOnce(Return(DeviceId(i)));
}
std::vector<xla::ifrt::Device*> addressable_devices;
for (int i = 0; i < 4; ++i) {
addressable_devices.push_back(&devices[i]);
}
auto executable = std::make_unique<MockLoadedExecutable>();
EXPECT_CALL(*executable, name()).WillOnce(Return("executable_name"));
EXPECT_CALL(*executable, num_devices()).WillOnce(Return(4));
EXPECT_CALL(*executable, addressable_devices())
.WillOnce(Return(absl::MakeSpan(addressable_devices)));
EXPECT_CALL(*executable, Fingerprint()).WillOnce(Return("fingerprint"));
EXPECT_CALL(*executable, GetReadyFuture())
.WillOnce(Return(Future<>(absl::OkStatus())));
ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(executable)));
EXPECT_THAT(response, Partially(EquivToProto(R"pb(
name: "executable_name"
num_devices: 4
addressable_device_ids: [ 0, 1, 2, 3 ]
fingerprint_value: "fingerprint"
)pb")));
TF_EXPECT_OK(CheckFuture(response.ready_future_handle()));
}
#endif
TEST_P(IfrtBackendHandlerTest, CompileFailure) {
ASSERT_THAT(
CompileTestLoadedExecutable(absl::InternalError("injected error")),
StatusIs(absl::StatusCode::kInternal, StrEq("injected error")));
}
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, LoadedExecutableMetadata) {
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
{
OpSharding op_sharding1;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(type: REPLICATED)pb", &op_sharding1));
OpSharding op_sharding2;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(type: OTHER
tile_shape {
element_type: BF16
dimensions: [ 2, 2 ]
}
tile_assignment_dimensions: [ 0, 1 ])pb",
&op_sharding2));
EXPECT_CALL(*executable, GetParameterShardings())
.WillOnce(Return(std::vector<OpSharding>{op_sharding1, op_sharding2}));
EXPECT_CALL(*executable, GetOutputShardings())
.WillOnce(Return(std::vector<OpSharding>{op_sharding1}));
std::vector<std::unique_ptr<xla::PjRtLayout>> parameter_layouts;
parameter_layouts.push_back(std::make_unique<xla::PjRtXlaLayout>(
xla::LayoutUtil::MakeDescendingLayout(1)));
parameter_layouts.push_back(std::make_unique<xla::PjRtXlaLayout>(
xla::LayoutUtil::MakeDescendingLayout(2)));
EXPECT_CALL(*executable, GetParameterLayouts())
.WillOnce(Return(std::move(parameter_layouts)));
std::vector<std::unique_ptr<xla::PjRtLayout>> output_layouts;
output_layouts.push_back(std::make_unique<xla::PjRtXlaLayout>(
xla::LayoutUtil::MakeDescendingLayout(2)));
EXPECT_CALL(*executable, GetOutputLayouts())
.WillOnce(Return(std::move(output_layouts)));
EXPECT_CALL(*executable, GetOutputMemoryKinds())
.WillOnce(Return(std::vector<std::vector<absl::string_view>>{{"foo"}}));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableMetadataRequest* metadata_request =
request->mutable_loaded_executable_metadata_request();
metadata_request->set_loaded_executable_handle(handle);
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(Partially(EquivToProto(R"pb(
loaded_executable_metadata_response {
parameter_shardings {
shardings { type: REPLICATED }
shardings {
type: OTHER
tile_shape {
element_type: BF16
dimensions: [ 2, 2 ]
}
tile_assignment_dimensions: [ 0, 1 ]
}
}
output_shardings { shardings { type: REPLICATED } }
parameter_layouts_list {
layouts { minor_to_major: 0 }
layouts { minor_to_major: [ 1, 0 ] }
}
output_layouts_list { layouts { minor_to_major: [ 1, 0 ] } }
output_memory_kinds {
memory_kind_lists { memory_kinds: [ "foo" ] }
}
}
)pb")))));
}
{
EXPECT_CALL(*executable, GetParameterShardings())
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*executable, GetOutputShardings())
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*executable, GetParameterLayouts())
.WillOnce(Return(absl::UnimplementedError("unimplemented")));
EXPECT_CALL(*executable, GetOutputLayouts())
.WillOnce(Return(absl::UnimplementedError("unimplemented")));
EXPECT_CALL(*executable, GetOutputMemoryKinds())
.WillOnce(Return(std::vector<std::vector<absl::string_view>>{}));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableMetadataRequest* metadata_request =
request->mutable_loaded_executable_metadata_request();
metadata_request->set_loaded_executable_handle(handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
const auto& metadata_response =
response->loaded_executable_metadata_response();
EXPECT_FALSE(metadata_response.has_parameter_shardings());
EXPECT_FALSE(metadata_response.has_output_shardings());
EXPECT_TRUE(metadata_response.has_parameter_layouts_error());
EXPECT_TRUE(metadata_response.has_output_layouts_error());
}
}
#endif
#if defined(PLATFORM_GOOGLE)
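// Execution should register a handle for each output array, echo the output
// dtype/shape/sharding, and (since fill_status is set) expose the execution
// status through a future handle.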
TEST_P(IfrtBackendHandlerTest, LoadedExecutableExecute) {
MockDevice device;
ON_CALL(device, Id()).WillByDefault(Return(DeviceId(0)));
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
constexpr int kNumArgs = 3;
constexpr int kNumOutputs = 2;
Shape shape({2, 2});
auto sharding = SingleDeviceSharding::Create(&device, MemoryKind());
auto make_array = [&]() {
auto array = tsl::MakeRef<MockArray>();
ON_CALL(*array, dtype()).WillByDefault(Return(DType(DType::kF32)));
ON_CALL(*array, shape()).WillByDefault(ReturnRef(shape));
ON_CALL(*array, sharding()).WillByDefault(ReturnRef(*sharding));
return array;
};
std::vector<tsl::RCReference<Array>> outputs;
outputs.reserve(kNumOutputs);
for (int i = 0; i < kNumOutputs; ++i) {
outputs.push_back(make_array());
}
EXPECT_CALL(*executable, Execute(SizeIs(kNumArgs), _, _))
.WillOnce(
Invoke([&](absl::Span<tsl::RCReference<Array>> args,
const xla::ifrt::LoadedExecutable::ExecuteOptions& options,
std::optional<tsl::RCReference<DeviceList>> devices)
-> absl::StatusOr<LoadedExecutable::ExecuteResult> {
return LoadedExecutable::ExecuteResult{
.status = Future<>(absl::InternalError("injected error")),
.outputs = outputs,
};
}));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableExecuteRequest* execute_request =
request->mutable_loaded_executable_execute_request();
for (int i = 0; i < kNumArgs; ++i) {
TF_ASSERT_OK_AND_ASSIGN(uint64_t arg_handle, MakeTestArray(make_array()));
execute_request->add_args_handles(arg_handle);
}
execute_request->set_loaded_executable_handle(handle);
xla::ifrt::LoadedExecutable::ExecuteOptions execute_options;
execute_options.fill_status = true;
TF_ASSERT_OK_AND_ASSIGN(*execute_request->mutable_execute_options(),
execute_options.ToProto());
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
EXPECT_THAT(response, Pointee(Partially(EquivToProto(R"pb(
loaded_executable_execute_response {
outputs {
dtype { kind: KIND_F32 }
shape { dims: [ 2, 2 ] }
}
outputs {
dtype { kind: KIND_F32 }
shape { dims: [ 2, 2 ] }
}
}
)pb"))));
TF_ASSERT_OK_AND_ASSIGN(
auto sharding_proto,
SingleDeviceSharding::Create(&device, MemoryKind())->ToProto());
for (const auto& output :
response->loaded_executable_execute_response().outputs()) {
EXPECT_THAT(output.sharding(), EquivToProto(sharding_proto));
EXPECT_NE(output.array_handle(), 0);
}
EXPECT_THAT(
CheckFuture(
response->loaded_executable_execute_response().status_handle()),
StatusIs(absl::StatusCode::kInternal, StrEq("injected error")));
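  // The status future handle is consumed by the first check; checking it a
  // second time is rejected.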
EXPECT_THAT(
CheckFuture(
response->loaded_executable_execute_response().status_handle()),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("Unknown future handle")));
}
#endif
#if defined(PLATFORM_GOOGLE)
TEST_P(IfrtBackendHandlerTest, LoadedExecutableDelete) {
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
{
EXPECT_CALL(*executable, Delete())
.WillOnce(Return(Future<>(absl::OkStatus())));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableDeleteRequest* delete_request =
request->mutable_loaded_executable_delete_request();
delete_request->set_loaded_executable_handle(handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_executable_delete_response());
EXPECT_THAT(
CheckFuture(
response->loaded_executable_delete_response().future_handle()),
IsOk());
}
{
EXPECT_CALL(*executable, IsDeleted()).WillOnce(Return(true));
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableIsDeletedRequest* is_deleted_request =
request->mutable_loaded_executable_is_deleted_request();
is_deleted_request->set_loaded_executable_handle(handle);
EXPECT_THAT(CallBackend(std::move(request)),
IsOkAndHolds(Pointee(Partially(EquivToProto(R"pb(
loaded_executable_is_deleted_response { is_deleted: true }
)pb")))));
}
}
#endif
TEST_P(IfrtBackendHandlerTest, LoadedExecutableDestruct) {
MockLoadedExecutable* executable;
uint64_t handle;
{
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
TF_ASSERT_OK_AND_ASSIGN(CompileResponse response,
CompileTestLoadedExecutable(std::move(e)));
handle = response.loaded_executable_handle();
}
{
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableDestructRequest* destruct_request =
request->mutable_loaded_executable_destruct_request();
destruct_request->set_loaded_executable_handle(handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_executable_destruct_response());
}
{
auto request = NewIfrtRequest(NewOpId());
LoadedExecutableDestructRequest* destruct_request =
request->mutable_loaded_executable_destruct_request();
destruct_request->set_loaded_executable_handle(handle);
EXPECT_THAT(CallBackend(std::move(request)),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("Unknown loaded executable handle")));
}
}
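// End-to-end host-callback flow: compile with a serialized
// RemoteLoadedHostCallback, invoke the callback from a separate thread, poll
// the backend for the pending execution (whose operands are staged in the
// host buffer store), and hand the result back through a host buffer.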
TEST_P(IfrtBackendHandlerTest, LoadedHostCallbackExecute) {
std::vector<xla::HostCallbackArgInfo> hcb_args = {{
.channel_id = 1,
.shape = xla::ShapeUtil::MakeShape(xla::F32, {}),
}};
std::vector<xla::HostCallbackArgInfo> hcb_results = {{
.channel_id = 2,
.shape = xla::ShapeUtil::MakeShape(xla::F32, {}),
}};
auto hcb = tsl::MakeRef<RemoteLoadedHostCallback>(
mock_client_, std::move(hcb_args), std::move(hcb_results),
nullptr);
MockLoadedExecutable* executable;
tsl::RCReference<xla::ifrt::LoadedHostCallback> loaded_host_callback;
uint64_t loaded_host_callback_handle;
{
auto request = NewIfrtRequest(NewOpId());
CompileRequest* compile_request = request->mutable_compile_request();
TestProgram program;
TF_ASSERT_OK_AND_ASSIGN(*compile_request->mutable_program(),
Serialize(program));
xla::ifrt::XlaCompileOptions compile_options;
TF_ASSERT_OK_AND_ASSIGN(*compile_request->mutable_compile_options(),
Serialize(compile_options));
TF_ASSERT_OK_AND_ASSIGN(std::string host_callback_serialized,
hcb->Serialize());
compile_request->add_host_callbacks(std::move(host_callback_serialized));
auto e = std::make_unique<MockLoadedExecutable>();
executable = e.get();
EXPECT_CALL(mock_compiler_, Compile(_, _))
.WillOnce(DoAll(
Invoke(
[&](const std::unique_ptr<xla::ifrt::Program>& program,
const std::unique_ptr<xla::ifrt::CompileOptions>& options) {
auto* xla_compile_options =
llvm::cast<xla::ifrt::XlaCompileOptions>(options.get());
auto& loaded_host_callbacks =
xla_compile_options->loaded_host_callbacks;
ASSERT_EQ(loaded_host_callbacks.size(), 1);
loaded_host_callback = loaded_host_callbacks.front();
}),
Return(ByMove(std::move(e)))));
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_compile_response());
CompileResponse compile_response = response->compile_response();
loaded_host_callback_handle =
compile_response.loaded_host_callback_handles(0);
ASSERT_THAT(loaded_host_callback, NotNull());
}
auto host_callback_thread = absl::WrapUnique(tsl::Env::Default()->StartThread(
tsl::ThreadOptions(), "HostCallback", [&]() {
xla::Literal x = xla::LiteralUtil::CreateR0(1.0f);
std::vector<void*> operands;
operands.push_back(x.untyped_data());
xla::Literal out = xla::LiteralUtil::CreateR0(0.0f);
std::vector<void*> results;
results.push_back(out.untyped_data());
const xla::HostCallback* xla_host_callback =
&llvm::cast<RemoteLoadedHostCallback>(loaded_host_callback.get())
->host_callback();
ASSERT_THAT(
xla_host_callback->callback(results.data(), operands.data()),
IsOk());
EXPECT_EQ(out, xla::LiteralUtil::CreateR0(2.0f));
}));
uint64_t host_callback_execution_handle;
{
const uint64_t operand_host_buffer_handle = NewHostBufferHandle();
auto request = NewIfrtRequest(NewOpId());
LoadedHostCallbackPollRequest* poll_request =
request->mutable_loaded_host_callback_poll_request();
poll_request->set_loaded_host_callback_handle(loaded_host_callback_handle);
poll_request->set_operand_host_buffer_handle(operand_host_buffer_handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_host_callback_poll_response());
const LoadedHostCallbackPollResponse& poll_response =
response->loaded_host_callback_poll_response();
host_callback_execution_handle =
poll_response.host_callback_execution_handle();
TF_ASSERT_OK_AND_ASSIGN(
const std::shared_ptr<const std::string> operands,
host_buffer_store_->Lookup(operand_host_buffer_handle));
EXPECT_EQ(xla::BorrowingLiteral(operands->data(),
xla::ShapeUtil::MakeShape(xla::F32, {})),
xla::LiteralUtil::CreateR0(1.0f));
}
{
auto result = xla::LiteralUtil::CreateR0(2.0f);
std::string result_buffer(absl::string_view(
static_cast<const char*>(result.untyped_data()), result.size_bytes()));
const uint64_t result_host_buffer_handle = NewHostBufferHandle();
ASSERT_THAT(host_buffer_store_->Store(result_host_buffer_handle,
std::move(result_buffer)),
IsOk());
auto request = NewIfrtRequest(NewOpId());
LoadedHostCallbackReturnRequest* ret_request =
request->mutable_loaded_host_callback_return_request();
ret_request->set_host_callback_execution_handle(
host_callback_execution_handle);
ret_request->set_result_host_buffer_handle(result_host_buffer_handle);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<IfrtResponse> response,
CallBackend(std::move(request)));
ASSERT_TRUE(response->has_loaded_host_callback_return_response());
}
}
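// The default device assignment computed by the client should round-trip
// through the serialized response.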
TEST_P(IfrtBackendHandlerTest, GetDefaultDeviceAssignmentSuccess) {
const int kNumReplicas = 1;
const int kNumPartitions = 3;
EXPECT_CALL(*mock_client_,
GetDefaultDeviceAssignment(kNumReplicas, kNumPartitions))
.WillOnce(Return(xla::DeviceAssignment(kNumReplicas, kNumPartitions)));
auto request = NewIfrtRequest(NewOpId());
auto* default_device_assignment_request =
request->mutable_get_default_device_assignment_request();
default_device_assignment_request->set_num_replicas(kNumReplicas);
default_device_assignment_request->set_num_partitions(kNumPartitions);
TF_ASSERT_OK_AND_ASSIGN(auto response, CallBackend(std::move(request)));
TF_ASSERT_OK_AND_ASSIGN(auto assignment_got,
xla::DeviceAssignment::Deserialize(
response->get_default_device_assignment_response()
.device_assignment()));
EXPECT_EQ(assignment_got->replica_count(), kNumReplicas);
EXPECT_EQ(assignment_got->computation_count(), kNumPartitions);
}
TEST_P(IfrtBackendHandlerTest,
GetDefaultDeviceAssignmentFailsIfTheBackendFails) {
const int kNumReplicas = 1;
const int kNumPartitions = 3;
EXPECT_CALL(*mock_client_,
GetDefaultDeviceAssignment(kNumReplicas, kNumPartitions))
.WillOnce(Return(absl::UnknownError("injected error")));
auto request = NewIfrtRequest(NewOpId());
auto* default_device_assignment_request =
request->mutable_get_default_device_assignment_request();
default_device_assignment_request->set_num_replicas(kNumReplicas);
default_device_assignment_request->set_num_partitions(kNumPartitions);
EXPECT_THAT(CallBackend(std::move(request)),
StatusIs(absl::StatusCode::kUnknown, StrEq("injected error")));
}
INSTANTIATE_TEST_SUITE_P(
IfrtBackendHandlerTestWithAllVersions, IfrtBackendHandlerTest,
testing::Range(kServerMinVersion, kServerMaxVersion + 1),
[](const testing::TestParamInfo<IfrtBackendHandlerTest::ParamType>& info) {
return absl::StrCat(info.param);
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/ifrt_backend_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5d9b7217-eb9e-46b0-9e7b-11565ed43a00 | cpp | tensorflow/tensorflow | host_buffer | third_party/xla/xla/python/ifrt_proxy/server/host_buffer.cc | third_party/xla/xla/python/ifrt_proxy/server/host_buffer_test.cc | #include "xla/python/ifrt_proxy/server/host_buffer.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
namespace xla {
namespace ifrt {
namespace proxy {
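// Stores `data` under `handle`, failing with AlreadyExists if the handle is
// already in use.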
absl::Status HostBufferStore::Store(uint64_t handle, std::string data) {
absl::MutexLock lock(&mu_);
const bool inserted =
buffers_.insert({handle, std::make_shared<std::string>(std::move(data))})
.second;
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("Host buffer handle ", handle, " already exists"));
}
return absl::OkStatus();
}
absl::StatusOr<std::shared_ptr<const std::string>> HostBufferStore::Lookup(
uint64_t handle) {
absl::MutexLock lock(&mu_);
const auto it = buffers_.find(handle);
if (it == buffers_.end()) {
return absl::NotFoundError(
absl::StrCat("Host buffer handle ", handle, " not found"));
}
return it->second;
}
absl::Status HostBufferStore::Delete(uint64_t handle) {
absl::MutexLock lock(&mu_);
if (buffers_.erase(handle) == 0) {
return absl::NotFoundError(
absl::StrCat("Host buffer handle ", handle, " not found"));
}
return absl::OkStatus();
}
}
}
} | #include "xla/python/ifrt_proxy/server/host_buffer.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Pointee;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
TEST(HostBufferStoreTest, ReadAfterWrite) {
HostBufferStore store;
const uint64_t kHandle = 1;
ASSERT_THAT(store.Store(kHandle, "foo"), IsOk());
EXPECT_THAT(store.Lookup(kHandle), IsOkAndHolds(Pointee(std::string("foo"))));
ASSERT_THAT(store.Delete(kHandle), IsOk());
EXPECT_THAT(store.Lookup(kHandle), StatusIs(absl::StatusCode::kNotFound));
}
TEST(HostBufferStoreTest, UnknownHandle) {
HostBufferStore store;
const uint64_t kHandle = 1;
EXPECT_THAT(store.Lookup(kHandle), StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(store.Delete(kHandle), StatusIs(absl::StatusCode::kNotFound));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/host_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/host_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0cade028-d640-4047-8bf8-abf849d918d7 | cpp | tensorflow/tensorflow | grpc_service_impl | third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl.cc | third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl_test.cc | #include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/pjrt/distributed/util.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/proto_util.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/version.h"
namespace xla {
namespace ifrt {
namespace proxy {
::grpc::Status GrpcServiceImpl::GetVersion(::grpc::ServerContext* context,
const GrpcGetVersionRequest* request,
GrpcGetVersionResponse* response) {
auto protocol_version =
ChooseVersion(request->min_version().protocol_version(),
request->max_version().protocol_version());
if (!protocol_version.ok()) {
return xla::ToGrpcStatus(protocol_version.status());
}
response->mutable_version()->set_protocol_version(*protocol_version);
return ::grpc::Status::OK;
}
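// Runs one IFRT session: parses the session metadata from the gRPC client
// metadata, creates a per-session HostBufferStore and backend, then processes
// requests from the stream, writing each response (or error) back as it
// becomes ready.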
::grpc::Status GrpcServiceImpl::IfrtSession(
::grpc::ServerContext* context,
::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>* stream) {
GrpcIfrtSessionMetadata metadata;
{
const auto it = context->client_metadata().find(
"ifrt-proxy-grpc-ifrt-session-metadata-bin");
if (it == context->client_metadata().end()) {
return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT,
"Missing metadata for GrpcIfrtService.IfrtSession: "
"ifrt-proxy-grpc-ifrt-session-metadata-bin");
}
if (!metadata.ParseFromString(AsProtoStringData(
absl::string_view(it->second.data(), it->second.size())))) {
return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT,
"Unable to parse GrpcIfrtSessionMetadata");
}
}
const uint64_t session_id =
next_session_id_.fetch_add(1, std::memory_order_relaxed);
VLOG(0) << "Starting a new IFRT session with session_id=" << session_id;
auto host_buffer_store =
std::make_shared<xla::ifrt::proxy::HostBufferStore>();
{
absl::MutexLock l(&host_buffer_store_mu_);
CHECK(host_buffer_stores_.insert({session_id, host_buffer_store}).second);
}
absl::Cleanup cleanup = [&] {
absl::MutexLock l(&host_buffer_store_mu_);
CHECK_GT(host_buffer_stores_.erase(session_id), 0);
};
auto backend = backend_factory_(metadata.version(), session_id,
std::move(host_buffer_store));
if (!backend.ok()) {
LOG(INFO) << "Creating IFRT backend " << session_id
<< " failed: " << backend.status();
return xla::ToGrpcStatus(backend.status());
}
absl::Mutex writer_mu;
bool first_request_read = false;
while (true) {
auto request = std::make_unique<IfrtRequest>();
if (!stream->Read(request.get())) {
break;
}
if (!first_request_read) {
VLOG(0) << "First request read for session " << session_id;
first_request_read = true;
}
const uint64_t op_id = request->request_metadata().op_id();
auto response = (*backend)->Process(std::move(request));
response.OnReady(
[op_id, stream,
&writer_mu](absl::StatusOr<std::shared_ptr<IfrtResponse>> response) {
absl::MutexLock l(&writer_mu);
if (response.ok()) {
stream->Write(**response);
} else {
stream->Write(*NewIfrtResponse(op_id, response.status()));
}
});
}
backend->reset();
VLOG(0) << "Finishing IFRT session " << session_id;
return ::grpc::Status::OK;
}
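// Reassembles a chunked host buffer upload and stores it under the handle from
// the request metadata; reports DATA_LOSS if the received size does not match
// the advertised buffer size.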
::grpc::Status GrpcServiceImpl::HostBufferStore(
::grpc::ServerContext* context,
::grpc::ServerReader<GrpcHostBufferStoreRequest>* stream,
GrpcHostBufferStoreResponse* response) {
const auto it = context->client_metadata().find(
"ifrt-proxy-grpc-host-buffer-store-metadata-bin");
if (it == context->client_metadata().end()) {
return ::grpc::Status(
::grpc::StatusCode::INTERNAL,
"Missing gRPC metadata for GrpcHostBufferService.Store");
}
GrpcHostBufferStoreMetadata metadata;
if (!metadata.ParseFromString(AsProtoStringData(
absl::string_view(it->second.data(), it->second.size())))) {
return ::grpc::Status(::grpc::StatusCode::DATA_LOSS,
"Unable to parse GrpcHostBufferStoreMetadata");
}
std::string data;
data.reserve(metadata.buffer_size());
GrpcHostBufferStoreRequest request;
while (stream->Read(&request)) {
data.append(request.data());
}
if (data.size() != metadata.buffer_size()) {
return ::grpc::Status(
::grpc::StatusCode::DATA_LOSS,
absl::StrCat("Potential data loss for host buffers: expected ",
metadata.buffer_size(), " bytes but got ", data.size(),
" bytes"));
}
auto store = GetHostBufferStore(metadata.session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
return xla::ToGrpcStatus((*store)->Store(metadata.handle(), std::move(data)));
}
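// Streams the requested host buffer back to the client in 1 MiB chunks, or a
// single empty chunk if the buffer is empty.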
::grpc::Status GrpcServiceImpl::HostBufferLookup(
::grpc::ServerContext* context, const GrpcHostBufferLookupRequest* request,
::grpc::ServerWriter<GrpcHostBufferLookupResponse>* stream) {
static constexpr int64_t kChunkSize = 1024 * 1024;
auto store = GetHostBufferStore(request->session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
auto data = (*store)->Lookup(request->handle());
if (!data.ok()) {
return xla::ToGrpcStatus(data.status());
}
GrpcHostBufferLookupResponse response;
if (!(*data)->empty()) {
for (int64_t offset = 0; offset < (*data)->size(); offset += kChunkSize) {
#if defined(PLATFORM_GOOGLE)
response.set_alias_data(
absl::string_view(**data).substr(offset, kChunkSize));
#else
response.set_data((*data)->substr(offset, kChunkSize));
#endif
stream->Write(response);
response.Clear();
}
} else {
stream->Write(response);
}
return ::grpc::Status::OK;
}
::grpc::Status GrpcServiceImpl::HostBufferDelete(
::grpc::ServerContext* context, const GrpcHostBufferDeleteRequest* request,
GrpcHostBufferDeleteResponse* response) {
auto store = GetHostBufferStore(request->session_id());
if (!store.ok()) {
return xla::ToGrpcStatus(store.status());
}
return xla::ToGrpcStatus((*store)->Delete(request->handle()));
}
bool GrpcServiceImpl::Test_InsertHostBufferStore(
uint64_t session_id,
std::shared_ptr<xla::ifrt::proxy::HostBufferStore> store) {
absl::MutexLock l(&host_buffer_store_mu_);
return host_buffer_stores_.insert({session_id, std::move(store)}).second;
}
bool GrpcServiceImpl::Test_DeleteHostBufferStore(uint64_t session_id) {
absl::MutexLock l(&host_buffer_store_mu_);
return host_buffer_stores_.erase(session_id) > 0;
}
absl::StatusOr<std::shared_ptr<xla::ifrt::proxy::HostBufferStore>>
GrpcServiceImpl::GetHostBufferStore(uint64_t session_id) {
absl::MutexLock l(&host_buffer_store_mu_);
const auto it = host_buffer_stores_.find(session_id);
if (it == host_buffer_stores_.end()) {
return absl::NotFoundError(
absl::StrCat("Session id ", session_id, " does not exist"));
}
return it->second;
}
}
}
} | #include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "xla/python/ifrt_proxy/client/grpc_host_buffer.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/grpc_server.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
IfrtProxyVersion Version() {
IfrtProxyVersion version;
version.set_protocol_version(kServerMaxVersion);
return version;
}
absl::StatusOr<std::unique_ptr<GrpcServer>> MakeGrpcServer() {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
return GrpcServer::CreateFromIfrtClientFactory(addr, []() {
return absl::UnimplementedError(
"IFRT client creation fails. This test is not expected to "
"instantiate any IFRT client");
});
}
TEST(GrpcServiceImplTest, CanBeUsedToSetupAnGrpcServer) {
ASSERT_THAT(MakeGrpcServer(), IsOk());
}
class GrpcIfrtServiceImplHostBufferTest
: public testing::TestWithParam<int64_t> {
protected:
GrpcIfrtServiceImplHostBufferTest()
: impl_([](IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<HostBufferStore> host_buffer_store) {
return absl::UnimplementedError(
"IFRT backend creation is not implemented");
}) {
::grpc::ServerBuilder builder;
builder.RegisterService(&impl_);
server_ = builder.BuildAndStart();
stub_ = grpc::GrpcIfrtService::NewStub(
server_->InProcessChannel(::grpc::ChannelArguments()));
}
std::string GetTestData() const {
std::string data;
for (int i = 0; i < GetParam(); ++i) {
data.push_back(i % 7);
}
return data;
}
GrpcServiceImpl impl_;
std::unique_ptr<::grpc::Server> server_;
std::shared_ptr<grpc::GrpcIfrtService::Stub> stub_;
};
TEST_P(GrpcIfrtServiceImplHostBufferTest, StoreAndLookupStringView) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
absl::string_view source(data);
ASSERT_THAT(client.Store(kHandle, source).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, StoreAndLookupCord) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
absl::Cord source(data);
ASSERT_THAT(client.Store(kHandle, source).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, Lookup) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
ASSERT_THAT(store->Store(kHandle, data), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(), IsOkAndHolds(data));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
TEST_P(GrpcIfrtServiceImplHostBufferTest, Delete) {
static constexpr uint64_t kSessionId = 1;
auto store = std::make_shared<HostBufferStore>();
ASSERT_TRUE(impl_.Test_InsertHostBufferStore(kSessionId, store));
GrpcClientHostBufferStore client(stub_, Version(), kSessionId);
constexpr uint64_t kHandle = 2;
const std::string data = GetTestData();
ASSERT_THAT(store->Store(kHandle, data), IsOk());
ASSERT_THAT(client.Delete(kHandle).Await(), IsOk());
EXPECT_THAT(client.Lookup(kHandle).Await(),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_TRUE(impl_.Test_DeleteHostBufferStore(kSessionId));
}
INSTANTIATE_TEST_SUITE_P(
DataSize, GrpcIfrtServiceImplHostBufferTest,
testing::Values(0,
16,
3 * 1024 * 1024));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_service_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04433fb2-7496-4b2a-81ed-6816e4136e92 | cpp | tensorflow/tensorflow | grpc_server | third_party/xla/xla/python/ifrt_proxy/server/grpc_server.cc | third_party/xla/xla/python/ifrt_proxy/server/grpc_server_test.cc | #include "xla/python/ifrt_proxy/server/grpc_server.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "grpc/grpc.h"
#include "grpcpp/completion_queue.h"
#include "grpcpp/grpcpp.h"
#include "grpcpp/server_builder.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
GrpcServer::~GrpcServer() {
server_->Shutdown();
server_->Wait();
}
absl::StatusOr<std::unique_ptr<GrpcServer>> GrpcServer::Create(
absl::string_view address,
std::unique_ptr<grpc::GrpcIfrtService::Service> impl) {
if (impl == nullptr) {
return absl::InvalidArgumentError(
"Service implementation cannot be a nullptr.");
}
::grpc::ServerBuilder builder;
builder.AddChannelArgument(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, -1);
builder.AddChannelArgument(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, -1);
builder.RegisterService(impl.get());
builder.AddListeningPort(std::string(address), GetServerCredentials());
auto server = builder.BuildAndStart();
if (server == nullptr) {
return absl::UnavailableError(
absl::StrCat("Failed to initialize gRPC server at address:", address));
}
return absl::WrapUnique<GrpcServer>(
new GrpcServer(address, std::move(impl), std::move(server)));
}
absl::StatusOr<std::unique_ptr<GrpcServer>>
GrpcServer::CreateFromIfrtClientFactory(
absl::string_view address,
absl::AnyInvocable<absl::StatusOr<std::shared_ptr<xla::ifrt::Client>>()>
backend_ifrt_client_factory) {
if (backend_ifrt_client_factory == nullptr) {
return absl::InvalidArgumentError(
"backend_ifrt_client_factory cannot be nullptr.");
}
auto service = std::make_unique<GrpcServiceImpl>(
[ifrt_client_factory = std::move(backend_ifrt_client_factory)](
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<HostBufferStore> host_buffer_store) mutable
-> absl::StatusOr<std::unique_ptr<BackendInterface>> {
TF_ASSIGN_OR_RETURN(auto ifrt_client, ifrt_client_factory());
return IfrtBackend::Create(version, session_id, std::move(ifrt_client),
std::move(host_buffer_store));
});
return Create(address, std::move(service));
}
}
}
} | #include "xla/python/ifrt_proxy/server/grpc_server.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
class FakeIfrtService : public grpc::GrpcIfrtService::Service {};
TEST(GrpcServerTest, CreationTest) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
ASSERT_THAT(GrpcServer::Create(addr, std::move(grpc_service_impl)), IsOk());
}
TEST(GrpcServerTest, CreationFailsIfImplIsNullptr) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
EXPECT_THAT(GrpcServer::Create(addr, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(GrpcServerTest, CreationFailsWithInvalidAddress) {
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
EXPECT_THAT(GrpcServer::Create("invalid-address",
std::move(grpc_service_impl)),
Not(IsOk()));
}
TEST(GrpcServerTest, RetrievingServerAddressWorks) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
TF_ASSERT_OK_AND_ASSIGN(
auto grpc_server, GrpcServer::Create(addr, std::move(grpc_service_impl)));
EXPECT_EQ(grpc_server->address(), addr);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_server.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_server_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
160aad35-3fdd-4e79-a85d-a8b2a9e081e4 | cpp | tensorflow/tensorflow | version | third_party/xla/xla/python/ifrt_proxy/server/version.cc | third_party/xla/xla/python/ifrt_proxy/server/version_test.cc | #include "xla/python/ifrt_proxy/server/version.h"
#include <algorithm>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
namespace xla {
namespace ifrt {
namespace proxy {
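// Negotiates the IFRT proxy protocol version: picks the highest version both
// sides support (the smaller of the two maxima) and fails with InvalidArgument
// if that choice is below either side's minimum supported version.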
absl::StatusOr<int> ChooseVersion(int client_min_version,
int client_max_version,
int server_min_version,
int server_max_version) {
const int version = std::min(server_max_version, client_max_version);
if (version < server_min_version || version < client_min_version) {
return absl::InvalidArgumentError(absl::StrCat(
"IFRT Proxy client and server failed to agree on the "
"protocol version; supported versions: client = [",
client_min_version, ", ", client_max_version, "], server = [",
server_min_version, ", ", server_max_version, "]"));
}
return version;
}
}
}
} | #include "xla/python/ifrt_proxy/server/version.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
struct Param {
int client_min_version;
int client_max_version;
int server_min_version;
int server_max_version;
};
class CompatibleVersionTest : public ::testing::TestWithParam<Param> {};
TEST_P(CompatibleVersionTest, Verify) {
const Param& param = GetParam();
EXPECT_THAT(ChooseVersion(param.client_min_version, param.client_max_version,
param.server_min_version, param.server_max_version),
IsOk());
}
INSTANTIATE_TEST_SUITE_P(CompatibleVersionTest, CompatibleVersionTest,
::testing::Values(Param{1, 1, 1, 1}, Param{1, 2, 2, 2},
Param{2, 2, 1, 2},
Param{1, 3, 3, 4}));
class IncompatibleVersionTest : public ::testing::TestWithParam<Param> {};
TEST_P(IncompatibleVersionTest, Verify) {
const Param& param = GetParam();
EXPECT_THAT(ChooseVersion(param.client_min_version, param.client_max_version,
param.server_min_version, param.server_max_version),
StatusIs(absl::StatusCode::kInvalidArgument));
}
INSTANTIATE_TEST_SUITE_P(IncompatibleVersionTest, IncompatibleVersionTest,
::testing::Values(Param{1, 2, 3, 3}, Param{1, 3, 4, 6},
Param{1, 1, 2, 2}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/version.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/version_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
363476b4-86dd-4517-8a90-829165178641 | cpp | tensorflow/tensorflow | array_util | third_party/xla/xla/python/ifrt_proxy/common/array_util.cc | third_party/xla/xla/python/ifrt_proxy/common/array_util_test.cc | #include "xla/python/ifrt_proxy/common/array_util.h"
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
std::string StridesAsStr(const ArrayMemRegion::ByteStrides& strides) {
if (!strides.has_value()) return "strides{nullopt}";
return absl::StrCat("strides{", absl::StrJoin(*strides, ","), "}");
}
}
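// Computes dense, major-to-minor (row-major) byte strides for `shape`: the
// minor-most dimension is element-contiguous and each earlier dimension's
// stride is the element size multiplied by the product of all later extents.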
absl::StatusOr<std::vector<int64_t>> DefaultByteStrides(const DType dtype,
const Shape& shape) {
if (!dtype.byte_size().has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported data type to query byte-strides for: ",
dtype.DebugString()));
}
std::vector<int64_t> result(shape.dims().size());
int64_t stride = *dtype.byte_size();
for (int i = static_cast<int>(shape.dims().size()) - 1; i >= 0; --i) {
result[i] = stride;
stride *= shape.dims()[i];
}
return result;
}
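// Constructs an ArrayMemRegion from a pointer to the logically-first element.
// With default (or scalar) strides the region spans element_size *
// num_elements bytes; with explicit strides it spans the byte offset of the
// last element (sum of stride * (extent - 1) over dimensions with extent > 1)
// plus one element, after validating that strides are positive multiples of
// the element size.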
absl::StatusOr<ArrayMemRegion> ArrayMemRegion::FromZerothElementPointer(
const void* zeroth_element, const DType dtype, const Shape& shape,
ByteStrides byte_strides) {
if (!dtype.byte_size().has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported data type to construct ArrayMemRegion: ",
dtype.DebugString()));
}
void* const mem_region_start = const_cast<void*>(zeroth_element);
if (!byte_strides.has_value() ||
(byte_strides->empty() && shape.dims().empty())) {
return ArrayMemRegion(mem_region_start,
dtype.byte_size().value() * shape.num_elements());
}
if (shape.num_elements() == 0) {
return ArrayMemRegion(mem_region_start, 0);
}
if (shape.dims().size() != byte_strides->size()) {
return absl::InvalidArgumentError(
absl::StrCat("Shape has different dimensions from byte_strides: ",
shape.DebugString(), " vs ", StridesAsStr(byte_strides)));
}
uint64_t last_element_byte_offset = 0;
for (int i = 0; i < byte_strides->size(); ++i) {
int stride = (*byte_strides)[i];
if (shape.dims()[i] < 0) {
return absl::InvalidArgumentError(
absl::StrCat("A shape dimension is negative: ", shape.DebugString()));
} else if (shape.dims()[i] == 1) {
continue;
} else if (stride <= 0) {
return absl::UnimplementedError(
absl::StrCat("Negative or zero strides are not fully supported: ",
StridesAsStr(byte_strides)));
} else if (stride % dtype.byte_size().value() != 0) {
return absl::UnimplementedError(absl::StrCat(
"byte_stride[", i, "] is not a multiple of the data-type's size: ",
StridesAsStr(byte_strides), ", dtype=", dtype.DebugString()));
} else {
DCHECK_GT(shape.dims()[i], 0);
last_element_byte_offset += (stride * (shape.dims()[i] - 1));
}
}
return ArrayMemRegion(mem_region_start,
last_element_byte_offset + dtype.byte_size().value());
}
absl::StatusOr<ArrayMemRegion> ArrayMemRegion::FromMinimalMemRegion(
absl::string_view mem_region, const DType dtype, const Shape& shape,
ByteStrides byte_strides) {
TF_ASSIGN_OR_RETURN(
auto result,
FromZerothElementPointer(mem_region.data(), dtype, shape, byte_strides));
if (result.mem_region().size() != mem_region.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect size ", result.mem_region().size(), " vs ",
mem_region.size(), "; is provided memory region minimal? ",
dtype.DebugString(), " ", shape.DebugString(), " ",
StridesAsStr(byte_strides)));
}
CHECK_EQ(result.mem_region().data(), mem_region.data());
return result;
}
absl::string_view ArrayMemRegion::mem_region() const {
return absl::string_view(static_cast<char*>(mem_region_start_), nbytes_);
}
void* ArrayMemRegion::zeroth_element() const {
return mem_region_start_;
}
}
}
} | #include "xla/python/ifrt_proxy/common/array_util.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::ElementsAre;
using ::testing::Not;
using ::testing::TestWithParam;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
constexpr DType::Kind kF64 = DType::Kind::kF64;
constexpr DType::Kind kS32 = DType::Kind::kS32;
constexpr DType::Kind kString = DType::Kind::kString;
using Strides = std::vector<int64_t>;
TEST(DefaultByteStrides, ErrorsIfBadDtype) {
EXPECT_THAT(DefaultByteStrides(DType(kString), Shape({1})), Not(IsOk()));
}
TEST(DefaultByteStrides, HappyCase) {
EXPECT_THAT(DefaultByteStrides(DType(kF64), Shape({4, 3, 5})),
IsOkAndHolds(ElementsAre(120, 40, 8)));
}
struct TC {
const std::string test_name;
const DType::Kind dtype_kind;
const std::vector<int64_t> shape;
const std::optional<std::vector<int64_t>> byte_strides;
const std::optional<size_t> expected_size;
};
std::string PrintToString(const TC& tc) { return tc.test_name; }
class ArrayMemRegionSuccess : public TestWithParam<TC> {};
INSTANTIATE_TEST_SUITE_P(
Tests, ArrayMemRegionSuccess,
testing::Values(
TC{"DefaultF64", kF64, {4, 3, 5}, std::nullopt},
TC{"MajorToMinorStridesF64", kF64, {4, 3, 5}, Strides({120, 40, 8})},
TC{"NotMajorToMinorF64", kF64, {3, 4, 5}, Strides({40, 120, 8})},
TC{"TransposedF64", kF64, {5, 3, 4}, Strides({8, 40, 120})},
TC{"DefaultS32", kS32, {4, 3, 5}, std::nullopt},
TC{"MajorToMinorStridesS32", kS32, {4, 3, 5}, Strides({60, 20, 4})},
TC{"NotMajorToMinorS32", kS32, {3, 4, 5}, Strides({20, 60, 4})},
TC{"TransposedS32", kS32, {5, 3, 4}, Strides({4, 20, 60})},
TC{"ScalarF64DefaultStrides", kF64, {}, std::nullopt},
TC{"ScalarF64EmptyStrides", kF64, {}, Strides({})},
TC{"NoColsDefaultStrides", kF64, {5, 0}, std::nullopt},
TC{"NoColsStridesNonZero", kF64, {5, 0}, Strides({40, 4})},
TC{"NoColsStridesZero", kF64, {5, 0}, Strides({0, 0})},
TC{"NoRowsDefaultStrides", kF64, {0, 5}, std::nullopt},
TC{"NoRowsStridesNonZero", kF64, {0, 5}, Strides({40, 4})},
TC{"NoRowsStridesZero", kF64, {0, 5}, Strides({0, 0})},
TC{"SingleElementArbitraryStrides", kF64, {1, 1}, Strides({100, 100})},
TC{"OneRowArbitraryColStride", kF64, {1, 5}, Strides({100, 8})},
TC{"OneColArbitraryRowStride", kF64, {5, 1}, Strides({8, 100})},
TC{"OneRowZeroColStride", kF64, {1, 5}, Strides({0, 8})},
TC{"OneColZeroRowStride", kF64, {5, 1}, Strides({8, 0})},
TC{"NonCompactSingleDimension", kS32, {5}, Strides({16}), 68},
TC{"NonCompactDim0", kS32, {4, 3, 5}, Strides({120, 20, 4}), 420},
TC{"PaddedElements", kS32, {4, 3, 5}, Strides({120, 40, 8}), 476}),
testing::PrintToStringParamName());
TEST_P(ArrayMemRegionSuccess, TestCase) {
const TC tc = GetParam();
const DType dtype(tc.dtype_kind);
const Shape shape(tc.shape);
const size_t expected_size = tc.expected_size.value_or(
dtype.byte_size().value() * shape.num_elements());
std::string data(expected_size, 'a');
TF_ASSERT_OK_AND_ASSIGN(auto mem_region1,
ArrayMemRegion::FromZerothElementPointer(
data.data(), dtype, shape, tc.byte_strides));
EXPECT_EQ(mem_region1.zeroth_element(), data.data());
EXPECT_EQ(mem_region1.mem_region().data(), data.data());
EXPECT_EQ(mem_region1.mem_region().size(), data.size());
TF_ASSERT_OK_AND_ASSIGN(
auto mem_region2, ArrayMemRegion::FromMinimalMemRegion(data, dtype, shape,
tc.byte_strides));
EXPECT_EQ(mem_region2.zeroth_element(), data.data());
EXPECT_EQ(mem_region2.mem_region().data(), data.data());
EXPECT_EQ(mem_region2.mem_region().size(), data.size());
}
class ArrayMemRegionFailure : public TestWithParam<TC> {};
INSTANTIATE_TEST_SUITE_P(
Tests, ArrayMemRegionFailure,
testing::Values(
TC{"OneString", kString, {}, std::nullopt},
TC{"ManyStrings", kString, {5}, std::nullopt},
TC{"NegativeByteStrides", kS32, {4, 3, 5}, Strides({-60, -20, -4})},
TC{"ZeroByteStride", kS32, {5, 5}, Strides({0, 0})},
TC{"SmallerByteStrideThanDataType", kS32, {5, 5}, Strides({1, 1})},
TC{"ByteStrideIndivisibleByDataType", kS32, {5, 5}, Strides({7, 7})},
TC{"NegativeShapeDimension", kS32, {-5, -5}, Strides({20, 4})}),
testing::PrintToStringParamName());
TEST_P(ArrayMemRegionFailure, TestCase) {
const TC tc = GetParam();
const DType dtype(tc.dtype_kind);
const Shape shape(tc.shape);
char const* kSomeAddr = reinterpret_cast<char*>(1UL << 48);
auto mem_region1 = ArrayMemRegion::FromZerothElementPointer(
kSomeAddr, dtype, shape, tc.byte_strides);
EXPECT_THAT(mem_region1.status(), Not(IsOk()));
const size_t kSomeSize = 1024;
auto mem_region2 = ArrayMemRegion::FromMinimalMemRegion(
absl::string_view(kSomeAddr, kSomeSize), dtype, shape, tc.byte_strides);
EXPECT_THAT(mem_region2.status(), Not(IsOk()));
}
TEST(ArrayMemRegion, FromBadMemRegionSizeFails) {
const DType kDType(kS32);
const Shape kShape({5, 5});
const size_t kDataBytes = kDType.byte_size().value() * kShape.num_elements();
const size_t kExtraSuffixBytes = 10;
std::string data_with_extra_suffix(kDataBytes + kExtraSuffixBytes, 'a');
TF_ASSERT_OK_AND_ASSIGN(
auto mem_region1,
ArrayMemRegion::FromZerothElementPointer(
data_with_extra_suffix.data(), kDType, kShape,
std::nullopt));
EXPECT_EQ(mem_region1.mem_region().data(), data_with_extra_suffix.data());
EXPECT_EQ(mem_region1.zeroth_element(), data_with_extra_suffix.data());
EXPECT_LT(mem_region1.mem_region().size(), data_with_extra_suffix.size());
EXPECT_EQ(mem_region1.mem_region().size(), kDataBytes);
auto mem_region2 = ArrayMemRegion::FromMinimalMemRegion(
data_with_extra_suffix, kDType, kShape,
std::nullopt);
EXPECT_THAT(mem_region2.status(), Not(IsOk()));
std::string data_without_some_bytes(kDataBytes - kExtraSuffixBytes, 'a');
auto mem_region3 = ArrayMemRegion::FromMinimalMemRegion(
data_without_some_bytes, kDType, kShape,
std::nullopt);
EXPECT_THAT(mem_region3.status(), Not(IsOk()));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/common/array_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/common/array_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
392aa368-f3b4-4624-abe5-8318bc62bf19 | cpp | tensorflow/tensorflow | test_utils | tensorflow/compiler/mlir/tfrt/translate/mlrt/test_utils.cc | tensorflow/core/lib/monitoring/test_utils_test.cc | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/test_utils.h"
#include <algorithm>
#include <cstring>
#include <functional>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/tfrt/mlrt/attribute/attribute.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/context.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/interpreter_testutil.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlrt {
namespace testing {
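// Serializes a tensorflow::AttrValue into the raw byte layout expected by MLRT
// attributes: POD scalars (bool, int64, float) are memcpy'd, strings pass
// through unchanged, and string lists and tensors are encoded into bytecode
// buffers.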
absl::StatusOr<std::string> EncodeAttribute(const tensorflow::AttrValue& attr) {
if (attr.has_b()) {
std::string result;
result.resize(sizeof(uint8_t));
uint8_t v = attr.b();
std::memcpy(result.data(), &v, sizeof(v));
return result;
}
if (attr.has_i()) {
std::string result;
result.resize(sizeof(int64_t));
int64_t v = attr.i();
std::memcpy(result.data(), &v, sizeof(v));
return result;
}
if (attr.has_f()) {
std::string result;
result.resize(sizeof(float));
float v = attr.f();
std::memcpy(result.data(), &v, sizeof(v));
return result;
}
if (attr.has_s()) {
return attr.s();
}
if (attr.has_list()) {
if (attr.list().s_size() > 0) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
auto ctor = mlrt::bc::New<mlrt::bc::Vector<mlrt::bc::String>>(
&allocator, attr.list().s_size());
for (int i = 0; i < attr.list().s_size(); ++i) {
ctor.ConstructAt(i, attr.list().s(i));
}
return std::string(buffer.data(), buffer.size());
}
}
if (attr.has_tensor()) {
mlrt::bc::Buffer buffer;
mlrt::bc::Allocator allocator(&buffer);
tensorflow::Tensor tensor;
if (!tensor.FromProto(attr.tensor())) {
return absl::InvalidArgumentError("Invalid tensor proto.");
}
auto tensor_attr_ctor = mlrt::bc::New<tensorflow::tf_mlrt::TensorAttr>(
&allocator, tensor.dtype());
auto shape = tensor.shape().dim_sizes();
tensor_attr_ctor.construct_shape(shape.size())
.Assign(shape.begin(), shape.end());
auto tensor_data = tensor.tensor_data();
tensor_attr_ctor.construct_data(tensor_data.size())
.Place(tensor_data.data(), tensor_data.size());
return std::string(buffer.data(), buffer.size());
}
return absl::InvalidArgumentError("Unsupported attribute.");
}
namespace {
bool CanBeInlined(const tensorflow::AttrValue& attr) {
return attr.has_b() || attr.has_f();
}
}
absl::Status EncodeAttributes(AttributeTable& attributes,
const tensorflow::AttrValueMap& attr_map) {
std::vector<std::pair<std::string, tensorflow::AttrValue>> attrs(
attr_map.begin(), attr_map.end());
std::sort(attrs.begin(), attrs.end(),
[](const auto& x, const auto& y) { return x.first < y.first; });
for (int i = 0; i < attrs.size(); ++i) {
const tensorflow::AttrValue& attr = attrs[i].second;
TF_ASSIGN_OR_RETURN(auto attr_str, EncodeAttribute(attr));
if (CanBeInlined(attr)) {
attributes.AddInline(absl::StrCat(i), attr_str);
} else {
attributes.Add(absl::StrCat(i), attr_str);
}
}
return absl::OkStatus();
}
absl::StatusOr<std::pair<mlrt::bc::Kernel, mlrt::bc::Vector<mlrt::bc::String>>>
CreateKernelAndAttrs(int num_inputs, int num_outputs,
mlrt::ExecutionContext& exec_ctx, mlrt::bc::Buffer* buffer,
const tensorflow::AttrValueMap& attrs) {
mlrt::bc::Allocator allocator(buffer);
auto attributes_ctor = mlrt::bc::New<mlrt::bc::Vector<mlrt::bc::String>>(
&allocator, attrs.size());
AttributeTable attribute_table(attributes_ctor);
TF_RETURN_IF_ERROR(EncodeAttributes(attribute_table, attrs));
auto kernel_ctor = mlrt::bc::New<mlrt::bc::Kernel>(&allocator);
kernel_ctor.set_code(0);
std::vector<int> input_indices(num_inputs);
std::iota(input_indices.begin(), input_indices.end(), 0);
kernel_ctor.construct_arguments(input_indices.size())
.Assign(input_indices.begin(), input_indices.end());
std::vector<int> output_indices(num_outputs);
std::iota(output_indices.begin(), output_indices.end(), num_inputs);
kernel_ctor.construct_results(output_indices.size())
.Assign(output_indices.begin(), output_indices.end());
std::vector<uint32_t> attr_indices;
attr_indices.reserve(attrs.size());
for (int i = 0; i < attrs.size(); ++i) {
attr_indices.push_back(attribute_table.GetHandle(absl::StrCat(i)));
}
kernel_ctor.construct_attributes(attr_indices.size())
.Assign(attr_indices.begin(), attr_indices.end());
mlrt::bc::Vector<mlrt::bc::String> attributes(
buffer->Get(attributes_ctor.address()));
mlrt::bc::Kernel kernel(buffer->Get(kernel_ctor.address()));
return std::make_pair(kernel, attributes);
}
}
} | #include "tensorflow/core/lib/monitoring/test_utils.h"
#include <string>
#include "tensorflow/core/lib/monitoring/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace monitoring {
namespace testing {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
template <typename MessageType>
StatusOr<MessageType> ParseTextProto(const std::string& text_proto) {
protobuf::TextFormat::Parser parser;
MessageType parsed_proto;
protobuf::io::ArrayInputStream input_stream(text_proto.data(),
text_proto.size());
if (!parser.Parse(&input_stream, &parsed_proto)) {
return errors::InvalidArgument("Could not parse text proto: ", text_proto);
}
return parsed_proto;
}
TEST(HistogramTest, Subtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 5.0
num: 1.0
sum: 5.0
sum_squares: 25.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 0
bucket: 0
)pb"));
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 2.0);
EXPECT_FLOAT_EQ(delta.sum(), 550.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 252500.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 1.0);
EXPECT_FLOAT_EQ(delta.num(3), 1.0);
}
TEST(HistogramTest, ReverseSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 5.0
num: 1.0
sum: 5.0
sum_squares: 25.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 0
bucket: 0
)pb"));
EXPECT_THAT(
Histogram(histogram2).Subtract(Histogram(histogram1)),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr("Failed to subtract a histogram by a larger histogram.")));
}
TEST(HistogramTest, NegativeSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: -100.0
max: 0.0
num: 5.0
sum: -500.0
sum_squares: 50000.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 5
bucket: 0
bucket: 0
bucket: 0
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: -100.0
max: 0.0
num: 2.0
sum: -200.0
sum_squares: 20000.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 2
bucket: 0
bucket: 0
bucket: 0
)pb"));
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 3.0);
EXPECT_FLOAT_EQ(delta.sum(), -300.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 30000.0);
EXPECT_FLOAT_EQ(delta.num(0), 3.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 0.0);
EXPECT_FLOAT_EQ(delta.num(3), 0.0);
}
TEST(HistogramTest, SingleBucketSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 1.0
num: 100.0
sum: 100.0
sum_squares: 100.0
bucket: 100
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 1.0
num: 50.0
sum: 50.0
sum_squares: 50.0
bucket: 50
)pb"));
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 50.0);
EXPECT_FLOAT_EQ(delta.sum(), 50.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 50.0);
EXPECT_FLOAT_EQ(delta.num(0), 50.0);
}
TEST(HistogramTest, SelfSubtract) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(Histogram delta,
Histogram(histogram).Subtract(Histogram(histogram)));
EXPECT_FLOAT_EQ(delta.num(), 0.0);
EXPECT_FLOAT_EQ(delta.sum(), 0.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 0.0);
EXPECT_FLOAT_EQ(delta.num(3), 0.0);
}
TEST(HistogramTest, SubtractEmptyHistogram) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
const HistogramProto empty;
TF_ASSERT_OK_AND_ASSIGN(Histogram delta,
Histogram(histogram).Subtract(Histogram(empty)));
EXPECT_FLOAT_EQ(delta.num(), 3.0);
EXPECT_FLOAT_EQ(delta.sum(), 555.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 252525.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 1.0);
EXPECT_FLOAT_EQ(delta.num(2), 1.0);
EXPECT_FLOAT_EQ(delta.num(3), 1.0);
}
TEST(HistogramTest, SubtractTwoEmptyHistograms) {
const HistogramProto histogram1;
const HistogramProto histogram2;
TF_ASSERT_OK_AND_ASSIGN(
Histogram delta, Histogram(histogram1).Subtract(Histogram(histogram2)));
EXPECT_FLOAT_EQ(delta.num(), 0.0);
EXPECT_FLOAT_EQ(delta.sum(), 0.0);
EXPECT_FLOAT_EQ(delta.sum_squares(), 0.0);
EXPECT_FLOAT_EQ(delta.num(0), 0.0);
EXPECT_FLOAT_EQ(delta.num(1), 0.0);
EXPECT_FLOAT_EQ(delta.num(2), 0.0);
EXPECT_FLOAT_EQ(delta.num(3), 0.0);
}
TEST(HistogramTest, DifferentBuckets) {
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram1,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 500.0
num: 3.0
sum: 555.0
sum_squares: 252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
)pb"));
TF_ASSERT_OK_AND_ASSIGN(HistogramProto histogram2,
ParseTextProto<HistogramProto>(R"pb(
min: 0.0
max: 50000.0
num: 5.0
sum: 55555.0
sum_squares: 2525252525.0
bucket_limit: 0.0
bucket_limit: 10.0
bucket_limit: 100.0
bucket_limit: 1000.0
bucket: 0
bucket: 1
bucket: 1
bucket: 1
bucket: 2
)pb"));
EXPECT_THAT(
Histogram(histogram1).Subtract(Histogram(histogram2)),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Subtracting a histogram with different buckets.")));
}
TEST(PercentilesTest, Percentiles) {
tensorflow::monitoring::Percentiles percentiles_value;
percentiles_value.total_samples = 100;
percentiles_value.accumulator = -100;
Percentiles percentiles(percentiles_value);
EXPECT_EQ(percentiles.num(), 100);
EXPECT_FLOAT_EQ(percentiles.sum(), -100);
Percentiles delta = percentiles.Subtract(percentiles);
EXPECT_EQ(delta.num(), 0);
EXPECT_FLOAT_EQ(delta.sum(), 0);
delta = delta.Subtract(percentiles);
EXPECT_EQ(delta.num(), -100);
EXPECT_FLOAT_EQ(delta.sum(), 100);
}
TEST(PercentilesTest, Subtract) {
tensorflow::monitoring::Percentiles percentiles_value1;
percentiles_value1.total_samples = 100;
percentiles_value1.accumulator = 100;
Percentiles percentiles1(percentiles_value1);
EXPECT_EQ(percentiles1.num(), 100);
EXPECT_FLOAT_EQ(percentiles1.sum(), 100);
tensorflow::monitoring::Percentiles percentiles_value2;
percentiles_value2.total_samples = 90;
percentiles_value2.accumulator = 90;
Percentiles percentiles2(percentiles_value2);
EXPECT_EQ(percentiles2.num(), 90);
EXPECT_FLOAT_EQ(percentiles2.sum(), 90);
Percentiles delta = percentiles1.Subtract(percentiles2);
EXPECT_EQ(delta.num(), 10);
EXPECT_FLOAT_EQ(delta.sum(), 10);
}
TEST(PercentilesTest, ReverseSubtract) {
tensorflow::monitoring::Percentiles percentiles_value1;
percentiles_value1.total_samples = 100;
percentiles_value1.accumulator = 100;
Percentiles percentiles1(percentiles_value1);
EXPECT_EQ(percentiles1.num(), 100);
EXPECT_FLOAT_EQ(percentiles1.sum(), 100);
tensorflow::monitoring::Percentiles percentiles_value2;
percentiles_value2.total_samples = 90;
percentiles_value2.accumulator = 90;
Percentiles percentiles2(percentiles_value2);
EXPECT_EQ(percentiles2.num(), 90);
EXPECT_FLOAT_EQ(percentiles2.sum(), 90);
Percentiles delta = percentiles2.Subtract(percentiles1);
EXPECT_EQ(delta.num(), -10);
EXPECT_FLOAT_EQ(delta.sum(), -10);
}
TEST(PercentilesTest, SubtractEmptyPercentile) {
tensorflow::monitoring::Percentiles percentiles_value;
percentiles_value.total_samples = 1;
percentiles_value.accumulator = 1;
Percentiles percentiles(percentiles_value);
EXPECT_EQ(percentiles.num(), 1);
EXPECT_FLOAT_EQ(percentiles.sum(), 1);
Percentiles empty_percentile((tensorflow::monitoring::Percentiles()));
EXPECT_EQ(empty_percentile.num(), 0);
EXPECT_FLOAT_EQ(empty_percentile.sum(), 0);
Percentiles delta = percentiles.Subtract(empty_percentile);
EXPECT_EQ(delta.num(), 1);
EXPECT_FLOAT_EQ(delta.sum(), 1);
}
TEST(PercentilesTest, EmptyPercentiles) {
Percentiles empty_percentile((tensorflow::monitoring::Percentiles()));
EXPECT_EQ(empty_percentile.num(), 0);
EXPECT_FLOAT_EQ(empty_percentile.sum(), 0);
Percentiles delta = empty_percentile.Subtract(empty_percentile);
EXPECT_EQ(delta.num(), 0);
EXPECT_FLOAT_EQ(delta.sum(), 0);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/mlrt/test_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/test_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ba9fb26-5b1e-47d3-8a9e-005dcae536cb | cpp | tensorflow/tensorflow | executable | third_party/xla/xla/service/executable.cc | tensorflow/core/tfrt/mlrt/bytecode/executable_test.cc | #include "xla/service/executable.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "xla/debug_options_flags.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
ExecutionInput::~ExecutionInput() {
for (auto& index : unowned_indices_) {
auto buffer = buffers_.mutable_element(index)->Release();
if (buffer) {
buffer->Release();
}
}
}
absl::Status ExecutionInput::SetDynamicShape(Shape dynamic_shape) {
const Shape& input_shape = shape();
if (!ShapeUtil::DynamicShapeIsCompatible(input_shape, dynamic_shape)) {
return tsl::errors::InvalidArgument(
"Cannot set dynamic shape: ", input_shape.DebugString(), " vs. ",
dynamic_shape.DebugString());
}
dynamic_shape_ = std::make_unique<Shape>(std::move(dynamic_shape));
return absl::OkStatus();
}
void ExecutionInput::SetUnownedBuffer(const ShapeIndex& index,
MaybeOwningDeviceMemory buffer) {
*buffers_.mutable_element(index) = std::move(buffer);
unowned_indices_.insert(index);
}
absl::StatusOr<ShapedBuffer> ExecutionInput::ToShapedBuffer(
se::DeviceMemoryAllocator* allocator, int device_ordinal) const {
const Shape& input_shape = shape();
ShapedBuffer shaped_buffer(input_shape, device_ordinal);
for (const auto& index_buffer : Buffers()) {
const tensorflow::se::OwningDeviceMemory* mem =
index_buffer.second.AsOwningDeviceMemory();
if (mem != nullptr && (mem->allocator() != allocator ||
mem->device_ordinal() != device_ordinal)) {
return tsl::errors::InvalidArgument("Device buffer at index ",
index_buffer.first.ToString(),
" has mismatching allocator/device");
}
shaped_buffer.set_buffer(index_buffer.second.AsDeviceMemoryBase(),
index_buffer.first);
}
return std::move(shaped_buffer);
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
absl::StatusOr<ScopedShapedBuffer> result =
ExecuteAsyncOnStream(run_options, arguments, hlo_execution_profile);
absl::Status blocking_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(blocking_status);
return result;
}
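// Wraps each device buffer of a ShapedBuffer as unowned
// MaybeOwningDeviceMemory so that ShapedBuffer-based callers can reuse the
// ExecutionInput-based ExecuteAsyncOnStream overload.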
static ExecutionInput MakeMaybeOwningDeviceMemoryTree(
const ShapedBuffer& shaped_buffer) {
ExecutionInput result(shaped_buffer.on_device_shape());
shaped_buffer.buffers().ForEachElement(
[&](const ShapeIndex& index, const se::DeviceMemoryBase& mem) {
result.SetBuffer(index, MaybeOwningDeviceMemory(mem));
});
return result;
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
std::vector<ExecutionInput> args;
args.reserve(arguments.size());
for (const ShapedBuffer* arg : arguments) {
args.emplace_back(MakeMaybeOwningDeviceMemoryTree(*arg));
}
TF_ASSIGN_OR_RETURN(ExecutionOutput out,
ExecuteAsyncOnStream(run_options, std::move(args),
hlo_execution_profile));
return out.ConsumeResult();
}
absl::StatusOr<ExecutionOutput> Executable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
std::vector<ExecutionInput> arguments,
HloExecutionProfile* hlo_execution_profile) {
absl::StatusOr<ExecutionOutput> result = ExecuteAsyncOnStream(
run_options, std::move(arguments), hlo_execution_profile);
absl::Status blocking_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(blocking_status);
return result;
}
absl::StatusOr<std::vector<ScopedShapedBuffer>> Executable::ExecuteOnStreams(
absl::Span<const ServiceExecutableRunOptions> run_options,
absl::Span<const absl::Span<const ShapedBuffer* const>> arguments) {
TF_RET_CHECK(run_options.size() == arguments.size());
std::vector<ScopedShapedBuffer> return_values;
return_values.reserve(run_options.size());
if (run_options.size() == 1) {
TF_ASSIGN_OR_RETURN(auto rv,
ExecuteOnStream(&run_options[0], arguments[0],
nullptr));
return_values.push_back(std::move(rv));
return std::move(return_values);
}
for (size_t i = 0; i < run_options.size(); ++i) {
TF_ASSIGN_OR_RETURN(
auto rv, ExecuteAsyncOnStream(&run_options[i], arguments[i],
nullptr));
return_values.push_back(std::move(rv));
}
for (const auto& options : run_options) {
TF_RET_CHECK(options.stream() != nullptr);
TF_RETURN_IF_ERROR(options.stream()->BlockHostUntilDone());
}
return std::move(return_values);
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments) {
absl::StatusOr<ScopedShapedBuffer> result =
ExecuteAsyncOnStreamWrapper(run_options, arguments);
absl::Status block_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(block_status);
return result;
}
absl::StatusOr<ExecutionOutput> Executable::ExecuteOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
std::vector<ExecutionInput> arguments) {
absl::StatusOr<ExecutionOutput> result =
ExecuteAsyncOnStreamWrapper(run_options, std::move(arguments));
absl::Status block_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(block_status);
return result;
}
struct ExecuteAsyncOnStreamWrapperState {
ExecutionProfile* profile;
};
static ExecuteAsyncOnStreamWrapperState ExecuteWrapperBeforeExecution(
const Executable& executable,
const ServiceExecutableRunOptions* run_options) {
ExecuteAsyncOnStreamWrapperState state;
state.profile = run_options->run_options().execution_profile();
VLOG(1) << "enqueueing executable on stream...";
return state;
}
absl::Status ExecuteWrapperAfterExecution(
Executable* executable, const ExecuteAsyncOnStreamWrapperState& state,
absl::Status return_status, se::Stream* stream) {
if (!return_status.ok()) {
if (state.profile != nullptr) {
absl::Status status = stream->BlockHostUntilDone();
if (!status.ok()) {
LOG(ERROR) << "Failed to BlockHostUntilDone: " << status;
}
}
return return_status;
}
if (state.profile != nullptr) {
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
const int64_t executable_size_in_bytes =
executable->SizeOfGeneratedCodeInBytes();
if (state.profile->compute_time_ns() == 0) {
state.profile->set_compute_time_ns(
state.profile->compute_and_transfer_time_ns());
}
if (executable_size_in_bytes != 0) {
state.profile->set_executable_size_in_bytes(executable_size_in_bytes);
}
}
return return_status;
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteAsyncOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments) {
auto state = ExecuteWrapperBeforeExecution(*this, run_options);
absl::StatusOr<ScopedShapedBuffer> return_value =
ExecuteAsyncOnStream(run_options, arguments, nullptr);
TF_RETURN_IF_ERROR(ExecuteWrapperAfterExecution(
this, state, return_value.status(), run_options->stream()));
return return_value;
}
absl::StatusOr<ExecutionOutput> Executable::ExecuteAsyncOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
std::vector<ExecutionInput> arguments) {
auto state = ExecuteWrapperBeforeExecution(*this, run_options);
absl::StatusOr<ExecutionOutput> return_value =
ExecuteAsyncOnStream(run_options, std::move(arguments), nullptr);
TF_RETURN_IF_ERROR(ExecuteWrapperAfterExecution(
this, state, return_value.status(), run_options->stream()));
return return_value;
}
int64_t Executable::SizeOfGeneratedCodeInBytes() const { return -1; }
void Executable::MarkToBeReleasedArguments(absl::Span<ExecutionInput> arguments,
ExecutionOutput& result) {
for (ExecutionInput& argument : arguments) {
for (auto& index_buffer : *argument.MutableBuffers()) {
if (std::optional<se::OwningDeviceMemory> maybe_owning_buffer =
index_buffer.second.Release()) {
result.AddToBeReleased(std::move(*maybe_owning_buffer));
}
}
}
}
} | #include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include <cstring>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
namespace bc {
namespace {
TEST(ExecutableTest, Executable) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
Executable::Constructor executable_ctor = bc::New<bc::Executable>(&allocator);
Vector<String>::Constructor kernel_names_ctor =
executable_ctor.construct_kernel_names(2);
kernel_names_ctor.ConstructAt(0, "add");
kernel_names_ctor.ConstructAt(1, "return");
auto attributes_ctor = executable_ctor.construct_attributes(1);
int32_t constant = 1;
std::string constant_str(sizeof(int32_t), '\0');
std::memcpy(constant_str.data(), &constant, sizeof(int32_t));
attributes_ctor.ConstructAt(0, constant_str);
executable_ctor.construct_functions(1);
Executable executable(buffer.Get(executable_ctor.address()));
EXPECT_THAT(executable.kernel_names(),
::testing::ElementsAreArray({"add", "return"}));
EXPECT_EQ(executable.attributes().size(), 1);
int32_t value;
ASSERT_EQ(executable.attributes()[0].size(), sizeof(value));
std::memcpy(&value, executable.attributes()[0].data(), sizeof(int32_t));
EXPECT_EQ(value, constant);
EXPECT_EQ(executable.functions().size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/executable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/executable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f724d8e5-774c-4366-8f95-a977c6960432 | cpp | tensorflow/tensorflow | grpc_client_session | third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session.cc | third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session_test.cc | #include "xla/python/ifrt_proxy/client/grpc_client_session.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "grpc/grpc.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "xla/pjrt/distributed/util.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/unbounded_work_queue.h"
namespace xla {
namespace ifrt {
namespace proxy {
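// Thread-safe table of per-op-id response callbacks registered by Enqueue().
// The reader loop pops and invokes the callback matching each response; PopAll
// is used at session teardown to fail any callbacks that are still pending.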
class GrpcClientSession::ResponseCallbackTable {
public:
absl::Status Add(OpId op_id, ResponseCallback callback) {
absl::MutexLock l(&mu_);
const bool inserted = table_.insert({op_id, std::move(callback)}).second;
if (!inserted) {
return absl::AlreadyExistsError(
absl::StrCat("Op id ", op_id, " already exists"));
}
return absl::OkStatus();
}
std::optional<ResponseCallback> Pop(OpId op_id) {
absl::MutexLock l(&mu_);
auto it = table_.find(op_id);
if (it == table_.end()) {
return std::nullopt;
}
auto cb = std::move(it->second);
table_.erase(it);
return std::move(cb);
}
absl::flat_hash_map<OpId, ResponseCallback> PopAll() {
absl::flat_hash_map<OpId, ResponseCallback> result;
absl::MutexLock l(&mu_);
result = std::move(table_);
table_ = absl::flat_hash_map<OpId, ResponseCallback>();
return result;
}
private:
absl::Mutex mu_;
absl::flat_hash_map<OpId, ResponseCallback> table_ ABSL_GUARDED_BY(mu_);
};
std::shared_ptr<GrpcClientSession> GrpcClientSession::Create(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
GrpcIfrtSessionMetadata metadata,
StreamTerminatedCallback stream_terminated_cb) {
auto context = std::make_unique<::grpc::ClientContext>();
context->AddMetadata("ifrt-proxy-grpc-ifrt-session-metadata-bin",
metadata.SerializeAsString());
std::shared_ptr<GrpcClientSession> result(new GrpcClientSession(
std::move(stub), std::move(context), std::move(stream_terminated_cb)));
return result;
}
GrpcClientSession::GrpcClientSession(
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub,
std::unique_ptr<::grpc::ClientContext> context,
StreamTerminatedCallback stream_terminated_cb)
: response_callbacks_(std::make_unique<ResponseCallbackTable>()),
reader_thread_(std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), "ifrt_proxy_client_grpc_reader",
1)),
stub_(std::move(stub)),
context_(std::move(context)),
stream_(stub_->IfrtSession(context_.get())),
stream_terminated_cb_(std::move(stream_terminated_cb)),
user_futures_work_queue_(std::make_unique<tsl::UnboundedWorkQueue>(
tsl::Env::Default(), "GrpcClientSessionUserFuturesWorkQueue")) {
reader_thread_->Schedule(
absl::bind_front(&GrpcClientSession::ReadLoop, this));
}
Future<std::shared_ptr<IfrtResponse>> GrpcClientSession::Enqueue(
std::unique_ptr<IfrtRequest> request) {
auto promise = Future<std::shared_ptr<IfrtResponse>>::CreatePromise();
absl::Status status = Enqueue(
std::move(request),
[promise, queue = user_futures_work_queue_.get()](
absl::StatusOr<std::shared_ptr<IfrtResponse>> response) mutable {
queue->Schedule([promise = std::move(promise),
response = std::move(response)]() mutable -> void {
promise.Set(std::move(response));
});
});
if (!status.ok()) {
user_futures_work_queue_->Schedule([promise, status]() mutable -> void {
promise.Set(std::move(status));
});
}
return Future<std::shared_ptr<IfrtResponse>>(std::move(promise));
}
absl::Status GrpcClientSession::Enqueue(std::unique_ptr<IfrtRequest> req,
ResponseCallback callback) {
absl::MutexLock l(&writer_mu_);
const OpId op_id = writer_next_op_id_++;
if (writes_stopped_) {
return absl::FailedPreconditionError(
"GrpcClientSession: writes no longer allowed.");
}
TF_RETURN_IF_ERROR(response_callbacks_->Add(op_id, std::move(callback)));
CHECK_EQ(req->mutable_request_metadata()->op_id(), 0);
req->mutable_request_metadata()->set_op_id(op_id);
if (!stream_->Write(*req)) {
CHECK(response_callbacks_->Pop(op_id).has_value());
return absl::UnknownError("GrpcClientSession: writing to stream failed.");
}
return absl::OkStatus();
}
void GrpcClientSession::ReadLoop() {
while (true) {
auto read_buffer = std::make_unique<IfrtResponse>();
if (!stream_->Read(read_buffer.get())) {
LOG(INFO) << "GrpcClientSession: reader loop is exiting.";
break;
}
const OpId op_id = read_buffer->response_metadata().op_id();
std::optional<ResponseCallback> callback = response_callbacks_->Pop(op_id);
if (callback.has_value()) {
VLOG(1) << "GrpcClientSession: Issuing callback for " << op_id;
(*callback)(std::move(read_buffer));
VLOG(1) << "GrpcClientSession: Done with callback for " << op_id;
} else {
LOG(ERROR) << "Received response with no remaining registered callback: "
<< read_buffer->DebugString();
}
}
reader_thread_stopped_.Notify();
Finish(absl::OkStatus());
}
void GrpcClientSession::Finish(const absl::Status& client_status) {
LOG(INFO) << "GrpcClientSession: Finish() called with client status "
<< client_status;
absl::call_once(finish_once_, [&] {
context_->TryCancel();
LOG(INFO) << "GrpcClientSession: Waiting for reader thread to stop.";
reader_thread_stopped_.WaitForNotification();
auto finish_stream_and_get_server_status = [&]() -> absl::Status {
LOG(INFO) << "GrpcClientSession: Attempting to call stream->Finish()";
absl::MutexLock l(&writer_mu_);
LOG(INFO) << "GrpcClientSession: Attempting to call stream->Finish(), "
"mutex acquired";
absl::Status server_status = xla::FromGrpcStatus(stream_->Finish());
LOG(INFO) << "GrpcClientSession: stream->Finish() returned server status "
<< server_status;
CHECK(!writes_stopped_);
writes_stopped_ = true;
return server_status;
};
absl::Status combined_status = finish_stream_and_get_server_status();
combined_status.Update(client_status);
auto all_callbacks = response_callbacks_->PopAll();
for (auto& [_, cb] : all_callbacks) {
if (combined_status.ok()) {
cb(absl::AbortedError("Finish(OK) called."));
} else {
cb(combined_status);
}
}
LOG(INFO) << "GrpcClientSession::Finish(): calling terminated cb with "
<< combined_status;
stream_terminated_cb_(combined_status);
});
}
GrpcClientSession::~GrpcClientSession() {
GrpcClientSession::Finish(absl::CancelledError("~GrpcClientSession called."));
reader_thread_.reset();
LOG(INFO) << "Deleting GrpcClientSession.user_futures_work_queue_ ...";
user_futures_work_queue_.reset();
LOG(INFO) << "Deleted GrpcClientSession.user_futures_work_queue_.";
}
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> CreateGrpcStub(
absl::string_view server_address) {
::grpc::ChannelArguments args;
args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, -1);
args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, -1);
std::shared_ptr<::grpc::Channel> channel = ::grpc::CreateCustomChannel(
std::string(server_address), GetClientCredentials(), args);
VLOG(0) << " Established channel.";
CHECK(channel != nullptr);
std::shared_ptr<grpc::GrpcIfrtService::StubInterface> stub =
grpc::GrpcIfrtService::NewStub(channel);
VLOG(0) << " Created stub.";
CHECK(stub != nullptr);
return stub;
}
}
}
} | #include "xla/python/ifrt_proxy/client/grpc_client_session.h"
#include <atomic>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/log/log_sink_registry.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "grpc/support/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "grpcpp/support/sync_stream.h"
#include "xla/python/ifrt_proxy/client/version.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/test_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
constexpr absl::Duration kSufficientTime = absl::Seconds(5);
GrpcIfrtSessionMetadata Metadata() {
GrpcIfrtSessionMetadata metadata;
metadata.mutable_version()->set_protocol_version(kClientMaxVersion);
return metadata;
}
absl::Status TestError() { return absl::UnknownError("test error"); }
struct Queue : public TestQueue<absl::Status> {
Queue() : TestQueue<absl::Status>(kSufficientTime) {}
};
void ExpectHeadAndTail(
std::vector<std::variant<absl::StatusOr<Queue*>, absl::Status>> var_list) {
std::vector<absl::Status> status_list;
for (const auto& v : var_list) {
if (std::holds_alternative<absl::StatusOr<Queue*>>(v)) {
status_list.push_back(std::get<absl::StatusOr<Queue*>>(v).status());
} else {
status_list.push_back(std::get<absl::Status>(v));
}
}
bool seen_not_ok = false;
std::string str;
for (const auto& s : status_list) {
absl::StrAppend(&str, "\n", s.ToString(), "\n-----\n");
}
for (const auto& s : status_list) {
if (!s.ok()) seen_not_ok = true;
if (seen_not_ok) {
EXPECT_THAT(s, Not(IsOk())) << str;
}
}
}
using ServerStream = ::grpc::ServerReaderWriter<IfrtResponse, IfrtRequest>;
using SessionAction = bool;
constexpr SessionAction kContinueSession = true;
constexpr SessionAction kStopSession = false;
using OnSessionStart = std::function<SessionAction()>;
using OnReqReceived =
std::function<SessionAction(const IfrtRequest&, ServerStream*)>;
class SimpleIfrtService : public grpc::GrpcIfrtService::Service {
public:
SimpleIfrtService(OnReqReceived on_req_received,
OnSessionStart on_session_start)
: on_req_received_(std::move(on_req_received)),
on_session_start_(std::move(on_session_start)) {}
::grpc::Status IfrtSession(::grpc::ServerContext* context,
ServerStream* stream) override {
if (on_session_start_ && on_session_start_() == kStopSession) {
return ::grpc::Status::OK;
}
{
absl::MutexLock l(&mu_);
CHECK(contexts_.insert(context).second);
}
while (true) {
IfrtRequest request;
LOG(INFO) << "Server: waiting on Read().";
if (!stream->Read(&request)) {
LOG(INFO) << "Server: Read() returned false.";
break;
}
LOG(INFO) << "Server: Read() returned true.";
if (!on_req_received_) {
IfrtResponse response;
response.mutable_response_metadata()->set_op_id(
request.request_metadata().op_id());
stream->Write(response);
} else if (on_req_received_(request, stream) == kStopSession) {
break;
}
}
{
absl::MutexLock l(&mu_);
CHECK_EQ(contexts_.erase(context), 1);
}
LOG(INFO) << "Finishing IFRT session";
return ::grpc::Status::OK;
}
void CancelAllServerSessions() {
absl::MutexLock l(&mu_);
for (const auto& context : contexts_) {
context->TryCancel();
}
}
private:
const OnReqReceived on_req_received_;
const OnSessionStart on_session_start_;
absl::Mutex mu_;
absl::flat_hash_set<::grpc::ServerContext*> contexts_ ABSL_GUARDED_BY(mu_);
};
class ClientAndServer {
public:
explicit ClientAndServer(OnReqReceived on_req_received = nullptr,
OnSessionStart on_session_start = nullptr) {
std::string address =
absl::StrCat("localhost:", tsl::testing::PickUnusedPortOrDie());
::grpc::ServerBuilder builder;
builder.AddListeningPort(address, GetServerCredentials());
ifrt_service_ =
std::make_unique<SimpleIfrtService>(on_req_received, on_session_start);
builder.RegisterService(ifrt_service_.get());
server_ = builder.BuildAndStart();
LOG(INFO) << "Server started and listening on " << address;
absl::FlushLogSinks();
std::shared_ptr<::grpc::Channel> channel =
::grpc::CreateChannel(address, GetClientCredentials());
channel->WaitForConnected(gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN)));
LOG(INFO) << "conn_state = " << channel->GetState(false);
auto stub = grpc::GrpcIfrtService::NewStub(channel);
CHECK(stub != nullptr);
client_session_ = GrpcClientSession::Create(
std::move(stub), Metadata(), [this](absl::Status s) {
client_finished_q_.Push(s);
client_finished_notification_.Notify();
});
client_finished_q_.AllowNonEmptyDestruction(true);
}
void StopServer() {
ifrt_service_->CancelAllServerSessions();
server_->Shutdown();
server_->Wait();
}
~ClientAndServer() {
StopServer();
client_session_->Finish(absl::CancelledError("~ClientAndServer"));
client_finished_notification_.WaitForNotificationWithTimeout(
kSufficientTime);
CHECK(client_finished_notification_.HasBeenNotified());
}
GrpcClientSession* client_session() { return client_session_.get(); }
Queue* client_finished_q() { return &client_finished_q_; }
absl::StatusOr<Queue*> SendSimpleRequest() {
owned_queues_.push_back(std::make_unique<Queue>());
Queue* q = owned_queues_.back().get();
auto req = std::make_unique<IfrtRequest>();
TF_RETURN_IF_ERROR(client_session_->Enqueue(
std::move(req), [q](absl::StatusOr<GrpcClientSession::Response> resp) {
q->Push(resp.status());
}));
return q;
}
private:
std::vector<std::unique_ptr<Queue>> owned_queues_;
Queue client_finished_q_;
absl::Notification client_finished_notification_;
std::shared_ptr<GrpcClientSession> client_session_;
std::unique_ptr<::grpc::Server> server_;
std::unique_ptr<SimpleIfrtService> ifrt_service_;
};
TEST(GrpcClientSessionTest, HappyCaseOneRequestWithServerTermination) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q, cs.SendSimpleRequest());
EXPECT_THAT(response_q->Pop(), IsOk());
EXPECT_EQ(cs.client_finished_q()->PopOrTimeout(), std::nullopt);
cs.StopServer();
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, HappyCaseTwoRequestsWithClientFinish) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_2, cs.SendSimpleRequest());
EXPECT_THAT(response_q_1->Pop(), IsOk());
EXPECT_THAT(response_q_2->Pop(), IsOk());
EXPECT_EQ(cs.client_finished_q()->PopOrTimeout(), std::nullopt);
cs.client_session()->Finish(TestError());
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ServerFinishesDuringFirstRead) {
ClientAndServer cs(
[](auto, auto) { return kStopSession; });
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
EXPECT_THAT(response_q_1->Pop(), Not(IsOk()));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
EXPECT_THAT(response_q_2.status(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ServerFinishesDuringConstruction) {
ClientAndServer cs(nullptr,
[]() { return kStopSession; });
absl::StatusOr<Queue*> response_q_1 = cs.SendSimpleRequest();
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
ExpectHeadAndTail({response_q_1, response_q_2});
if (response_q_1.ok()) EXPECT_THAT(response_q_1.value()->Pop(), Not(IsOk()));
if (response_q_2.ok()) EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesAfterServerConsumesFirstRequest) {
std::atomic<GrpcClientSession*> session_ptr;
ClientAndServer cs(
[session_ptr = &session_ptr](auto, auto) {
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
EXPECT_THAT(response_q_1->Pop(), Not(IsOk()));
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
EXPECT_THAT(response_q_2.status(), Not(IsOk()));
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesAfterServerWritesFirstResponse) {
std::atomic<GrpcClientSession*> session_ptr;
ClientAndServer cs(
[session_ptr = &session_ptr](const IfrtRequest& r,
ServerStream* s) {
IfrtResponse response;
response.mutable_response_metadata()->set_op_id(
r.request_metadata().op_id());
s->Write(response);
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
response_q_1->Pop().IgnoreError();
if (response_q_2.ok()) {
EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
}
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, ClientFinishesDuringServerConstruction) {
std::atomic<GrpcClientSession*> session_ptr;
absl::Notification init_done;
ClientAndServer cs(nullptr,
[session_ptr = &session_ptr,
init_done = &init_done]() {
init_done->WaitForNotification();
session_ptr->load()->Finish(TestError());
return kContinueSession;
});
session_ptr.store(cs.client_session());
init_done.Notify();
absl::StatusOr<Queue*> response_q_1 = cs.SendSimpleRequest();
absl::StatusOr<Queue*> response_q_2 = cs.SendSimpleRequest();
if (response_q_1.ok()) {
EXPECT_THAT(response_q_1.value()->Pop(), Not(IsOk()));
}
if (response_q_2.ok()) {
EXPECT_THAT(response_q_2.value()->Pop(), Not(IsOk()));
}
ExpectHeadAndTail({response_q_1, response_q_2});
EXPECT_THAT(cs.client_finished_q()->Pop(), Not(IsOk()));
}
TEST(GrpcClientSessionTest, MethodsAfterFinishReturnError) {
ClientAndServer cs;
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q_1, cs.SendSimpleRequest());
cs.client_session()->Finish(TestError());
EXPECT_THAT(cs.SendSimpleRequest(), Not(IsOk()));
response_q_1->AllowNonEmptyDestruction(true);
}
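// The server first writes a response whose op_id (2000) matches no pending
// request, then writes the correct one; the client should ignore the unknown
// response rather than crash.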
TEST(GrpcClientSessionTest, ReceivingBadIfrtResponseDoesNotCrash) {
ClientAndServer cs(
[](const IfrtRequest& r, ServerStream* s) mutable {
IfrtResponse resp;
resp.mutable_response_metadata()->set_op_id(2000);
s->Write(resp);
resp.mutable_response_metadata()->set_op_id(
r.request_metadata().op_id());
s->Write(resp);
return kContinueSession;
});
TF_ASSERT_OK_AND_ASSIGN(Queue * response_q, cs.SendSimpleRequest());
EXPECT_THAT(response_q->Pop(), IsOk());
}
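// No server is listening on the picked port, so session creation should fail
// promptly instead of hanging.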
TEST(GrpcClientSessionTest, BadInitialChannelFailsPromptly) {
std::string address =
absl::StrCat("localhost:", tsl::testing::PickUnusedPortOrDie());
std::shared_ptr<::grpc::Channel> channel =
::grpc::CreateChannel(address, GetClientCredentials());
std::unique_ptr<grpc::GrpcIfrtService::StubInterface> stub =
grpc::GrpcIfrtService::NewStub(channel);
EXPECT_TRUE(stub != nullptr);
auto session_finished = std::make_shared<Queue>();
auto session = GrpcClientSession::Create(
std::move(stub), Metadata(),
[session_finished](absl::Status s) { session_finished->Push(s); });
EXPECT_THAT(session_finished->Pop(), Not(IsOk()));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/grpc_client_session_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4649cce1-3b5d-4cf7-8780-4bdd02283558 | cpp | tensorflow/tensorflow | rpc_helper | third_party/xla/xla/python/ifrt_proxy/client/rpc_helper.cc | third_party/xla/xla/python/ifrt_proxy/client/rpc_helper_test.cc | #include "xla/python/ifrt_proxy/client/rpc_helper.h"
#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/test_utils.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/lib/traceme_encode.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::tsl::profiler::XFlow;
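// How often the background thread flushes batched operations to the server.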
constexpr absl::Duration kPeriodicFlushInterval = absl::Microseconds(50);
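// Tags profiler TraceMe events with a shared XFlow id so that the send and
// receive sides of the same logical request can be correlated in a profile.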
class XFlowHelper {
public:
explicit XFlowHelper(absl::string_view name)
      : xflow_id_(tsl::random::New64() >> 8),
name_(name) {}
typedef enum { kSend, kRecv, kRecvSend } Direction;
template <Direction D>
tsl::profiler::TraceMe Span() const {
return tsl::profiler::TraceMe([xflow_id = xflow_id_, name = name_] {
return Encode<D>(xflow_id, name);
});
}
template <Direction D>
void InstantActivity() const {
return tsl::profiler::TraceMe::InstantActivity(
[xflow_id = xflow_id_, name = name_] {
return Encode<D>(xflow_id, name);
});
}
private:
template <Direction D>
static std::string Encode(uint64_t xflow_id, absl::string_view name) {
static constexpr absl::string_view flow_dir_str =
D == kSend ? "send" : (D == kRecv ? "recv" : "recv_send");
const XFlow flow(xflow_id, D == kRecvSend ? XFlow::kFlowInOut
: (D == kRecv ? XFlow::kFlowIn
: XFlow::kFlowOut));
return tsl::profiler::TraceMeEncode(
name, {{"dir", flow_dir_str}, {"flow", flow.ToStatValue()}});
};
const uint64_t xflow_id_;
const absl::string_view name_;
};
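// Thread-safe accumulator for operations that may be batched: array deletion
// and array destruction. Consume() drains the accumulated handles into at
// most one IfrtRequest per operation kind.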
class BatchedOps {
public:
using BatchOperation = RpcHelper::BatchOperation;
void Add(BatchOperation op, ArrayHandle handle) {
absl::MutexLock l(&mu_);
batched_[op].push_back(handle);
}
struct IfrtRequests {
std::unique_ptr<IfrtRequest> delete_req;
std::unique_ptr<IfrtRequest> destruct_req;
};
IfrtRequests Consume() {
IfrtRequests result;
absl::MutexLock l(&mu_);
if (!batched_[BatchOperation::kDeleteArray].empty()) {
result.delete_req = std::make_unique<IfrtRequest>();
for (const auto& arr_handle : batched_[BatchOperation::kDeleteArray]) {
result.delete_req->mutable_delete_array_request()->add_array_handle(
arr_handle.handle);
}
batched_[BatchOperation::kDeleteArray].clear();
}
if (!batched_[BatchOperation::kDestructArray].empty()) {
result.destruct_req = std::make_unique<IfrtRequest>();
for (const auto& arr_handle : batched_[BatchOperation::kDestructArray]) {
result.destruct_req->mutable_destruct_array_request()->add_array_handle(
arr_handle.handle);
}
batched_[BatchOperation::kDestructArray].clear();
}
return result;
}
private:
absl::Mutex mu_;
std::array<std::vector<ArrayHandle>, BatchOperation::kSentinelDoNotUse>
batched_ ABSL_GUARDED_BY(mu_);
};
}
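// Wraps the ClientSession and batches array delete/destruct operations.
// Batched operations are flushed by a background thread every
// kPeriodicFlushInterval and also immediately before any non-batched request
// is enqueued, which preserves ordering relative to other RPCs.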
class RpcHelper::Batcher {
public:
explicit Batcher(std::shared_ptr<ClientSession> session)
: session_(std::move(session)) {
thread_pool_.emplace(tsl::Env::Default(), "IfrtProxyRpcHelperBatcher",
1);
thread_pool_->Schedule(absl::bind_front(&Batcher::PeriodicFlusher, this));
}
Future<ClientSession::Response> Immediate(
std::unique_ptr<IfrtRequest> request) {
absl::MutexLock l(&mu_);
if (finished_) {
LOG(WARNING) << "After RpcHelper::Finish(): " << request->DebugString();
return Future<ClientSession::Response>(
absl::FailedPreconditionError("RpcHelper::Finish() already called."));
}
Flush();
return session_->Enqueue(std::move(request));
}
void Batch(BatchOperation op, ArrayHandle handle) {
batched_.Add(op, handle);
}
void Finish(absl::Status s) {
{
absl::MutexLock l(&mu_);
finished_ = true;
auto remaining = batched_.Consume();
if (remaining.delete_req != nullptr) {
LOG(WARNING) << "RpcHelper::Batch: Finish() called while there are "
"still batched delete operations";
}
if (remaining.destruct_req != nullptr) {
LOG(WARNING) << "RpcHelper::Batch: Finish() called while there are "
"still batched destruct operations";
}
}
thread_pool_.reset();
session_->Finish(s);
}
private:
void PeriodicFlusher() {
while (true) {
absl::SleepFor(kPeriodicFlushInterval);
absl::MutexLock l(&mu_);
if (finished_) {
return;
}
{
bool periodic_flush_paused = false;
TestHookCall(TestHookName::kRpcBatcherPausePeriodicFlush,
&periodic_flush_paused);
if (periodic_flush_paused) {
continue;
}
}
tsl::profiler::TraceMe traceme("proxy_periodic_flush");
Flush();
}
}
void Flush() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto reqs = batched_.Consume();
if (reqs.delete_req != nullptr) {
XFlowHelper x_flow_helper("batch_delete");
auto traceme = x_flow_helper.Span<XFlowHelper::kSend>();
session_->Enqueue(std::move(reqs.delete_req))
.OnReady(
absl::bind_front(HandleBatchResponse, session_, x_flow_helper));
}
if (reqs.destruct_req != nullptr) {
XFlowHelper x_flow_helper("batch_destruct");
auto traceme = x_flow_helper.Span<XFlowHelper::kSend>();
session_->Enqueue(std::move(reqs.destruct_req))
.OnReady(
absl::bind_front(HandleBatchResponse, session_, x_flow_helper));
}
}
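  // Handles responses to batched requests. A successful delete-array response
  // is chained with a CheckFuture request for the returned deletion future;
  // destruct-array and check-future responses require no follow-up.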
static void HandleBatchResponse(
std::shared_ptr<ClientSession> session, XFlowHelper x_flow_helper,
absl::StatusOr<std::shared_ptr<IfrtResponse>> r) {
if (!r.ok()) {
x_flow_helper.InstantActivity<XFlowHelper::kRecv>();
LOG(WARNING) << "Batched response from ifrt proxy server: " << r.status();
return;
}
if (r.value()->has_delete_array_response()) {
auto traceme = x_flow_helper.Span<XFlowHelper::kRecvSend>();
auto ifrt_req = std::make_unique<IfrtRequest>();
ifrt_req->mutable_check_future_request()->set_future_handle(
r.value()->delete_array_response().deletion_future_handle());
session->Enqueue(std::move(ifrt_req))
.OnReady(
absl::bind_front(HandleBatchResponse, session, x_flow_helper));
} else if (r.value()->has_destruct_array_response() ||
r.value()->has_check_future_response()) {
x_flow_helper.InstantActivity<XFlowHelper::kRecv>();
} else {
LOG(ERROR) << "Unrecognized response from server for batched request: "
<< (*r)->DebugString();
}
}
const std::shared_ptr<ClientSession> session_;
BatchedOps batched_;
absl::Mutex mu_;
bool finished_ ABSL_GUARDED_BY(mu_) = false;
std::optional<tsl::thread::ThreadPool> thread_pool_;
};
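// Shared plumbing for all RPC wrappers: wraps the typed request in an
// IfrtRequest, enqueues it through the batcher, and converts the resulting
// IfrtResponse (or transport error) into a Future of the typed response.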
template <typename Req, typename Resp>
Future<std::shared_ptr<Resp>> DoRpc(RpcHelper::Batcher* batcher,
void (IfrtRequest::*set_req)(Req*),
Resp* (IfrtResponse::*get_resp)(),
bool (IfrtResponse::*has_resp)() const,
std::unique_ptr<Req> req,
absl::string_view profiling_name) {
auto ifrt_req = std::make_unique<IfrtRequest>();
(ifrt_req.get()->*set_req)(req.release());
XFlowHelper x_flow_helper(profiling_name);
auto traceme = x_flow_helper.Span<XFlowHelper::kSend>();
auto promise = Future<std::shared_ptr<Resp>>::CreatePromise();
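  // The ready-callback validates the response: transport errors become
  // UNAVAILABLE, missing metadata or a missing/mismatched response payload
  // becomes INTERNAL, and an error status in the response metadata is
  // propagated as-is.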
auto on_ready = [promise, has_resp, get_resp, x_flow_helper](
absl::StatusOr<std::shared_ptr<IfrtResponse>> r) mutable {
auto traceme = x_flow_helper.Span<XFlowHelper::kRecv>();
if (!r.ok()) {
LOG_EVERY_N_SEC(ERROR, 10)
<< "Connection to IFRT proxy server was terminated: " << r.status();
promise.Set(absl::UnavailableError(
absl::StrCat("Connection to IFRT proxy server was terminated: ",
r.status().ToString())));
return;
}
std::shared_ptr<IfrtResponse> response = *std::move(r);
if (!response->has_response_metadata()) {
promise.Set(absl::InternalError(
absl::StrCat("IFRT server sent a message without metadata: ",
response->DebugString())));
return;
}
const absl::Status metadata_status =
tsl::StatusFromProto(response->response_metadata().status());
const bool has_expected_response = (response.get()->*has_resp)();
const auto has_some_response =
response->response_case() != IfrtResponse::RESPONSE_NOT_SET;
if (metadata_status.ok() && !has_some_response) {
promise.Set(absl::InternalError(
absl::StrCat("OK response with no actual response set: ",
response->DebugString())));
return;
}
if (!has_expected_response && has_some_response) {
promise.Set(absl::InternalError(absl::StrCat(
"Response with wrong type (expected ", Resp::GetDescriptor()->name(),
"): ", response->DebugString())));
return;
}
if (!has_some_response) {
promise.Set(metadata_status);
} else {
promise.Set(
std::make_shared<Resp>(*std::move((response.get()->*get_resp)())));
}
};
batcher->Immediate(std::move(ifrt_req)).OnReady(on_ready);
return Future<std::shared_ptr<Resp>>(promise);
}
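// Generates a thin wrapper per RPC method that forwards the typed request
// through DoRpc using the matching IfrtRequest/IfrtResponse accessors.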
#define RPC(METHOD, PROPERTY) \
RpcHelper::ResponseFuture<METHOD##Response> RpcHelper::METHOD( \
std::unique_ptr<METHOD##Request> req) { \
return DoRpc( \
batcher_.get(), &IfrtRequest::set_allocated_##PROPERTY##_request, \
&IfrtResponse::mutable_##PROPERTY##_response, \
&IfrtResponse::has_##PROPERTY##_response, std::move(req), #PROPERTY); \
}
RPC(Init, init);
RPC(GetDefaultDeviceAssignment, get_default_device_assignment);
RPC(CheckFuture, check_future);
RPC(CheckValueReady, check_value_ready);
RPC(MakeArrayFromHostBuffer, make_array_from_host_buffer);
RPC(AssembleArrayFromSingleDeviceArrays,
assemble_array_from_single_device_arrays);
RPC(RemapArrays, remap_arrays);
RPC(DisassembleIntoSingleDeviceArrays, disassemble_into_single_device_arrays);
RPC(CopyToHostBuffer, copy_to_host_buffer);
RPC(IsArrayDeleted, is_array_deleted);
RPC(DestructArray, destruct_array);
RPC(CopyArrays, copy_arrays);
RPC(Reshard, reshard);
RPC(FullyReplicatedShard, fully_replicated_shard);
RPC(DeleteArray, delete_array);
RPC(Compile, compile);
RPC(LoadedExecutableMetadata, loaded_executable_metadata);
RPC(LoadedExecutableExecute, loaded_executable_execute);
RPC(LoadedExecutableDelete, loaded_executable_delete);
RPC(LoadedExecutableIsDeleted, loaded_executable_is_deleted);
RPC(LoadedExecutableDestruct, loaded_executable_destruct);
RPC(LoadedHostCallbackPoll, loaded_host_callback_poll);
RPC(LoadedHostCallbackReturn, loaded_host_callback_return);
Future<> RpcHelper::CheckFuture(uint64_t handle) {
auto req = std::make_unique<CheckFutureRequest>();
req->set_future_handle(handle);
auto promise = Future<>::CreatePromise();
CheckFuture(std::move(req))
.OnReady(
[promise](absl::StatusOr<std::shared_ptr<CheckFutureResponse>>
response) mutable { promise.Set(response.status()); });
return Future<>(std::move(promise));
}
RpcHelper::RpcHelper(IfrtProxyVersion version,
std::shared_ptr<ClientSession> session)
: batcher_(std::make_unique<Batcher>(std::move(session))),
version_(std::move(version)) {}
RpcHelper::~RpcHelper() { Disconnect(); }
void RpcHelper::Batch(BatchOperation op, ArrayHandle handle) {
return batcher_->Batch(op, handle);
}
void RpcHelper::Disconnect() {
batcher_->Finish(absl::CancelledError("Disconnected by client"));
}
}
}
} | #include "xla/python/ifrt_proxy/client/rpc_helper.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/client/client_session.h"
#include "xla/python/ifrt_proxy/client/mock_client_session.h"
#include "xla/python/ifrt_proxy/client/version.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/common/test_utils.h"
#include "xla/python/ifrt_proxy/common/types.h"
#include "xla/python/ifrt_proxy/common/types.pb.h"
#include "tsl/platform/test.h"
using ::testing::_;
using ::testing::UnorderedElementsAre;
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
constexpr absl::Duration kMaxFlushTimeout = absl::Seconds(10);
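// Installs a test hook that keeps the background flusher from consuming
// batched operations, so the test controls when flushes happen. Blocks until
// the hook has been invoked at least once.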
void PausePeriodicFlushes() {
struct AtomicBool {
absl::Mutex mu;
bool b = false;
};
auto called_at_least_once = std::make_shared<AtomicBool>();
auto periodic_flusher_pause_hook = [called_at_least_once](bool* paused) {
*paused = true;
absl::MutexLock l(&called_at_least_once->mu);
called_at_least_once->b = true;
};
TestHookSet(TestHookName::kRpcBatcherPausePeriodicFlush,
std::move(periodic_flusher_pause_hook));
absl::MutexLock l(&called_at_least_once->mu);
CHECK(called_at_least_once->mu.AwaitWithTimeout(
absl::Condition(&called_at_least_once->b), kMaxFlushTimeout));
}
void ResumePeriodicFlushes() {
TestHookClear(TestHookName::kRpcBatcherPausePeriodicFlush);
}
class RpcHelperTest : public ::testing::Test {
public:
RpcHelperTest() : requests_(kMaxFlushTimeout) {
session_ = std::make_shared<MockClientSession>();
IfrtProxyVersion version;
version.set_protocol_version(kClientMaxVersion);
rpc_helper_ = std::make_shared<RpcHelper>(version, session_);
EXPECT_CALL(*session_, Finish(_)).Times(1);
ON_CALL(*session_, Enqueue)
.WillByDefault([this](std::unique_ptr<IfrtRequest> req) {
requests_.Push(std::move(req));
return Future<ClientSession::Response>(
absl::InternalError("Fake error response"));
});
}
std::shared_ptr<MockClientSession> session_;
std::shared_ptr<RpcHelper> rpc_helper_;
TestQueue<std::unique_ptr<IfrtRequest>> requests_;
};
TEST_F(RpcHelperTest, BatchedPeriodicFlush) {
PausePeriodicFlushes();
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{1});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{2});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{3});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{4});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{9});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{8});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{7});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{6});
ResumePeriodicFlushes();
auto delete_req = requests_.Pop();
auto destruct_req = requests_.Pop();
if (destruct_req->has_delete_array_request()) {
destruct_req.swap(delete_req);
}
EXPECT_THAT(destruct_req->destruct_array_request().array_handle(),
UnorderedElementsAre(1, 3, 9, 7));
EXPECT_THAT(delete_req->delete_array_request().array_handle(),
UnorderedElementsAre(2, 4, 8, 6));
}
TEST_F(RpcHelperTest, BatchedNoPeriodicFlush) {
PausePeriodicFlushes();
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{1});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{2});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{3});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{4});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{9});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{8});
rpc_helper_->Batch(RpcHelper::kDestructArray, ArrayHandle{7});
rpc_helper_->Batch(RpcHelper::kDeleteArray, ArrayHandle{6});
{
auto dummy_request = std::make_unique<CheckFutureRequest>();
dummy_request->set_future_handle(1);
rpc_helper_->CheckFuture(std::move(dummy_request));
requests_.AllowNonEmptyDestruction(true);
}
auto delete_req = requests_.Pop();
auto destruct_req = requests_.Pop();
if (destruct_req->has_delete_array_request()) {
destruct_req.swap(delete_req);
}
EXPECT_THAT(destruct_req->destruct_array_request().array_handle(),
UnorderedElementsAre(1, 3, 9, 7));
EXPECT_THAT(delete_req->delete_array_request().array_handle(),
UnorderedElementsAre(2, 4, 8, 6));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/rpc_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/client/rpc_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
957c48a5-fd83-42db-b8cb-de8f81d5dac2 | cpp | tensorflow/tensorflow | compiler | tensorflow/lite/delegates/gpu/gl/compiler.cc | third_party/xla/xla/service/compiler_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler.h"
#include <algorithm>
#include <any>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_inline.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_inplace.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.h"
#include "tensorflow/lite/delegates/gpu/gl/float16_conversions.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
namespace tflite {
namespace gpu {
namespace gl {
namespace {
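// Visitor over the Object::size variants that reports whether an object
// exceeds the GPU's maximum 2D image width/height (and array layer count).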
struct ExceedSizeChecker {
bool operator()(uint32_t v) const { return v > max_size.x; }
bool operator()(const uint2& v) const {
return v.x > max_size.x || v.y > max_size.y;
}
bool operator()(const uint3& v) const {
return v.x > max_size.x || v.y > max_size.y || v.z > max_z_size;
}
int2 max_size;
int max_z_size;
};
bool ExceedsMaxSize(const Object& object, const GpuInfo& gpu_info) {
ExceedSizeChecker size_checker;
size_checker.max_size =
int2(gpu_info.GetMaxImage2DWidth(), gpu_info.GetMaxImage2DHeight());
size_checker.max_z_size = gpu_info.GetMaxImage2DArrayLayers();
return std::visit(size_checker, object.size);
}
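// Heuristic: textures are preferred on Adreno GPUs, buffers elsewhere.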
ObjectType ChooseFastestObjectType(const GpuInfo& gpu_info) {
return gpu_info.IsAdreno() ? ObjectType::TEXTURE : ObjectType::BUFFER;
}
ObjectType ChooseFastestRefObjectType(const GpuInfo& gpu_info,
const CompilationOptions& options) {
if (!gpu_info.IsAdreno()) {
return ObjectType::BUFFER;
}
if (gpu_info.adreno_info.adreno_gpu == AdrenoGpu::kAdreno630) {
return ObjectType::TEXTURE;
} else {
return options.allow_precision_loss ? ObjectType::TEXTURE
: ObjectType::BUFFER;
}
}
class CompilerImpl : public Compiler {
public:
CompilerImpl(const NodeShader* node_shader, const GpuInfo* gpu_info,
const CompilationOptions& options)
: node_shader_(*node_shader), gpu_info_(*gpu_info), options_(options) {
if (options_.preferred_obj_type == ObjectType::UNKNOWN) {
options_.preferred_obj_type = ChooseFastestObjectType(*gpu_info);
}
if (options_.ref_obj_type == ObjectType::UNKNOWN) {
options_.ref_obj_type = ChooseFastestRefObjectType(*gpu_info, options);
}
#ifdef __ANDROID__
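    // Note: on Adreno 660 with Android SDK level 30, precision loss (FP16)
    // is force-disabled; this appears to work around a device-specific
    // driver issue (assumption).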
if (gpu_info_.IsAdreno() &&
gpu_info_.adreno_info.adreno_gpu == AdrenoGpu::kAdreno660) {
char sdk_version[PROP_VALUE_MAX];
__system_property_get("ro.build.version.sdk", sdk_version);
if (!strcmp(sdk_version, "30")) options_.allow_precision_loss = false;
}
#endif
}
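  // Compilation pipeline: copy the graph, generate per-node shader code,
  // apply optional fusion transforms, assign objects (buffer vs. texture)
  // under size/binding limits, then emit final shader code via `callback`.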
absl::Status Compile(
const GraphFloat32& graph,
const std::unordered_set<int>& tflite_graph_io,
const ShaderCodeCallback& callback) final {
RETURN_IF_ERROR(graph.MakeExactCopy(&compiled_graph_));
if (options_.dynamic_batch) {
for (auto value : compiled_graph_.values()) {
value->tensor.shape.b = 1;
}
}
for (auto node : compiled_graph_.nodes()) {
CompiledNodeAttributes attr;
attr.node_indices.push_back(node->id);
NodeShader::GenerationContext ctx = {&gpu_info_, options_,
node->operation.type,
node->operation.attributes};
for (const auto& tensor : graph.FindInputs(node->id)) {
const auto& shape = tensor->tensor.shape;
ctx.input_shapes.push_back({shape.b, shape.h, shape.w, shape.c});
}
for (const auto& tensor : graph.FindOutputs(node->id)) {
const auto& shape = tensor->tensor.shape;
ctx.output_shapes.push_back({shape.b, shape.h, shape.w, shape.c});
}
RETURN_IF_ERROR(node_shader_.GenerateCode(ctx, &attr.code));
node->operation.attributes = std::move(attr);
}
ModelTransformer transformer(&compiled_graph_);
if (options_.fuse_operations) {
FuseAutoOutputWithInline fuse_inline;
if (!transformer.Apply("fuse_auto_with_inline", &fuse_inline)) {
return absl::InternalError("fuse_auto_with_inline failed");
}
FuseInplaceUpdate fuse_inplace;
if (!transformer.Apply("fuse_inplace_update", &fuse_inplace)) {
return absl::InternalError("fuse_inplace failed");
}
if (options_.auto_input_fusion) {
FuseAutoInput fuse_auto_input;
if (!transformer.Apply("fuse_auto_input", &fuse_auto_input)) {
return absl::InternalError("fuse_auto_input failed");
}
}
}
RemoveUnusedInplaceUpdates remove_inplace_updates;
if (!transformer.Apply("remove_inplace_updates", &remove_inplace_updates)) {
return absl::InternalError("remove_inplace_updates failed");
}
absl::flat_hash_map<ValueId, Object> objects;
for (auto value : compiled_graph_.values()) {
Object object = MakePHWC4Ref(value->id, value->tensor.shape);
object.data_type = value->tensor.type;
const bool is_external =
graph.IsGraphInput(value->id) || graph.IsGraphOutput(value->id) ||
tflite_graph_io.find(value->tensor.ref) != tflite_graph_io.end();
if (is_external) {
object.object_type = ObjectType::BUFFER;
} else if (options_.allow_precision_loss) {
MaybeConvertToFloat16(&object);
}
objects[value->id] = std::move(object);
}
for (auto node : compiled_graph_.nodes()) {
auto& attr =
std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
if (attr.code.workload == uint3()) {
auto outputs = compiled_graph_.FindOutputs(node->id);
auto shape = outputs[0]->tensor.shape;
for (auto output : outputs) {
if (shape != output->tensor.shape) {
return absl::FailedPreconditionError(
"Workload uint3() requires all output sizes to match");
}
}
attr.code.workload = uint3(shape.w, shape.h, DivideRoundUp(shape.c, 4));
}
int num_textures = 0;
auto set_object_type = [&](Object* object) {
if (object->object_type == ObjectType::BUFFER) {
return;
}
bool is_ref = IsRef(*object);
if (num_textures < gpu_info_.GetMaxImageArguments() &&
!ExceedsMaxSize(*object, gpu_info_) &&
(object->object_type == ObjectType::TEXTURE ||
(is_ref && options_.ref_obj_type == ObjectType::TEXTURE) ||
(!is_ref && options_.preferred_obj_type == ObjectType::TEXTURE))) {
object->object_type = ObjectType::TEXTURE;
num_textures++;
} else {
object->object_type = ObjectType::BUFFER;
}
};
for (auto& object : attr.code.objects) {
if (options_.allow_precision_loss) {
MaybeConvertToFloat16(&object.second);
}
set_object_type(&object.second);
}
for (auto ref : compiled_graph_.FindInputs(node->id)) {
set_object_type(&objects[ref->id]);
}
for (auto ref : compiled_graph_.FindOutputs(node->id)) {
set_object_type(&objects[ref->id]);
}
}
ShaderCodegen codegen(options_, gpu_info_);
for (auto node : compiled_graph_.nodes()) {
auto& attr =
std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
if (attr.code.source_code.empty()) {
continue;
}
for (auto ref : compiled_graph_.FindInputs(node->id)) {
auto object = objects[ref->id];
object.access = AccessType::READ;
attr.inputs.push_back(object);
}
for (auto ref : compiled_graph_.FindOutputs(node->id)) {
auto object = objects[ref->id];
object.access = AccessType::WRITE;
attr.outputs.push_back(object);
}
uint32_t binding = 0;
auto set_binding = [&](ObjectType type, Object& object) {
if (object.object_type == type) {
object.binding = binding++;
}
};
for (auto& object : attr.inputs) {
set_binding(ObjectType::TEXTURE, object);
}
for (auto& object : attr.outputs) {
set_binding(ObjectType::TEXTURE, object);
}
for (auto& object : attr.code.objects) {
set_binding(ObjectType::TEXTURE, object.second);
}
for (auto& object : attr.inputs) {
set_binding(ObjectType::BUFFER, object);
}
for (auto& object : attr.outputs) {
set_binding(ObjectType::BUFFER, object);
}
for (auto& object : attr.code.objects) {
set_binding(ObjectType::BUFFER, object.second);
}
ShaderCode shader_code;
RETURN_IF_ERROR(codegen.Build(std::move(attr), &shader_code));
RETURN_IF_ERROR(callback(std::move(shader_code)));
}
return absl::OkStatus();
}
private:
const NodeShader& node_shader_;
const GpuInfo& gpu_info_;
CompilationOptions options_;
GraphFloat32 compiled_graph_;
};
}
std::unique_ptr<Compiler> NewCompiler(const NodeShader* node_shader,
const GpuInfo* gpu_info,
const CompilationOptions& options) {
return std::make_unique<CompilerImpl>(node_shader, gpu_info, options);
}
}
}
} | #include "xla/service/compiler.h"
#include <gtest/gtest.h>
#include "xla/autotune_results.pb.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(TargetConfigTest, DISABLED_ON_CPU(ExecutorConstructorFillsAllFields)) {
TF_ASSERT_OK(stream_executor::ValidateGPUMachineManager());
TF_ASSERT_OK_AND_ASSIGN(
stream_executor::StreamExecutor * executor,
stream_executor::GPUMachineManager()->ExecutorForDevice(0));
Compiler::TargetConfig config(executor);
stream_executor::GpuTargetConfigProto target = config.ToProto();
EXPECT_GT(target.dnn_version_info().major(), 0) << target.DebugString();
EXPECT_GT(target.gpu_device_info().threads_per_block_limit(), 0)
<< target.DebugString();
EXPECT_NE(target.device_description_str(), "") << target.DebugString();
EXPECT_NE(target.platform_name(), "") << target.DebugString();
EXPECT_EQ(target.autotune_results().version(), 0);
EXPECT_EQ(5,
stream_executor::GpuTargetConfigProto::descriptor()->field_count())
<< "Make sure all the fields in GpuTargetConfigProto are set and "
"validated!";
}
TEST(TargetConfigTest, ProtoConstructorFillsAllFields) {
stream_executor::GpuTargetConfigProto config_proto;
config_proto.set_platform_name("platform");
config_proto.mutable_dnn_version_info()->set_major(2);
config_proto.mutable_gpu_device_info()->set_threads_per_block_limit(5);
config_proto.set_device_description_str("foo");
Compiler::TargetConfig config(config_proto);
stream_executor::GpuTargetConfigProto target = config.ToProto();
EXPECT_EQ(target.dnn_version_info().major(),
config_proto.dnn_version_info().major())
<< target.DebugString();
EXPECT_EQ(target.gpu_device_info().threads_per_block_limit(), 5)
<< target.DebugString();
EXPECT_EQ(target.device_description_str(), "foo") << target.DebugString();
EXPECT_EQ(target.platform_name(), "platform") << target.DebugString();
EXPECT_EQ(target.autotune_results().version(), 0);
EXPECT_EQ(5,
stream_executor::GpuTargetConfigProto::descriptor()->field_count())
<< "Make sure all the fields in GpuTargetConfigProto are set and "
"validated!";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3b52274-8026-4ff3-9390-a910d61136c0 | cpp | tensorflow/tensorflow | array_spec | third_party/xla/xla/python/ifrt/array_spec.cc | third_party/xla/xla/python/ifrt/array_spec_test.cc | #include "xla/python/ifrt/array_spec.h"
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
absl::StatusOr<ArraySpec> ArraySpec::FromProto(
DeviceList::LookupDeviceFunc lookup_device, const ArraySpecProto& proto) {
TF_ASSIGN_OR_RETURN(auto dtype, DType::FromProto(proto.dtype()));
TF_ASSIGN_OR_RETURN(auto shape, Shape::FromProto(proto.shape()));
TF_ASSIGN_OR_RETURN(auto sharding,
Sharding::FromProto(lookup_device, proto.sharding()));
return ArraySpec{dtype, std::move(shape),
std::move(sharding)};
}
absl::StatusOr<ArraySpecProto> ArraySpec::ToProto() const {
ArraySpecProto proto;
*proto.mutable_dtype() = dtype.ToProto();
*proto.mutable_shape() = shape.ToProto();
TF_ASSIGN_OR_RETURN(*proto.mutable_sharding(), sharding->ToProto());
return proto;
}
std::string ArraySpec::DebugString() const {
return absl::StrCat("ArraySpec(dtype=", dtype.DebugString(),
",shape=", shape.DebugString(),
",sharding=", sharding->DebugString(), ")");
}
}
} | #include "xla/python/ifrt/array_spec.h"
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
class ArraySpecTest : public test_util::DeviceTest {};
TEST_P(ArraySpecTest, ToFromProto) {
auto device_list = GetDevices({0, 1});
DType dtype(DType::kS32);
Shape shape({4, 2});
Shape shard_shape({2, 2});
ArraySpec spec{dtype, shape,
ConcreteEvenSharding::Create(device_list, MemoryKind(),
shape,
shard_shape)};
auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> {
return client()->LookupDevice(device_id);
};
TF_ASSERT_OK_AND_ASSIGN(const ArraySpecProto proto, spec.ToProto());
TF_ASSERT_OK_AND_ASSIGN(const ArraySpec array_spec_copy,
ArraySpec::FromProto(lookup_device_func, proto));
EXPECT_EQ(array_spec_copy.dtype, dtype);
EXPECT_EQ(array_spec_copy.shape, shape);
const auto* sharding =
llvm::dyn_cast<ConcreteEvenSharding>(array_spec_copy.sharding.get());
ASSERT_NE(sharding, nullptr);
EXPECT_EQ(*sharding->devices(), *spec.sharding->devices());
EXPECT_EQ(sharding->memory_kind(), spec.sharding->memory_kind());
EXPECT_EQ(sharding->shape(), shape);
EXPECT_EQ(sharding->shard_shape(), shard_shape);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ArraySpecTest,
testing::Values(test_util::DeviceTestParam{
2,
2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/array_spec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/array_spec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
387533af-8dbc-4729-9ca7-b372cd4e2a20 | cpp | tensorflow/tensorflow | value | third_party/xla/xla/python/ifrt/value.cc | tensorflow/core/tfrt/mlrt/interpreter/value_test.cc | #include "xla/python/ifrt/value.h"
namespace xla {
namespace ifrt {
char Value::ID = 0;
}
} | #include "tensorflow/core/tfrt/mlrt/interpreter/value.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
namespace mlrt {
namespace {
TEST(ValueTest, SmallCopyable) {
struct SmallCopyable {
int v;
};
Value value(SmallCopyable{100});
EXPECT_EQ(value.Get<SmallCopyable>().v, 100);
Value value_copy(value);
EXPECT_EQ(value_copy.Get<SmallCopyable>().v, 100);
EXPECT_EQ(value.Get<SmallCopyable>().v, 100);
Value value_move = std::move(value);
EXPECT_EQ(value_move.Get<SmallCopyable>().v, 100);
EXPECT_FALSE(value.HasValue());
ASSERT_TRUE(value_move.HasValue());
value_move.Destroy<SmallCopyable>();
EXPECT_FALSE(value_move.HasValue());
value_move = SmallCopyable{100};
EXPECT_EQ(value_move.Get<SmallCopyable>().v, 100);
}
TEST(ValueTest, LargeCopyable) {
constexpr char kData[] =
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>";
static_assert(sizeof(kData) == 128);
struct LargeCopyable {
char data[128] =
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>";
};
Value value(LargeCopyable{});
EXPECT_EQ(absl::string_view(value.Get<LargeCopyable>().data), kData);
Value value_copy = value;
EXPECT_EQ(absl::string_view(value_copy.Get<LargeCopyable>().data), kData);
EXPECT_EQ(absl::string_view(value.Get<LargeCopyable>().data), kData);
Value value_move = std::move(value);
EXPECT_EQ(absl::string_view(value_move.Get<LargeCopyable>().data), kData);
EXPECT_FALSE(value.HasValue());
ASSERT_TRUE(value_move.HasValue());
value_move.Destroy<LargeCopyable>();
EXPECT_FALSE(value_move.HasValue());
value_move = LargeCopyable{};
EXPECT_EQ(absl::string_view(value_move.Get<LargeCopyable>().data), kData);
}
TEST(ValueTest, SmallMoveOnly) {
struct SmallMoveOnly {
int v;
explicit SmallMoveOnly(int v) : v(v) {}
SmallMoveOnly(const SmallMoveOnly&) = delete;
SmallMoveOnly& operator=(const SmallMoveOnly&) = delete;
SmallMoveOnly(SmallMoveOnly&&) = default;
SmallMoveOnly& operator=(SmallMoveOnly&&) = default;
};
Value value(SmallMoveOnly(100));
EXPECT_EQ(value.Get<SmallMoveOnly>().v, 100);
Value value_move = std::move(value);
EXPECT_EQ(value_move.Get<SmallMoveOnly>().v, 100);
EXPECT_FALSE(value.HasValue());
}
TEST(ValueTest, LargeMoveOnly) {
constexpr char kData[] =
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>";
static_assert(sizeof(kData) == 128);
struct LargeMoveOnly {
char data[128] =
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>\n"
"<<This line contains 32 bytes>>";
LargeMoveOnly() = default;
LargeMoveOnly(const LargeMoveOnly&) = delete;
LargeMoveOnly& operator=(const LargeMoveOnly&) = delete;
LargeMoveOnly(LargeMoveOnly&&) = default;
LargeMoveOnly& operator=(LargeMoveOnly&&) = default;
};
Value value(LargeMoveOnly{});
EXPECT_EQ(absl::string_view(value.Get<LargeMoveOnly>().data), kData);
Value value_move = std::move(value);
EXPECT_EQ(absl::string_view(value_move.Get<LargeMoveOnly>().data), kData);
EXPECT_FALSE(value.HasValue());
}
TEST(ValueTest, Error) {
Value arg(100);
arg.HandleError(arg);
EXPECT_EQ(arg.Get<int>(), 100);
struct Small {
int* v = nullptr;
void HandleError(Value* arg) { *v = arg->Get<int>(); }
};
int v = 0;
Value value(Small{&v});
value.HandleError(arg);
EXPECT_EQ(v, 100);
EXPECT_EQ(*value.Get<Small>().v, 100);
struct Large {
int* v = nullptr;
char data[128];
void HandleError(Value* arg) { *v = arg->Get<int>(); }
};
v = 0;
value = Value(Large{&v});
value.HandleError(arg);
EXPECT_EQ(v, 100);
EXPECT_EQ(*value.Get<Large>().v, 100);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/interpreter/value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8065cdc5-9bf1-4411-a088-8e357458b8c8 | cpp | tensorflow/tensorflow | custom_call_program_serdes | third_party/xla/xla/python/ifrt/custom_call_program_serdes.cc | third_party/xla/xla/python/ifrt/custom_call_program_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/custom_call_program.h"
#include "xla/python/ifrt/custom_call_program.pb.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/sharding.pb.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
class CustomCallProgramSerDes
: public llvm::RTTIExtends<CustomCallProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::CustomCallProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const CustomCallProgram& program =
llvm::cast<CustomCallProgram>(serializable);
CustomCallProgramProto proto;
proto.set_type(program.type);
proto.set_name(program.name);
absl::CopyCordToString(program.serialized_program_text,
proto.mutable_serialized_program_text());
*proto.mutable_devices() = program.devices->ToProto();
for (const ArraySpec& spec : program.input_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_input_specs(), spec.ToProto());
}
for (const ArraySpec& spec : program.output_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_output_specs(), spec.ToProto());
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_program_options =
llvm::cast<DeserializeProgramOptions>(options.get());
CustomCallProgramProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized CustomCallProgramProto");
}
TF_ASSIGN_OR_RETURN(
tsl::RCReference<DeviceList> devices,
DeviceList::FromProto(deserialize_program_options->lookup_device,
proto.devices()));
std::vector<ArraySpec> input_specs;
input_specs.reserve(proto.input_specs_size());
for (const ArraySpecProto& spec_proto : proto.input_specs()) {
TF_ASSIGN_OR_RETURN(
ArraySpec spec,
ArraySpec::FromProto(deserialize_program_options->lookup_device,
spec_proto));
input_specs.push_back(std::move(spec));
}
std::vector<ArraySpec> output_specs;
output_specs.reserve(proto.output_specs_size());
for (const ArraySpecProto& spec_proto : proto.output_specs()) {
TF_ASSIGN_OR_RETURN(
ArraySpec spec,
ArraySpec::FromProto(deserialize_program_options->lookup_device,
spec_proto));
output_specs.push_back(std::move(spec));
}
return std::make_unique<CustomCallProgram>(
proto.type(), proto.name(),
absl::Cord(std::move(*proto.mutable_serialized_program_text())),
std::move(devices),
std::move(input_specs),
std::move(output_specs));
}
static char ID;
};
class CustomCallCompileOptionsSerDes
: public llvm::RTTIExtends<CustomCallCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::CustomCallCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
if (!serialized.empty()) {
return absl::InvalidArgumentError(
"Invalid serialized CustomCallCompileOptions; a serialized "
"CustomCallCompileOptions is expected to be an empty string");
}
return std::make_unique<CustomCallCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char CustomCallProgramSerDes::ID = 0;
[[maybe_unused]] char CustomCallCompileOptionsSerDes::ID = 0;
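// Static initializers that register the SerDes implementations above.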
bool register_custom_call_program_serdes = ([]{
RegisterSerDes<CustomCallProgram>(
std::make_unique<CustomCallProgramSerDes>());
}(), true);
bool register_custom_call_compile_options_serdes = ([]{
RegisterSerDes<CustomCallCompileOptions>(
std::make_unique<CustomCallCompileOptionsSerDes>());
}(), true);
}
}
} | #include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/custom_call_program.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::MatchesRegex;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
class CustomCallProgramSerDesTest : public test_util::DeviceTest {};
TEST_P(CustomCallProgramSerDesTest, RoundTrip) {
Shape shape0({10, 20});
Shape shard_shape0({5, 20});
tsl::RCReference<DeviceList> devices = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
ConcreteEvenSharding::Create(devices, MemoryKind(),
shape0,
shard_shape0);
Shape shape1({});
Shape shard_shape1({});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(devices, MemoryKind(),
shape1,
shard_shape1);
CustomCallProgram orig(
"test type",
"test name",
absl::Cord("test\0program\0text\0"),
std::move(devices),
{
ArraySpec{DType(DType::kF32), shape0,
sharding0},
},
{
ArraySpec{DType(DType::kF32), shape1,
sharding1},
});
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<CustomCallProgram> deserialized_program,
Deserialize<CustomCallProgram>(
serialized, std::make_unique<DeserializeProgramOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_EQ(deserialized_program->type, "test type");
EXPECT_EQ(deserialized_program->name, "test name");
EXPECT_EQ(deserialized_program->serialized_program_text,
absl::Cord("test\0program\0text\0").Flatten());
EXPECT_EQ(*deserialized_program->devices, *orig.devices);
ASSERT_THAT(deserialized_program->input_specs, SizeIs(1));
EXPECT_EQ(deserialized_program->input_specs.front().dtype,
DType(DType::kF32));
EXPECT_EQ(deserialized_program->input_specs.front().shape, shape0);
const auto* deserialized_sharding0 = llvm::dyn_cast<ConcreteEvenSharding>(
deserialized_program->input_specs.front().sharding.get());
ASSERT_NE(deserialized_sharding0, nullptr);
EXPECT_EQ(*deserialized_sharding0->devices(), *sharding0->devices());
EXPECT_EQ(deserialized_sharding0->shape(), shape0);
EXPECT_EQ(deserialized_sharding0->shard_shape(), shard_shape0);
ASSERT_THAT(deserialized_program->output_specs, SizeIs(1));
EXPECT_EQ(deserialized_program->output_specs.front().dtype,
DType(DType::kF32));
EXPECT_EQ(deserialized_program->output_specs.front().shape, shape1);
const auto* deserialized_sharding1 = llvm::dyn_cast<ConcreteEvenSharding>(
deserialized_program->output_specs.front().sharding.get());
ASSERT_NE(deserialized_sharding1, nullptr);
EXPECT_EQ(*deserialized_sharding1->devices(), *sharding1->devices());
EXPECT_EQ(deserialized_sharding1->shape(), shape1);
EXPECT_EQ(deserialized_sharding1->shard_shape(), shard_shape1);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, CustomCallProgramSerDesTest,
testing::Values(test_util::DeviceTestParam{
2,
2}));
TEST(CustomCallCompileOptionsSerDesTest, RoundTrip) {
CustomCallCompileOptions orig;
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_EXPECT_OK(
Deserialize<CustomCallCompileOptions>(serialized, nullptr)
.status());
}
TEST(CustomCallCompileOptionsSerDesTest, InvalidSerialized) {
CustomCallCompileOptions orig;
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
serialized.set_data("abc");
EXPECT_THAT(
Deserialize<CustomCallCompileOptions>(serialized, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("Invalid serialized CustomCallCompileOptions.*")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/custom_call_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/custom_call_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4c398408-d0f2-4cfb-a406-fe8a0f809c2d | cpp | tensorflow/tensorflow | device_list | third_party/xla/xla/python/ifrt/device_list.cc | third_party/xla/xla/python/ifrt/device_list_test.cc | #include "xla/python/ifrt/device_list.h"
#include <atomic>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/base/optimization.h"
#include "absl/hash/hash.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device.pb.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
char DeviceList::ID = 0;
char BasicDeviceList::ID = 0;
absl::StatusOr<tsl::RCReference<DeviceList>> DeviceList::FromProto(
LookupDeviceFunc lookup_device, const DeviceListProto& proto) {
BasicDeviceList::Devices devices;
devices.reserve(proto.device_ids_size());
for (int device_id : proto.device_ids()) {
TF_ASSIGN_OR_RETURN(Device * device, lookup_device(DeviceId(device_id)));
devices.push_back(device);
}
return BasicDeviceList::Create(std::move(devices));
}
DeviceListProto DeviceList::ToProto() const {
DeviceListProto proto;
proto.mutable_device_ids()->Reserve(devices().size());
for (Device* device : devices()) {
proto.mutable_device_ids()->AddAlreadyReserved(device->Id().value());
}
return proto;
}
tsl::RCReference<DeviceList> BasicDeviceList::Create(Devices devices) {
return tsl::MakeRef<BasicDeviceList>(std::move(devices));
}
BasicDeviceList::BasicDeviceList(Devices devices)
: devices_(std::move(devices)), hash_(kUnsetHash) {}
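// Lazily computes and caches the addressable subset of devices. When every
// device is already addressable, the cache points back at this list instead
// of allocating a copy.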
DeviceList* BasicDeviceList::AddressableDeviceList() const {
absl::call_once(addressable_device_list_cache_.once_flag, [this] {
Devices addressable_devices;
for (Device* device : devices_) {
if (device->IsAddressable()) {
addressable_devices.push_back(device);
}
}
const bool already_fully_addressable =
addressable_devices.size() == devices_.size();
if (already_fully_addressable) {
addressable_device_list_cache_.device_list =
const_cast<BasicDeviceList*>(this);
} else {
addressable_device_list_cache_.device_list_holder =
BasicDeviceList::Create(std::move(addressable_devices));
addressable_device_list_cache_.device_list =
addressable_device_list_cache_.device_list_holder.get();
}
});
return addressable_device_list_cache_.device_list;
}
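// The hash is computed lazily and cached. kUnsetHash is the "not yet
// computed" sentinel, so a computed value that collides with it is bumped
// by one.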
uint64_t BasicDeviceList::hash() const {
uint64_t hash = hash_.load(std::memory_order_relaxed);
if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) {
hash = absl::HashOf(devices());
if (ABSL_PREDICT_FALSE(hash == kUnsetHash)) {
++hash;
}
hash_.store(hash, std::memory_order_relaxed);
}
return hash;
}
std::string BasicDeviceList::ToString() const {
return absl::StrCat("BasicDeviceList([",
absl::StrJoin(devices_, ",",
[](std::string* out, Device* device) {
absl::StrAppend(out,
device->DebugString());
}),
"])");
}
std::vector<DeviceId> GetDeviceIds(
const tsl::RCReference<DeviceList>& device_list) {
std::vector<DeviceId> ids;
ids.reserve(device_list->devices().size());
for (const Device* device : device_list->devices()) {
ids.push_back(device->Id());
}
return ids;
}
}
} | #include "xla/python/ifrt/device_list.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device.pb.h"
#include "xla/python/ifrt/device_test_util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
class DeviceListTest : public test_util::DeviceTest {};
TEST_P(DeviceListTest, ToFromProto) {
auto device_list = GetDevices({0, 1});
DeviceListProto proto = device_list->ToProto();
auto lookup_device_func = [&](DeviceId device_id) -> absl::StatusOr<Device*> {
return client()->LookupDevice(device_id);
};
TF_ASSERT_OK_AND_ASSIGN(auto device_list_copy,
DeviceList::FromProto(lookup_device_func, proto));
EXPECT_EQ(*device_list_copy, *device_list);
}
TEST_P(DeviceListTest, AddressableDevices) {
auto device_list = GetDevices({0, 1});
std::vector<Device*> addressable_devices;
for (Device* device : device_list->devices()) {
if (device->IsAddressable()) {
addressable_devices.push_back(device);
}
}
EXPECT_THAT(device_list->AddressableDeviceList()->devices(),
ElementsAreArray(addressable_devices));
}
TEST_P(DeviceListTest, AddressableDevicesFromConcurrentCalls) {
auto device_list = GetDevices({0, 1});
const int num_threads = 16;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), tsl::ThreadOptions(), "test_pool",
std::min(num_threads, tsl::port::MaxParallelism()));
std::vector<DeviceList*> addressable_device_lists(num_threads);
for (int i = 0; i < num_threads; ++i) {
thread_pool->Schedule([&, i]() {
addressable_device_lists[i] = device_list->AddressableDeviceList();
addressable_device_lists[i]->devices().front()->Id();
});
}
thread_pool.reset();
for (int i = 0; i < num_threads; ++i) {
EXPECT_EQ(*addressable_device_lists[i],
*device_list->AddressableDeviceList());
}
}
TEST_P(DeviceListTest, IdenticalHashFromConcurrentCalls) {
auto device_list = GetDevices({0, 1});
const int num_threads = 16;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), tsl::ThreadOptions(), "test_pool",
std::min(num_threads, tsl::port::MaxParallelism()));
std::vector<uint64_t> hashes(num_threads);
for (int i = 0; i < num_threads; ++i) {
thread_pool->Schedule([&, i]() { hashes[i] = device_list->hash(); });
}
thread_pool.reset();
for (int i = 0; i < num_threads; ++i) {
EXPECT_EQ(hashes[i], device_list->hash());
}
EXPECT_NE(device_list->hash(), 0);
}
TEST_P(DeviceListTest, EqualityTest) {
auto device_list1 = GetDevices({0, 1});
auto device_list2 = GetDevices({0, 1});
EXPECT_EQ(*device_list1, *device_list2);
auto device_list3 = device_list1;
EXPECT_EQ(*device_list1, *device_list3);
auto device_list4 = std::move(device_list2);
EXPECT_EQ(*device_list1, *device_list4);
auto device_list5 = GetDevices({0});
EXPECT_NE(*device_list1, *device_list5);
auto device_list6 = GetDevices({1, 0});
EXPECT_NE(*device_list1, *device_list6);
}
INSTANTIATE_TEST_SUITE_P(
NumDevices, DeviceListTest,
testing::Values(test_util::DeviceTestParam{2,
1},
test_util::DeviceTestParam{2,
2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/device_list.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/device_list_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
86b85201-3336-4062-a4b9-5007b589da3e | cpp | tensorflow/tensorflow | index_domain | third_party/xla/xla/python/ifrt/index_domain.cc | third_party/xla/xla/python/ifrt/index_domain_test.cc | #include "xla/python/ifrt/index_domain.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace xla {
namespace ifrt {
std::string IndexDomain::DebugString() const {
return absl::StrCat("IndexDomain(origin=", origin_.DebugString(),
",shape=", shape_.DebugString(), ")");
}
std::ostream& operator<<(std::ostream& os, const IndexDomain& index_domain) {
return os << index_domain.DebugString();
}
}
} | #include "xla/python/ifrt/index_domain.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/shape.h"
namespace xla {
namespace ifrt {
namespace {
TEST(IndexDomainTest, Construction) {
IndexDomain a(Index({1, 2}), Shape({3, 4}));
EXPECT_EQ(a.origin(), Index({1, 2}));
EXPECT_EQ(a.shape(), Shape({3, 4}));
IndexDomain b(Shape({3, 4}));
EXPECT_EQ(b.origin(), Index({0, 0}));
EXPECT_EQ(b.shape(), Shape({3, 4}));
}
TEST(IndexDomainTest, Operations) {
IndexDomain a(Index({1, 2}), Shape({3, 4}));
Index b({1, 2});
EXPECT_EQ(a + b, IndexDomain(Index({2, 4}), Shape({3, 4})));
{
IndexDomain c = a;
EXPECT_EQ(c += b, IndexDomain(Index({2, 4}), Shape({3, 4})));
}
EXPECT_EQ(a - b, IndexDomain(Index({0, 0}), Shape({3, 4})));
{
IndexDomain c = a;
EXPECT_EQ(c -= b, IndexDomain(Index({0, 0}), Shape({3, 4})));
}
}
TEST(IndexDomainTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{IndexDomain(Index({1, 2}), Shape({3, 4})),
IndexDomain(Index({1, 2}), Shape({4, 3})),
IndexDomain(Index({2, 1}), Shape({3, 4})),
IndexDomain(Index({2, 1}), Shape({4, 3}))}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index_domain.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index_domain_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c504c794-17d6-46c9-82b6-e56d0e7dac4f | cpp | tensorflow/tensorflow | serdes | third_party/xla/xla/python/ifrt/serdes.cc | third_party/xla/xla/python/ifrt/serdes_test.cc | #include "xla/python/ifrt/serdes.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
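// Process-wide registry that maps a Serializable's type id (used when
// serializing) and a SerDes type name (used when deserializing) to the
// registered SerDes instance.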
struct Registry {
absl::Mutex mu;
absl::flat_hash_map<const void*, SerDes*> type_id_to_serdes
ABSL_GUARDED_BY(mu);
absl::flat_hash_map<absl::string_view, SerDes*> name_to_serdes
ABSL_GUARDED_BY(mu);
};
Registry* registry() {
static auto* r = new Registry();
return r;
}
}
char Serializable::ID = 0;
char DeserializeOptions::ID = 0;
char SerDes::ID = 0;
void RegisterSerDes(const void* type_id, std::unique_ptr<SerDes> serdes) {
Registry* const r = registry();
absl::MutexLock l(&r->mu);
CHECK(r->type_id_to_serdes.insert({type_id, serdes.get()}).second)
<< "xla::ifrt::SerDes cannot be registered more than once for the same "
"type id: "
<< type_id;
const absl::string_view name = serdes->type_name();
CHECK(r->name_to_serdes.insert({name, serdes.get()}).second)
<< "xla::ifrt::SerDes cannot be registered more than once for the same "
"name: "
<< name;
serdes.release();
}
absl::StatusOr<Serialized> Serialize(Serializable& serializable) {
SerDes* serdes;
{
Registry* const r = registry();
absl::MutexLock l(&r->mu);
auto it = r->type_id_to_serdes.find(serializable.dynamicClassID());
if (it == r->type_id_to_serdes.end()) {
return absl::UnimplementedError(
"Serialize call failed. Serializable has no associated SerDes "
"implementation");
}
serdes = it->second;
}
TF_ASSIGN_OR_RETURN(std::string data, serdes->Serialize(serializable));
Serialized proto;
proto.set_type_name(std::string(serdes->type_name()));
proto.set_data(std::move(data));
return proto;
}
namespace serdes_internal {
absl::StatusOr<std::unique_ptr<Serializable>> DeserializeUnchecked(
const Serialized& serialized, std::unique_ptr<DeserializeOptions> options) {
SerDes* serdes;
{
Registry* const r = registry();
absl::MutexLock l(&r->mu);
auto it = r->name_to_serdes.find(serialized.type_name());
if (it == r->name_to_serdes.end()) {
return absl::UnimplementedError(absl::StrCat(
"Deserialize call failed. Serializable has no associated SerDes ",
"implementation. type_name: ", serialized.type_name()));
}
serdes = it->second;
}
return serdes->Deserialize(serialized.data(), std::move(options));
}
}
}
} | #include "xla/python/ifrt/serdes.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::tsl::testing::StatusIs;
struct TestNumberDeserializeOptions;
struct TestNumber : llvm::RTTIExtends<TestNumber, Serializable> {
using DeserializeOptions = TestNumberDeserializeOptions;
int number;
explicit TestNumber(int number) : number(number) {}
static char ID;
};
[[maybe_unused]] char TestNumber::ID = 0;
struct TestNumberDeserializeOptions
: llvm::RTTIExtends<TestNumberDeserializeOptions, DeserializeOptions> {
absl::Status injected_failure;
static char ID;
};
[[maybe_unused]] char TestNumberDeserializeOptions::ID = 0;
class TestNumberSerDes : public llvm::RTTIExtends<TestNumberSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::TestNumber";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const TestNumber& obj = llvm::cast<TestNumber>(serializable);
return absl::StrCat(obj.number);
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
if (options != nullptr) {
auto* deserialize_options =
llvm::cast<TestNumberDeserializeOptions>(options.get());
TF_RETURN_IF_ERROR(deserialize_options->injected_failure);
}
int number;
if (!absl::SimpleAtoi(serialized, &number)) {
return absl::DataLossError("Unable to parse serialized TestNumber");
}
return std::make_unique<TestNumber>(number);
}
static char ID;
};
[[maybe_unused]] char TestNumberSerDes::ID = 0;
class TestNumberTest : public testing::Test {
protected:
static void SetUpTestSuite() {
RegisterSerDes<TestNumber>(std::make_unique<TestNumberSerDes>());
}
};
TEST_F(TestNumberTest, RoundTrip) {
auto obj = std::make_unique<TestNumber>(1234);
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*obj));
TF_ASSERT_OK_AND_ASSIGN(
auto deserialized,
Deserialize<TestNumber>(serialized, nullptr));
EXPECT_EQ(obj->number, deserialized->number);
}
TEST_F(TestNumberTest, WithOptions) {
auto obj = std::make_unique<TestNumber>(1234);
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*obj));
auto options = std::make_unique<TestNumberDeserializeOptions>();
options->injected_failure = absl::InternalError("injected failure");
EXPECT_THAT(Deserialize<TestNumber>(serialized, std::move(options)),
StatusIs(absl::StatusCode::kInternal, "injected failure"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
901d41c8-873e-4d38-b328-5d78ce6e163b | cpp | tensorflow/tensorflow | dtype | third_party/xla/xla/python/ifrt/dtype.cc | third_party/xla/xla/python/ifrt/dtype_test.cc | #include "xla/python/ifrt/dtype.h"
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt/dtype.pb.h"
namespace xla {
namespace ifrt {
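// Returns the per-element size in bytes, or std::nullopt for sub-byte integer
// types and for kinds without a fixed byte size (token, string, invalid).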
std::optional<int> DType::byte_size() const {
switch (kind_) {
case kS2:
case kU2:
case kS4:
case kU4:
return std::nullopt;
case kPred:
case kS8:
case kU8:
case kF8E3M4:
case kF8E4M3:
case kF8E4M3FN:
case kF8E4M3B11FNUZ:
case kF8E4M3FNUZ:
case kF8E5M2:
case kF8E5M2FNUZ:
return 1;
case kS16:
case kU16:
case kF16:
case kBF16:
return 2;
case kS32:
case kU32:
case kF32:
return 4;
case kS64:
case kU64:
case kF64:
case kC64:
return 8;
case kC128:
return 16;
case kToken:
case kInvalid:
case kString:
return std::nullopt;
}
}
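// Returns the per-element size in bits, or std::nullopt for kinds without a
// fixed bit size.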
std::optional<int> DType::bit_size() const {
switch (kind_) {
case kS2:
case kU2:
return 2;
case kS4:
case kU4:
return 4;
case kPred:
case kS8:
case kU8:
case kF8E3M4:
case kF8E4M3:
case kF8E4M3FN:
case kF8E4M3B11FNUZ:
case kF8E4M3FNUZ:
case kF8E5M2:
case kF8E5M2FNUZ:
return 8;
case kS16:
case kU16:
case kF16:
case kBF16:
return 16;
case kS32:
case kU32:
case kF32:
return 32;
case kS64:
case kU64:
case kF64:
case kC64:
return 64;
case kC128:
return 128;
case kToken:
case kInvalid:
case kString:
return std::nullopt;
}
}
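// Converts a DTypeProto to a DType. Kinds not recognized here map to
// DType::kInvalid rather than producing an error.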
absl::StatusOr<DType> DType::FromProto(const DTypeProto& dtype_proto) {
switch (dtype_proto.kind()) {
case DTypeProto::KIND_PRED:
return DType(DType::Kind::kPred);
case DTypeProto::KIND_TOKEN:
return DType(DType::Kind::kToken);
#define CASE(X) \
case DTypeProto::KIND_##X: \
return DType(DType::Kind::k##X);
CASE(S4);
CASE(S8);
CASE(S16);
CASE(S32);
CASE(S64);
CASE(U4);
CASE(U8);
CASE(U16);
CASE(U32);
CASE(U64);
CASE(F16);
CASE(F32);
CASE(F64);
CASE(BF16);
CASE(C64);
CASE(C128);
CASE(F8E4M3FN);
CASE(F8E4M3B11FNUZ);
CASE(F8E4M3FNUZ);
CASE(F8E5M2);
CASE(F8E5M2FNUZ);
#undef CASE
case DTypeProto::KIND_STRING:
return DType(DType::Kind::kString);
default:
return DType(DType::Kind::kInvalid);
}
}
DTypeProto DType::ToProto() const {
DTypeProto dtype_proto;
switch (kind()) {
case DType::Kind::kPred:
dtype_proto.set_kind(DTypeProto::KIND_PRED);
break;
case DType::Kind::kToken:
dtype_proto.set_kind(DTypeProto::KIND_TOKEN);
break;
#define CASE(X) \
case DType::Kind::k##X: \
dtype_proto.set_kind(DTypeProto::KIND_##X); \
break;
CASE(S4);
CASE(S8);
CASE(S16);
CASE(S32);
CASE(S64);
CASE(U4);
CASE(U8);
CASE(U16);
CASE(U32);
CASE(U64);
CASE(F16);
CASE(F32);
CASE(F64);
CASE(BF16);
CASE(C64);
CASE(C128);
CASE(F8E4M3FN);
CASE(F8E4M3B11FNUZ);
CASE(F8E4M3FNUZ);
CASE(F8E5M2);
CASE(F8E5M2FNUZ);
#undef CASE
case DType::Kind::kString:
dtype_proto.set_kind(DTypeProto::KIND_STRING);
break;
default:
dtype_proto.set_kind(DTypeProto::KIND_UNSPECIFIED);
break;
}
return dtype_proto;
}
std::string DType::DebugString() const {
switch (kind_) {
case kInvalid:
return "INVALID";
case kPred:
return "PRED";
case kS8:
return "S8";
case kS16:
return "S16";
case kS32:
return "S32";
case kS64:
return "S64";
case kU8:
return "U8";
case kU16:
return "U16";
case kU32:
return "U32";
case kU64:
return "U64";
case kF16:
return "F16";
case kF32:
return "F32";
case kF64:
return "F64";
case kBF16:
return "BF16";
case kC64:
return "C64";
case kC128:
return "C128";
case kToken:
return "TOKEN";
case kString:
return "STRING";
default:
return absl::StrCat("UNKNOWN(", static_cast<int>(kind_), ")");
}
}
std::ostream& operator<<(std::ostream& os, const DType& dtype) {
return os << dtype.DebugString();
}
}
} | #include "xla/python/ifrt/dtype.h"
#include <optional>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "xla/python/ifrt/dtype.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
TEST(DTypeTest, FromToFromProto) {
for (int i = 0; i < DTypeProto::Kind_descriptor()->value_count(); ++i) {
DTypeProto proto;
proto.set_kind(static_cast<DTypeProto::Kind>(
DTypeProto::Kind_descriptor()->value(i)->number()));
TF_ASSERT_OK_AND_ASSIGN(DType dtype, DType::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(DType dtype_copy,
DType::FromProto(dtype.ToProto()));
EXPECT_EQ(dtype_copy, dtype);
}
}
TEST(DTypeTest, ByteSize) {
for (const auto& [kind, byte_size] :
std::vector<std::tuple<DType::Kind, int>>({
{DType::kS2, -1},
{DType::kU2, -1},
{DType::kS4, -1},
{DType::kU4, -1},
{DType::kPred, 1},
{DType::kS8, 1},
{DType::kU8, 1},
{DType::kF8E3M4, 1},
{DType::kF8E4M3, 1},
{DType::kF8E4M3FN, 1},
{DType::kF8E4M3B11FNUZ, 1},
{DType::kF8E4M3FNUZ, 1},
{DType::kF8E5M2, 1},
{DType::kF8E5M2FNUZ, 1},
{DType::kS16, 2},
{DType::kU16, 2},
{DType::kF16, 2},
{DType::kBF16, 2},
{DType::kS32, 4},
{DType::kU32, 4},
{DType::kF32, 4},
{DType::kS64, 8},
{DType::kU64, 8},
{DType::kF64, 8},
{DType::kC64, 8},
{DType::kC128, 16},
{DType::kToken, -1},
{DType::kInvalid, -1},
{DType::kString, -1},
})) {
EXPECT_EQ(DType(kind).byte_size(),
byte_size == -1 ? std::nullopt : std::make_optional(byte_size));
}
}
TEST(DTypeTest, BitSize) {
for (const auto& [kind, bit_size] :
std::vector<std::tuple<DType::Kind, int>>({
{DType::kS2, 2},
{DType::kU2, 2},
{DType::kS4, 4},
{DType::kU4, 4},
{DType::kPred, 8},
{DType::kS8, 8},
{DType::kU8, 8},
{DType::kF8E3M4, 8},
{DType::kF8E4M3, 8},
{DType::kF8E4M3FN, 8},
{DType::kF8E4M3B11FNUZ, 8},
{DType::kF8E4M3FNUZ, 8},
{DType::kF8E5M2, 8},
{DType::kF8E5M2FNUZ, 8},
{DType::kS16, 16},
{DType::kU16, 16},
{DType::kF16, 16},
{DType::kBF16, 16},
{DType::kS32, 32},
{DType::kU32, 32},
{DType::kF32, 32},
{DType::kS64, 64},
{DType::kU64, 64},
{DType::kF64, 64},
{DType::kC64, 64},
{DType::kC128, 128},
{DType::kToken, -1},
{DType::kInvalid, -1},
{DType::kString, -1},
})) {
EXPECT_EQ(DType(kind).bit_size(),
bit_size == -1 ? std::nullopt : std::make_optional(bit_size));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/dtype.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/dtype_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28604415-bb4e-4d01-bd3b-610ccff48128 | cpp | tensorflow/tensorflow | test_util | tensorflow/compiler/jit/test_util.cc | tensorflow/lite/kernels/shim/test_util_test.cc | #include "tensorflow/compiler/jit/test_util.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tensorflow/compiler/jit/shape_inference.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
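// Checks the inferred shapes in `shape_info` against `expected_shapes`: every
// op node must have an entry in `shape_info`, listed nodes must match exactly,
// and any expected node that never appears in the graph is reported as
// missing.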
Status ShapeAnnotationsMatch(
const Graph& graph, const GraphShapeInfo& shape_info,
std::map<string, std::vector<PartialTensorShape>> expected_shapes) {
for (Node* node : graph.op_nodes()) {
auto sit = shape_info.find(node->name());
TF_RET_CHECK(sit != shape_info.end())
<< "Missing shape information for node " << node->name();
std::vector<PartialTensorShape> shapes;
for (const auto& output : sit->second) shapes.push_back(output.shape);
auto it = expected_shapes.find(node->name());
if (it != expected_shapes.end()) {
if (!PartialTensorShapeUtils::AreIdentical(shapes, it->second)) {
return errors::InvalidArgument(
"Shape mismatch for ", node->name(), ". Expected: ",
PartialTensorShapeUtils::PartialShapeListString(it->second),
", actual: ",
PartialTensorShapeUtils::PartialShapeListString(shapes));
}
expected_shapes.erase(it);
}
}
if (!expected_shapes.empty()) {
std::vector<string> missing;
missing.reserve(expected_shapes.size());
for (const auto& entry : expected_shapes) {
missing.push_back(entry.first);
}
return errors::InvalidArgument("Missing shapes for nodes: ",
absl::StrJoin(missing, ","));
}
return absl::OkStatus();
}
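// Creates one device per requested type under a StaticDeviceMgr, optionally
// registers `fdef` in the function library, and builds a
// ProcessFunctionLibraryRuntime; `flr_` caches the FLR for the local CPU
// device.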
void DeviceSetup::AddDevicesAndSetUp(
const std::vector<std::string>& device_names,
const std::optional<FunctionDef>& fdef) {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
for (const auto& device_name : device_names) {
device_count->insert({device_name, 1});
}
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices));
OptimizerOptions opts;
lib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
FunctionDefLibrary());
if (fdef.has_value()) {
TF_CHECK_OK(lib_def_->AddFunctionDef(*fdef));
}
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def_.get(), opts,
nullptr, nullptr);
flr_ = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
}
Device* DeviceSetup::GetDevice(const string& device_name) {
if (device_mgr_ == nullptr) {
return nullptr;
}
string full_device_name = absl::StrCat(
"/job:localhost/replica:0/task:0/device:", device_name, ":0");
Device* device;
TF_CHECK_OK(device_mgr_->LookupDevice(full_device_name, &device));
return device;
}
} | #include "tensorflow/lite/kernels/shim/test_util.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
namespace tflite {
namespace {
TEST(TfliteTensorDebugString, Basic) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(3);
interpreter.AllocateTensors();
auto t_int32 = UniqueTfLiteTensor(interpreter.tensor(0));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4, 5}, {5}, t_int32.get());
EXPECT_EQ("[1, 2, 3, 4, 5]", TfliteTensorDebugString(t_int32.get()));
  auto t_int64 = UniqueTfLiteTensor(interpreter.tensor(1));  // Filled with int32 values below despite the name.
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4}, {2, 2}, t_int64.get());
EXPECT_EQ("[[1, 2], [3, 4]]", TfliteTensorDebugString(t_int64.get()));
auto t_str = UniqueTfLiteTensor(interpreter.tensor(2));
PopulateTfLiteTensor<std::string>({"ab", "cde", "f"}, {1, 3}, t_str.get());
EXPECT_EQ("[[ab, cde, f]]", TfliteTensorDebugString(t_str.get()));
}
TEST(TfliteTensorDebugString, MaxVal) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(2);
interpreter.AllocateTensors();
auto t_int32 = UniqueTfLiteTensor(interpreter.tensor(0));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4}, {4}, t_int32.get());
EXPECT_EQ("[1, 2, 3, 4]",
TfliteTensorDebugString(t_int32.get(), 4));
t_int32 = UniqueTfLiteTensor(interpreter.tensor(0));
PopulateTfLiteTensor<int32_t>({1, 2, 3, 4, 5}, {5}, t_int32.get());
EXPECT_EQ("[1, 2, 3, 4, ...]",
TfliteTensorDebugString(t_int32.get(), 4));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/test_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cd5d9ea9-18fe-4df4-93c5-bc52f4a12e63 | cpp | tensorflow/tensorflow | plugin_program_serdes | third_party/xla/xla/python/ifrt/plugin_program_serdes.cc | third_party/xla/xla/python/ifrt/plugin_program_serdes_test.cc | #include <memory>
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/plugin_program.h"
#include "xla/python/ifrt/serdes.h"
namespace xla {
namespace ifrt {
namespace {
constexpr absl::string_view kSerializationPrefix =
"__serialized_plugin_program ";
class PluginProgramSerDes
: public llvm::RTTIExtends<PluginProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::PluginProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
return absl::StrCat(kSerializationPrefix,
llvm::cast<PluginProgram>(serializable).data);
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
if (!absl::StartsWith(serialized, kSerializationPrefix)) {
return absl::InvalidArgumentError(
absl::StrCat("Bad serialized ", type_name()));
}
absl::string_view data(serialized);
data.remove_prefix(kSerializationPrefix.size());
auto result = std::make_unique<PluginProgram>();
result->data = data;
return result;
}
static char ID;
};
[[maybe_unused]] char PluginProgramSerDes::ID = 0;
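// Registration hook: the immediately invoked lambda runs when this bool is
// initialized at load time, and the trailing `, true` supplies the bool's
// value. The same pattern is used below for PluginCompileOptions.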
bool register_plugin_program_serdes = ([]() {
RegisterSerDes<PluginProgram>(
std::make_unique<PluginProgramSerDes>());
}(), true);
class PluginCompileOptionsSerDes
: public llvm::RTTIExtends<PluginCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::PluginCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
return std::make_unique<PluginCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char PluginCompileOptionsSerDes::ID = 0;
bool register_plugin_compile_options_serdes = ([]() {
RegisterSerDes<PluginCompileOptions>(
std::make_unique<PluginCompileOptionsSerDes>());
}(), true);
}
}
} | #include <memory>
#include <gtest/gtest.h>
#include "xla/python/ifrt/plugin_program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace ifrt {
namespace {
TEST(PluginProgramSerDesTest, RoundTrip) {
PluginProgram orig;
orig.data = "foo";
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<PluginProgram> deserialized_program,
Deserialize<PluginProgram>(serialized, nullptr));
EXPECT_EQ(deserialized_program->data, "foo");
}
TEST(PluginCompileOptionsSerDesTest, RoundTrip) {
PluginCompileOptions orig;
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_EXPECT_OK(
Deserialize<PluginCompileOptions>(serialized, nullptr)
.status());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/plugin_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/plugin_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eab7bed0-da11-471e-bab9-a472e9e04c08 | cpp | tensorflow/tensorflow | remap_plan | third_party/xla/xla/python/ifrt/remap_plan.cc | third_party/xla/xla/python/ifrt/remap_plan_test.cc | #include "xla/python/ifrt/remap_plan.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/remap_plan.pb.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
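// Rebuilds a Mapping from its proto form. The from/to intervals are stored as
// six parallel start/end/step arrays, which must all have the same length.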
absl::StatusOr<RemapPlan::Mapping> MappingFromProto(
const RemapPlanProto::MappingProto& mapping_proto) {
RemapPlan::Mapping mapping;
mapping.in_array = mapping_proto.in_array();
mapping.out_array = mapping_proto.out_array();
const int64_t num_intervals = mapping_proto.from_start_size();
TF_RET_CHECK(mapping_proto.from_end_size() == num_intervals);
TF_RET_CHECK(mapping_proto.from_step_size() == num_intervals);
TF_RET_CHECK(mapping_proto.to_start_size() == num_intervals);
TF_RET_CHECK(mapping_proto.to_end_size() == num_intervals);
TF_RET_CHECK(mapping_proto.to_step_size() == num_intervals);
mapping.from.reserve(num_intervals);
mapping.to.reserve(num_intervals);
for (int64_t i = 0; i < num_intervals; ++i) {
mapping.from.push_back(
RemapPlan::Interval{mapping_proto.from_start(i),
mapping_proto.from_end(i),
mapping_proto.from_step(i)});
mapping.to.push_back(
RemapPlan::Interval{mapping_proto.to_start(i),
mapping_proto.to_end(i),
mapping_proto.to_step(i)});
}
return mapping;
}
absl::StatusOr<RemapPlanProto::MappingProto> MappingToProto(
const RemapPlan::Mapping& mapping) {
TF_RET_CHECK(mapping.from.size() == mapping.to.size());
RemapPlanProto::MappingProto proto;
proto.set_in_array(mapping.in_array);
proto.set_out_array(mapping.out_array);
const int64_t num_intervals = mapping.from.size();
proto.mutable_from_start()->Reserve(num_intervals);
proto.mutable_from_end()->Reserve(num_intervals);
proto.mutable_from_step()->Reserve(num_intervals);
proto.mutable_to_start()->Reserve(num_intervals);
proto.mutable_to_end()->Reserve(num_intervals);
proto.mutable_to_step()->Reserve(num_intervals);
for (int64_t i = 0; i < mapping.from.size(); ++i) {
proto.add_from_start(mapping.from[i].start);
proto.add_from_end(mapping.from[i].end);
proto.add_from_step(mapping.from[i].step);
proto.add_to_start(mapping.to[i].start);
proto.add_to_end(mapping.to[i].end);
proto.add_to_step(mapping.to[i].step);
}
return proto;
}
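// Validates one interval against the shard count: start must lie in
// [0, num_shards - 1], end in [0, num_shards], and step must be positive.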
absl::Status CheckRange(int64_t num_shards,
const RemapPlan::Interval& interval) {
if (interval.start < 0 || interval.start > num_shards - 1) {
return InvalidArgument("start must be in [0, %d], but is %d",
num_shards - 1, interval.start);
}
if (interval.end < 0 || interval.end > num_shards) {
return InvalidArgument("end must be in [0, %d], but is %d", num_shards,
interval.end);
}
if (interval.step <= 0) {
return InvalidArgument("step must be positive, but is %d", interval.step);
}
return absl::OkStatus();
}
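// Number of shard indices an interval visits, i.e. ceil((end - start) / step).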
int64_t GetNumberOfSteps(const RemapPlan::Interval& interval) {
return (interval.end - interval.start + interval.step - 1) / interval.step;
}
}
std::string RemapPlan::Interval::DebugString() const {
return absl::StrCat("[", start, ":", end, ":", step, "]");
}
std::string RemapPlan::Mapping::DebugString() const {
auto format_intervals = [](absl::Span<const RemapPlan::Interval> intervals) {
return absl::StrCat(
"[",
absl::StrJoin(
intervals, ",",
[](std::string* out, const RemapPlan::Interval& interval) {
absl::StrAppend(out, interval.DebugString());
}),
"]");
};
return absl::StrCat("Mapping(in_array=", in_array, ",",
"out_array=", out_array, ",from=", format_intervals(from),
",to=", format_intervals(to), ")");
}
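// Validates the whole plan: consistent dtypes, mapping indices in range,
// matching interval and step counts, each input shard consumed at most once,
// every output shard assigned exactly once, and the assigned device order
// equal to each output sharding's device list.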
absl::Status RemapPlan::Validate() const {
const int num_inputs = input_specs.size();
if (num_inputs == 0) {
return InvalidArgument("Must have at least one input");
}
for (int i = 0; i < num_inputs; ++i) {
if (input_specs[i].dtype != input_specs.front().dtype) {
return InvalidArgument(
"Input must have the same dtype: %s (input 0) vs. %s (input "
"%d)",
input_specs.front().dtype.DebugString(),
input_specs[i].dtype.DebugString(), i);
}
}
const int num_outputs = output_specs.size();
for (int i = 0; i < num_outputs; ++i) {
if (output_specs[i].dtype != input_specs.front().dtype) {
return InvalidArgument(
"Input and output must have the same dtype: %s (input 0) vs. %s "
"(output %d)",
output_specs.front().dtype.DebugString(),
output_specs[i].dtype.DebugString(), i);
}
}
std::vector<std::vector<bool>> in_used_buffers_list(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
in_used_buffers_list[i].resize(
input_specs[i].sharding->devices()->size(),
false);
}
std::vector<BasicDeviceList::Devices> out_assigned_devices_list(num_outputs);
for (int i = 0; i < num_outputs; ++i) {
out_assigned_devices_list[i].resize(
output_specs[i].sharding->devices()->size(),
nullptr);
}
for (int64_t i = 0; i < mappings->size(); ++i) {
const RemapPlan::Mapping& mapping = (*mappings)[i];
if (mapping.in_array < 0 || mapping.in_array >= num_inputs) {
return InvalidArgument(
"mappings[%d].in_array must be in [0, %d], but is %d", i,
num_inputs - 1, mapping.in_array);
}
if (mapping.out_array < 0 || mapping.out_array >= num_outputs) {
return InvalidArgument(
"mappings[%d].out_array must be in [0, %d], but is %d", i,
num_outputs - 1, mapping.out_array);
}
if (mapping.from.size() != mapping.to.size()) {
return InvalidArgument(
"mappings[%d].from and mappings[%d].to must have the same number of "
"intervals, but has %d and %d intervals",
i, i, mapping.from.size(), mapping.to.size());
}
std::vector<bool>& in_used_buffers = in_used_buffers_list[mapping.in_array];
absl::Span<Device* const> in_devices =
input_specs[mapping.in_array].sharding->devices()->devices();
BasicDeviceList::Devices& out_assigned_devices =
out_assigned_devices_list[mapping.out_array];
const int64_t in_shards_count = in_used_buffers.size();
const int64_t out_shards_count = out_assigned_devices.size();
for (int s = 0; s < mapping.from.size(); ++s) {
const RemapPlan::Interval& in_interval = mapping.from[s];
const RemapPlan::Interval& out_interval = mapping.to[s];
TF_RETURN_IF_ERROR(CheckRange(in_shards_count, in_interval));
TF_RETURN_IF_ERROR(CheckRange(out_shards_count, out_interval));
if (GetNumberOfSteps(in_interval) != GetNumberOfSteps(out_interval)) {
return InvalidArgument(
"mappings[%d].from[%d] and mappings[%d].to[%d] must have the same "
"number of steps, but were %d and %d "
"(%s vs. %s)",
i, s, i, s, GetNumberOfSteps(in_interval),
GetNumberOfSteps(out_interval), in_interval.DebugString(),
out_interval.DebugString());
}
int64_t in_shard = in_interval.start;
int64_t out_shard = out_interval.start;
while (in_shard < in_interval.end) {
if (in_used_buffers[in_shard]) {
return InvalidArgument("Input array %d shard %d is already used",
mapping.in_array, in_shard);
}
in_used_buffers[in_shard] = true;
if (out_assigned_devices[out_shard] != nullptr) {
return InvalidArgument("Output array %d shard %d is already assigned",
mapping.out_array, out_shard);
}
out_assigned_devices[out_shard] = in_devices[in_shard];
in_shard += in_interval.step;
out_shard += out_interval.step;
}
}
}
for (int i = 0; i < num_outputs; ++i) {
for (int out_shard = 0;
out_shard < output_specs[i].sharding->devices()->size(); ++out_shard) {
if (out_assigned_devices_list[i][out_shard] == nullptr) {
return InvalidArgument("Output array %d shard %d is unassigned", i,
out_shard);
}
}
if (out_assigned_devices_list[i] !=
output_specs[i].sharding->devices()->devices()) {
return InvalidArgument(
"Output array %d devices and sharding devices do not match: "
"Expected %v, but got %v",
i, *output_specs[i].sharding->devices(),
*BasicDeviceList::Create(std::move(out_assigned_devices_list[i])));
}
}
return absl::OkStatus();
}
absl::StatusOr<RemapPlan> RemapPlan::FromProto(
DeviceList::LookupDeviceFunc lookup_device, const RemapPlanProto& proto) {
RemapPlan plan;
plan.input_specs.reserve(proto.input_specs_size());
for (const auto& input_spec_proto : proto.input_specs()) {
TF_ASSIGN_OR_RETURN(ArraySpec input_spec,
ArraySpec::FromProto(lookup_device, input_spec_proto));
plan.input_specs.push_back(std::move(input_spec));
}
plan.output_specs.reserve(proto.output_specs_size());
for (const auto& output_spec_proto : proto.output_specs()) {
TF_ASSIGN_OR_RETURN(ArraySpec output_spec,
ArraySpec::FromProto(lookup_device, output_spec_proto));
plan.output_specs.push_back(std::move(output_spec));
}
plan.mappings = std::make_shared<std::vector<Mapping>>();
plan.mappings->reserve(proto.mappings_size());
for (const auto& mapping_proto : proto.mappings()) {
TF_ASSIGN_OR_RETURN(auto mapping, MappingFromProto(mapping_proto));
plan.mappings->push_back(std::move(mapping));
}
return plan;
}
absl::StatusOr<RemapPlanProto> RemapPlan::ToProto() const {
RemapPlanProto proto;
proto.mutable_input_specs()->Reserve(input_specs.size());
for (const auto& input_spec : input_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_input_specs(), input_spec.ToProto());
}
proto.mutable_output_specs()->Reserve(output_specs.size());
for (const auto& output_spec : output_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_output_specs(), output_spec.ToProto());
}
proto.mutable_mappings()->Reserve(mappings->size());
for (const auto& mapping : *mappings) {
TF_ASSIGN_OR_RETURN(*proto.add_mappings(), MappingToProto(mapping));
}
return proto;
}
std::string RemapPlan::DebugString() const {
auto format_array_specs = [](absl::Span<const ArraySpec> array_specs) {
return absl::StrCat(
"[",
absl::StrJoin(array_specs, ",",
[](std::string* out, const ArraySpec& spec) {
absl::StrAppend(out, spec.DebugString());
}),
"]");
};
auto format_mappings = [](absl::Span<const Mapping> mappings) {
return absl::StrCat(
"[",
absl::StrJoin(mappings, ",",
[](std::string* out, const Mapping& mapping) {
absl::StrAppend(out, mapping.DebugString());
}),
"]");
};
return absl::StrCat("RemapPlan(input_specs=", format_array_specs(input_specs),
",output_specs=", format_array_specs(output_specs), ",",
"mappings=", format_mappings(*mappings), ")");
}
}
} | #include "xla/python/ifrt/remap_plan.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
class RemapPlanTest : public test_util::DeviceTest {};
TEST_P(RemapPlanTest, ToFromProto) {
RemapPlan plan;
Shape shape({20, 20});
Shape shard_shape({5, 20});
tsl::RCReference<DeviceList> devices = GetDevices({0, 1, 2, 3});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(devices, MemoryKind(), shape,
shard_shape);
plan.input_specs.reserve(2);
plan.input_specs.push_back(ArraySpec{DType(DType::kF32),
shape, sharding});
plan.input_specs.push_back(ArraySpec{DType(DType::kF32),
shape, sharding});
plan.output_specs.reserve(2);
plan.output_specs.push_back(ArraySpec{
DType(DType::kF32), shape, sharding});
plan.output_specs.push_back(ArraySpec{
DType(DType::kF32), shape, sharding});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->reserve(2);
plan.mappings->push_back(RemapPlan::Mapping{
0, 1,
{RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{2, 4, 1}},
{RemapPlan::Interval{1, 4, 2}, RemapPlan::Interval{0, 4, 2}}});
plan.mappings->push_back(RemapPlan::Mapping{
1, 0,
{RemapPlan::Interval{0, 4, 2}, RemapPlan::Interval{1, 4, 2}},
{RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{2, 4, 1}}});
TF_ASSERT_OK_AND_ASSIGN(RemapPlanProto plan_proto, plan.ToProto());
TF_ASSERT_OK_AND_ASSIGN(
RemapPlan plan_copy,
RemapPlan::FromProto(absl::bind_front(&Client::LookupDevice, client()),
plan_proto));
EXPECT_THAT(*plan_copy.mappings, ElementsAreArray(*plan.mappings));
EXPECT_THAT(plan_copy.output_specs, SizeIs(2));
for (const auto& spec : plan_copy.input_specs) {
EXPECT_EQ(spec.dtype, DType(DType::kF32));
EXPECT_EQ(spec.shape, shape);
const auto* sharding_copy =
llvm::dyn_cast<ConcreteEvenSharding>(spec.sharding.get());
ASSERT_NE(sharding_copy, nullptr);
EXPECT_EQ(*sharding_copy->devices(), *devices);
EXPECT_EQ(sharding_copy->shape(), shape);
EXPECT_EQ(sharding_copy->shard_shape(), shard_shape);
}
for (const auto& spec : plan_copy.output_specs) {
EXPECT_EQ(spec.dtype, DType(DType::kF32));
EXPECT_EQ(spec.shape, shape);
const auto* sharding_copy =
llvm::dyn_cast<ConcreteEvenSharding>(spec.sharding.get());
ASSERT_NE(sharding_copy, nullptr);
EXPECT_EQ(*sharding_copy->devices(), *devices);
EXPECT_EQ(sharding_copy->shape(), shape);
EXPECT_EQ(sharding_copy->shard_shape(), shard_shape);
}
}
TEST_P(RemapPlanTest, InvalidInputDtype) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.input_specs.push_back(
ArraySpec{DType(DType::kF32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input must have the same dtype")));
}
TEST_P(RemapPlanTest, InvalidOutputDtype) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kF32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input and output must have the same dtype")));
}
TEST_P(RemapPlanTest, InvalidInputArrayIndex) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{1,
0,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].in_array must be in [0, 0], but is 1")));
}
TEST_P(RemapPlanTest, InvalidOutputArrayIndex) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
1,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].out_array must be in [0, 0], but is 1")));
}
TEST_P(RemapPlanTest, InvalidIntervalCount) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].from and mappings[0].to must have the same "
"number of intervals, but has 2 and 1 intervals")));
}
TEST_P(RemapPlanTest, InvalidShardIndex) {
auto run = [&](RemapPlan::Interval from, RemapPlan::Interval to) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{0, 0,
{from},
{to}});
return plan.Validate();
};
EXPECT_THAT(run(RemapPlan::Interval{-1, 1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{1, 1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is 1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{-1, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is 1")));
EXPECT_THAT(run(RemapPlan::Interval{0, -1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is 2")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, -1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 2, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is 2")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 0}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("step must be positive, but is 0")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, -1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("step must be positive, but is -1")));
}
TEST_P(RemapPlanTest, AlreadyUsedInputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 2, 1}}});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input array 0 shard 0 is already used")));
}
TEST_P(RemapPlanTest, UnassignedOutputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
0,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output array 0 shard 1 is unassigned")));
}
TEST_P(RemapPlanTest, AlreadyAssignedOutputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 2, 1}},
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output array 0 shard 0 is already assigned")));
}
TEST_P(RemapPlanTest, InvalidOutputDevices) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({1, 0}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
0,
{RemapPlan::Interval{0, 2, 1}},
{RemapPlan::Interval{0, 2, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"Output array 0 devices and sharding devices do not match")));
}
INSTANTIATE_TEST_SUITE_P(NumDevices, RemapPlanTest,
testing::Values(test_util::DeviceTestParam{
4,
4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/remap_plan.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/remap_plan_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed33b6cb-931a-44ad-a7c0-aa72370d85d7 | cpp | tensorflow/tensorflow | tuple | third_party/xla/xla/hlo/builder/lib/tuple.cc | third_party/xla/xla/hlo/builder/lib/tuple_test.cc | #include "xla/hlo/builder/lib/tuple.h"
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
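// Produces a ShapeTree of XlaOps whose root is `tuple` itself and whose every
// nested element is obtained with GetTupleElement from its parent.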
absl::StatusOr<ShapeTree<XlaOp>> DisassembleTuple(XlaOp tuple) {
TF_ASSIGN_OR_RETURN(Shape shape, tuple.builder()->GetShape(tuple));
ShapeTree<XlaOp> result(shape);
result.ForEachMutableElement([&](ShapeIndexView index, XlaOp* element) {
if (index.empty()) {
*element = tuple;
} else {
ShapeIndexView parent_index = index.subspan(0, index.size() - 1);
XlaOp parent = result.element(parent_index);
*element = GetTupleElement(parent, index.back());
}
});
return std::move(result);
}
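// Inverse of DisassembleTuple: walks the tree post-order, emitting a Tuple op
// for every tuple-shaped subshape, and returns the op at the root.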
XlaOp AssembleTuple(XlaBuilder* builder, ShapeTree<XlaOp> elements) {
elements.ForEachMutableElementPostOrder(
[&](const ShapeIndex& index, XlaOp* element) {
const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);
if (subshape.IsTuple()) {
absl::InlinedVector<XlaOp, 2> children;
ShapeIndex child_index = index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(elements.element(child_index));
child_index.pop_back();
}
*element = Tuple(builder, children);
}
});
return elements.element({});
}
} | #include "xla/hlo/builder/lib/tuple.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TupleTest : public ClientLibraryTestBase {};
XLA_TEST_F(TupleTest, DisassembleAssemble) {
XlaBuilder builder(TestName());
Shape shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(S32, {3}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {4}), ShapeUtil::MakeShape(S32, {5})}),
ShapeUtil::MakeShape(S32, {6}),
});
Literal input = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({3}, int32_t{42}),
LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({4}, int32_t{43}),
LiteralUtil::CreateFullWithDescendingLayout({5}, int32_t{44})),
LiteralUtil::CreateFullWithDescendingLayout({6}, int32_t{45}));
XlaOp param = Parameter(&builder, 0, shape, "param");
TF_ASSERT_OK_AND_ASSIGN(ShapeTree<XlaOp> disassembled_tuple,
DisassembleTuple(param));
int32_t addend = 1;
disassembled_tuple.ForEachMutableElement([&](const ShapeIndex& index,
XlaOp* element) {
const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
if (subshape.IsArray()) {
*element = Add(
*element,
ConstantLiteral(&builder, LiteralUtil::CreateFullWithDescendingLayout(
subshape.dimensions(), addend)));
++addend;
}
});
AssembleTuple(&builder, std::move(disassembled_tuple));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<GlobalData> data,
client_->TransferToServer(input));
Literal expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({3}, int32_t{43}),
LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateFullWithDescendingLayout({4}, int32_t{45}),
LiteralUtil::CreateFullWithDescendingLayout({5}, int32_t{47})),
LiteralUtil::CreateFullWithDescendingLayout({6}, int32_t{49}));
ComputeAndCompareLiteral(&builder, expected, {data.get()}, ErrorSpec(0),
&shape);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/tuple.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/tuple_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
080bb713-9ec6-4917-8c00-a8802f2a3a3c | cpp | tensorflow/tensorflow | attribute_map | tensorflow/lite/core/async/interop/c/attribute_map.cc | tensorflow/lite/core/async/interop/c/attribute_map_test.cc | #include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
extern "C" {
TfLiteAttributeMap* TfLiteAttributeMapCreate(TfLiteAttrMapType type) {
return new TfLiteAttributeMap(type);
}
void TfLiteAttributeMapDelete(TfLiteAttributeMap* attrs) { delete attrs; }
bool TfLiteAttributeMapIsBufferAttributeMap(const TfLiteAttributeMap* attrs) {
if (attrs) return attrs->impl.IsBufferAttributeMap();
return false;
}
bool TfLiteAttributeMapIsSyncAttributeMap(const TfLiteAttributeMap* attrs) {
if (attrs) return attrs->impl.IsSyncAttributeMap();
return false;
}
void TfLiteAttributeMapCopy(const TfLiteAttributeMap* src,
TfLiteAttributeMap* dst) {
if (src && dst) {
dst->impl = src->impl;
}
}
bool TfLiteAttributeMapGetSizeTBufferAttr(const TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key,
size_t* val) {
return attrs && attrs->impl.IsBufferAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetSizeTBufferAttr(TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key, size_t val) {
if (attrs && attrs->impl.IsBufferAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
bool TfLiteAttributeMapGetStringBufferAttr(const TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key,
const char** val) {
return attrs && attrs->impl.IsBufferAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetStringBufferAttr(TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key,
const char* val) {
if (attrs && attrs->impl.IsBufferAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
bool TfLiteAttributeMapGetBoolBufferAttr(const TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key, bool* val) {
return attrs && attrs->impl.IsBufferAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetBoolBufferAttr(TfLiteAttributeMap* attrs,
TfLiteBufferAttrKey key, bool val) {
if (attrs && attrs->impl.IsBufferAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
bool TfLiteAttributeMapGetStringSyncAttr(const TfLiteAttributeMap* attrs,
TfLiteSynchronizationAttrKey key,
const char** val) {
return attrs && attrs->impl.IsSyncAttributeMap() &&
attrs->impl.GetAttr(key, val);
}
bool TfLiteAttributeMapSetStringSyncAttr(TfLiteAttributeMap* attrs,
TfLiteSynchronizationAttrKey key,
const char* val) {
if (attrs && attrs->impl.IsSyncAttributeMap()) {
attrs->impl.SetAttr(key, val);
return true;
}
return false;
}
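// Generates the unchecked getters/setters: the raw uint32_t key is cast
// directly to TfLiteBufferAttrKey and the Custom* variants use arbitrary
// string keys; unlike the accessors above, none of these verify the map type.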
#define DEFINE_ATTR_MAP_ACCESSOR(type, type_name) \
bool TfLiteAttributeMapGet##type_name##Attr(const TfLiteAttributeMap* attrs, \
uint32_t key, type* val) { \
return attrs ? attrs->impl.GetAttr(static_cast<TfLiteBufferAttrKey>(key), \
val) \
: false; \
} \
void TfLiteAttributeMapSet##type_name##Attr(TfLiteAttributeMap* attrs, \
uint32_t key, type val) { \
if (attrs) { \
attrs->impl.SetAttr(static_cast<TfLiteBufferAttrKey>(key), val); \
} \
} \
bool TfLiteAttributeMapGetCustom##type_name##Attr( \
const TfLiteAttributeMap* attrs, const char* key, type* val) { \
return attrs ? attrs->impl.GetCustomAttr(key, val) : false; \
} \
void TfLiteAttributeMapSetCustom##type_name##Attr( \
TfLiteAttributeMap* attrs, const char* key, type val) { \
if (attrs) { \
attrs->impl.SetCustomAttr(key, val); \
} \
}
DEFINE_ATTR_MAP_ACCESSOR(int, Int);
DEFINE_ATTR_MAP_ACCESSOR(size_t, SizeT);
DEFINE_ATTR_MAP_ACCESSOR(const char*, String);
DEFINE_ATTR_MAP_ACCESSOR(bool, Bool);
#undef DEFINE_ATTR_MAP_ACCESSOR
} | #include "tensorflow/lite/core/async/interop/c/attribute_map.h"
#include <cstddef>
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace {
TEST(AttributeMapTest, AttributeMapCreateTypeCheckTest) {
{
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
EXPECT_TRUE(TfLiteAttributeMapIsBufferAttributeMap(attr));
EXPECT_FALSE(TfLiteAttributeMapIsSyncAttributeMap(attr));
TfLiteAttributeMapDelete(attr);
}
{
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeSync);
EXPECT_FALSE(TfLiteAttributeMapIsBufferAttributeMap(attr));
EXPECT_TRUE(TfLiteAttributeMapIsSyncAttributeMap(attr));
TfLiteAttributeMapDelete(attr);
}
}
TEST(AttributeMapTest, AttributeMapAccessor) {
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
{
TfLiteAttributeMapSetSizeTBufferAttr(attr, kTfLiteBufferAttrKeyAlignment,
42);
size_t result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetSizeTBufferAttr(
attr, kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_EQ(42, result);
EXPECT_FALSE(TfLiteAttributeMapGetSizeTBufferAttr(
attr, kTfLiteBufferAttrKeyOffset, &result));
}
{
const char str[] = "some string";
TfLiteAttributeMapSetStringBufferAttr(
attr, kTfLiteBufferAttrKeyResourceTypeName, str);
const char* result = nullptr;
EXPECT_TRUE(TfLiteAttributeMapGetStringBufferAttr(
attr, kTfLiteBufferAttrKeyResourceTypeName, &result));
EXPECT_EQ(str, result);
EXPECT_FALSE(TfLiteAttributeMapGetStringBufferAttr(
attr, kTfLiteBufferAttrKeyAlignment, &result));
EXPECT_FALSE(TfLiteAttributeMapSetStringSyncAttr(
attr, kTfLiteSynchronizationAttrKeyObjectTypeName, str));
EXPECT_FALSE(TfLiteAttributeMapGetStringSyncAttr(
attr, kTfLiteSynchronizationAttrKeyObjectTypeName, &result));
}
TfLiteAttributeMapDelete(attr);
}
TEST(AttributeMapTest, UnCheckedAttributeMapAccessor) {
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
{
TfLiteAttributeMapSetSizeTAttr(attr, 1, 42);
size_t result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetSizeTAttr(attr, 1, &result));
EXPECT_EQ(42, result);
EXPECT_FALSE(TfLiteAttributeMapGetSizeTAttr(attr, 2, &result));
}
{
TfLiteAttributeMapSetIntAttr(attr, 3, 21);
int result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetIntAttr(attr, 3, &result));
EXPECT_EQ(21, result);
EXPECT_FALSE(TfLiteAttributeMapGetIntAttr(attr, 4, &result));
}
{
const char str[] = "some string";
TfLiteAttributeMapSetStringAttr(attr, 1, str);
const char* result = nullptr;
EXPECT_TRUE(TfLiteAttributeMapGetStringAttr(attr, 1, &result));
EXPECT_EQ(str, result);
EXPECT_FALSE(TfLiteAttributeMapGetStringAttr(attr, 2, &result));
}
{
TfLiteAttributeMapSetBoolAttr(
attr, kTfLiteBufferAttrKeyCurrentHostCoherencyState, true);
bool result = false;
EXPECT_TRUE(TfLiteAttributeMapGetBoolAttr(
attr, kTfLiteBufferAttrKeyCurrentHostCoherencyState, &result));
EXPECT_TRUE(result);
EXPECT_FALSE(TfLiteAttributeMapGetBoolAttr(
attr, kTfLiteBufferAttrKeyPreferredHostCoherencyState, &result));
}
TfLiteAttributeMapDelete(attr);
}
TEST(AttributeMapTest, UnCheckedAttributeMapCustomAccessor) {
auto* attr = TfLiteAttributeMapCreate(kTfLiteAttrMapTypeBuffer);
{
TfLiteAttributeMapSetCustomSizeTAttr(attr, "foo", 42);
size_t result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetCustomSizeTAttr(attr, "foo", &result));
EXPECT_EQ(42, result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomSizeTAttr(attr, "bar", &result));
}
{
TfLiteAttributeMapSetCustomIntAttr(attr, "baz", 21);
int result = 0;
EXPECT_TRUE(TfLiteAttributeMapGetCustomIntAttr(attr, "baz", &result));
EXPECT_EQ(21, result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomIntAttr(attr, "quux", &result));
}
{
const char str[] = "some string";
TfLiteAttributeMapSetCustomStringAttr(attr, "foo", str);
const char* result = nullptr;
EXPECT_TRUE(TfLiteAttributeMapGetCustomStringAttr(attr, "foo", &result));
EXPECT_EQ(str, result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomStringAttr(attr, "bar", &result));
}
{
TfLiteAttributeMapSetCustomBoolAttr(attr, "foo", true);
bool result = false;
EXPECT_TRUE(TfLiteAttributeMapGetCustomBoolAttr(attr, "foo", &result));
EXPECT_TRUE(result);
EXPECT_FALSE(TfLiteAttributeMapGetCustomBoolAttr(attr, "bar", &result));
}
TfLiteAttributeMapDelete(attr);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/c/attribute_map.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/interop/c/attribute_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
56ea50e1-e04c-4cea-bb42-857fa1568bc0 | cpp | tensorflow/tensorflow | index | third_party/xla/xla/python/ifrt/index.cc | third_party/xla/xla/python/ifrt/index_test.cc | #include "xla/python/ifrt/index.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
namespace xla {
namespace ifrt {
std::string Index::DebugString() const {
return absl::StrCat("[", absl::StrJoin(elements_, ","), "]");
}
std::ostream& operator<<(std::ostream& os, const Index& index) {
return os << index.DebugString();
}
}
} | #include "xla/python/ifrt/index.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
TEST(IndexTest, Construction) {
EXPECT_THAT(Index({1, 2}).elements(), ElementsAre(1, 2));
EXPECT_THAT(Index::Zeros(2).elements(), ElementsAre(0, 0));
}
TEST(IndexTest, Operations) {
EXPECT_EQ(Index({1, 2}), Index({1, 2}));
EXPECT_NE(Index({1, 2}), Index({1, 3}));
Index a({11, 22});
Index b({2, 3});
EXPECT_EQ(a + b, Index({13, 25}));
{
Index c = a;
EXPECT_EQ(c += b, Index({13, 25}));
}
EXPECT_EQ(a - b, Index({9, 19}));
{
Index c = a;
EXPECT_EQ(c -= b, Index({9, 19}));
}
EXPECT_EQ(a * std::vector<int64_t>({1, 2}), Index({11, 44}));
{
Index c = a;
EXPECT_EQ(c *= std::vector<int64_t>({1, 2}), Index({11, 44}));
}
}
TEST(IndexTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
Index({}),
Index({1}),
Index({2}),
Index({1, 2}),
Index({1, 3}),
Index({2, 1}),
Index({1, 2, 3}),
Index({1, 2, 4}),
}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/index_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb5aeb38-bfcb-4297-bbdf-da5dfa1107c5 | cpp | tensorflow/tensorflow | memory | third_party/xla/xla/python/ifrt/memory.cc | third_party/xla/xla/python/ifrt/memory_test.cc | #include "xla/python/ifrt/memory.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/node_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/python/ifrt/device.h"
namespace xla {
namespace ifrt {
namespace {
struct MemoryKindsSet {
absl::Mutex mu;
absl::node_hash_set<std::string> memory_kinds_set ABSL_GUARDED_BY(mu);
};
}
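// Interns the memory kind string in a process-wide set so that the
// string_view held by this MemoryKind remains valid for the rest of the
// program's lifetime.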
MemoryKind::MemoryKind(std::optional<absl::string_view> memory_kind) {
static auto* const global_set = new MemoryKindsSet();
if (!memory_kind.has_value()) {
return;
}
absl::MutexLock lock(&global_set->mu);
auto it = global_set->memory_kinds_set.find(*memory_kind);
if (it == global_set->memory_kinds_set.end()) {
memory_kind_ =
*global_set->memory_kinds_set.insert(std::string(*memory_kind)).first;
} else {
memory_kind_ = *it;
}
}
std::string MemoryKind::ToString() const {
if (memory_kind_.has_value()) {
return std::string(*memory_kind_);
}
return "(default)";
}
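// If `memory_kind` is unspecified, substitutes the device's default memory
// kind when one is available; otherwise returns it unchanged.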
MemoryKind CanonicalizeMemoryKind(MemoryKind memory_kind, Device* device) {
if (memory_kind.memory_kind().has_value()) {
return memory_kind;
}
auto default_memory = device->DefaultMemory();
if (default_memory.ok()) {
return (*default_memory)->Kind();
}
return MemoryKind();
}
char Memory::ID = 0;
}
} | #include "xla/python/ifrt/memory.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
using ::testing::Optional;
namespace xla {
namespace ifrt {
namespace {
TEST(MemoryKindTest, EqualityForUnspecified) {
MemoryKind memory_kind1;
MemoryKind memory_kind2;
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, EqualityForSameString) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2("abc");
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, EqualityForSameStringContent) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2(absl::StrCat("ab", "c"));
EXPECT_EQ(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, InequalityForDifferentStringContent) {
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2("def");
EXPECT_NE(memory_kind1, memory_kind2);
}
TEST(MemoryKindTest, InequalityBetweenSpecifiedAndUnspecified) {
{
MemoryKind memory_kind1("abc");
MemoryKind memory_kind2;
EXPECT_NE(memory_kind1, memory_kind2);
}
{
MemoryKind memory_kind1;
MemoryKind memory_kind2("abc");
EXPECT_NE(memory_kind1, memory_kind2);
}
}
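// MemoryKind must remain valid after the string it was constructed from is
// destroyed, because the constructor interns a copy.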
TEST(MemoryKindTest, MemorySafety) {
auto memory_kind_str = std::make_unique<std::string>("abc");
MemoryKind memory_kind(*memory_kind_str);
memory_kind_str.reset();
EXPECT_THAT(memory_kind.memory_kind(), Optional(absl::string_view("abc")));
}
TEST(MemoryKindTest, EqualityForUnspecifiedAndNullopt) {
MemoryKind memory_kind1;
MemoryKind memory_kind2(std::nullopt);
EXPECT_EQ(memory_kind1, memory_kind2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/memory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/memory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a94ec4bd-38b0-43ce-b94c-ab20abb9e699 | cpp | tensorflow/tensorflow | sharding_serdes | third_party/xla/xla/python/ifrt/sharding_serdes.cc | third_party/xla/xla/python/ifrt/sharding_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/sharding_serdes.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
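// Serializes a SingleDeviceSharding as its device id plus optional memory
// kind; deserialization resolves the id via the caller-provided device lookup.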
class SingleDeviceShardingSerDes
: public llvm::RTTIExtends<SingleDeviceShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::SingleDeviceSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const SingleDeviceSharding& sharding =
llvm::cast<SingleDeviceSharding>(serializable);
SingleDeviceShardingProto proto;
proto.set_device_id(sharding.devices()->devices().front()->Id().value());
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
SingleDeviceShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized SimpleDeviceSharding");
}
TF_ASSIGN_OR_RETURN(Device * device,
deserialize_sharding_options->lookup_device(
DeviceId(proto.device_id())));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
return SingleDeviceSharding::Create(device, memory_kind);
}
static char ID;
};
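// Serializes an OpaqueSharding as its device list and optional memory kind;
// opaque shardings carry no shape information to persist.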
class OpaqueShardingSerDes
: public llvm::RTTIExtends<OpaqueShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::OpaqueSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const OpaqueSharding& sharding = llvm::cast<OpaqueSharding>(serializable);
OpaqueShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
OpaqueShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized OpaqueSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
return OpaqueSharding::Create(std::move(devices), memory_kind);
}
static char ID;
};
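// Serializes a ConcreteSharding. Exactly one of {static shape + per-shard
// shapes, dynamic shape + per-shard dynamic shapes} is written to the proto.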
class ConcreteShardingSerDes
: public llvm::RTTIExtends<ConcreteShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::ConcreteSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const ConcreteSharding& sharding =
llvm::cast<ConcreteSharding>(serializable);
ConcreteShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
if (sharding.has_static_shape()) {
*proto.mutable_shape() = sharding.shape().ToProto();
for (const Shape& shape : sharding.shard_shapes()) {
*proto.add_shard_shapes() = shape.ToProto();
}
} else {
*proto.mutable_dynamic_shape() = sharding.dynamic_shape().ToProto();
for (const DynamicShape& dynamic_shape :
sharding.shard_dynamic_shapes()) {
*proto.add_shard_dynamic_shapes() = dynamic_shape.ToProto();
}
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
ConcreteShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized ConcreteSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
if (proto.has_shape()) {
TF_ASSIGN_OR_RETURN(auto shape, Shape::FromProto(proto.shape()));
std::vector<Shape> shard_shapes;
shard_shapes.reserve(proto.shard_shapes_size());
for (const auto& shard_shape_proto : proto.shard_shapes()) {
TF_ASSIGN_OR_RETURN(auto shard_shape,
Shape::FromProto(shard_shape_proto));
shard_shapes.push_back(std::move(shard_shape));
}
return ConcreteSharding::Create(std::move(devices), memory_kind,
std::move(shape),
std::move(shard_shapes));
}
if (!proto.has_dynamic_shape()) {
return absl::InvalidArgumentError(
"ConcreteSharding must have Shape or DynamicShape.");
}
TF_ASSIGN_OR_RETURN(auto dynamic_shape,
DynamicShape::FromProto(proto.dynamic_shape()));
std::vector<DynamicShape> shard_dynamic_shapes;
shard_dynamic_shapes.reserve(proto.shard_dynamic_shapes_size());
for (const auto& shard_dynamic_shape_proto : proto.shard_dynamic_shapes()) {
TF_ASSIGN_OR_RETURN(auto dynamic_shape,
DynamicShape::FromProto(shard_dynamic_shape_proto));
shard_dynamic_shapes.push_back(std::move(dynamic_shape));
}
return ConcreteSharding::Create(std::move(devices), memory_kind,
std::move(dynamic_shape),
std::move(shard_dynamic_shapes));
}
static char ID;
};
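// Serializes a ConcreteEvenSharding: the overall shape, the single shard shape
// shared by every shard, and the fully-replicated flag.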
class ConcreteEvenShardingSerDes
: public llvm::RTTIExtends<ConcreteEvenShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::ConcreteEvenSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const ConcreteEvenSharding& sharding =
llvm::cast<ConcreteEvenSharding>(serializable);
ConcreteEvenShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
*proto.mutable_shape() = sharding.shape().ToProto();
*proto.mutable_shard_shape() = sharding.shard_shape().ToProto();
proto.set_is_fully_replicated(sharding.IsFullyReplicated());
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
ConcreteEvenShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized ConcreteEvenSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
TF_ASSIGN_OR_RETURN(auto shape, Shape::FromProto(proto.shape()));
TF_ASSIGN_OR_RETURN(auto shard_shape,
Shape::FromProto(proto.shard_shape()));
return ConcreteEvenSharding::Create(
std::move(devices), memory_kind, std::move(shape),
std::move(shard_shape), proto.is_fully_replicated());
}
static char ID;
};
[[maybe_unused]] char SingleDeviceShardingSerDes::ID = 0;
[[maybe_unused]] char OpaqueShardingSerDes::ID = 0;
[[maybe_unused]] char ConcreteShardingSerDes::ID = 0;
[[maybe_unused]] char ConcreteEvenShardingSerDes::ID = 0;
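// Static registration: evaluating these comma-operator initializers at
// start-up registers each SerDes implementation with the global registry.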
bool register_single_device_sharding_serdes = ([]{
RegisterSerDes<SingleDeviceSharding>(
std::make_unique<SingleDeviceShardingSerDes>());
}(), true);
bool register_opaque_sharding_serdes = ([]{
RegisterSerDes<OpaqueSharding>(
std::make_unique<OpaqueShardingSerDes>());
}(), true);
bool register_concrete_sharding_serdes = ([]{
RegisterSerDes<ConcreteSharding>(
std::make_unique<ConcreteShardingSerDes>());
}(), true);
bool register_concrete_even_sharding_serdes = ([]{
RegisterSerDes<ConcreteEvenSharding>(
std::make_unique<ConcreteEvenShardingSerDes>());
}(), true);
}
}
} | #include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
class ShardingSerDesTest : public test_util::DeviceTest {};
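// Each round-trip test serializes a sharding, deserializes it with a device
// lookup bound to the test client, and compares the recovered fields.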
TEST_P(ShardingSerDesTest, SingleDeviceShardingRoundTrip) {
auto sharding = SingleDeviceSharding::Create(
GetDevices({0})->devices().front(), MemoryKind("abc"));
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<SingleDeviceSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
}
TEST_P(ShardingSerDesTest, OpaqueShardingRoundTrip) {
auto sharding = OpaqueSharding::Create(GetDevices({0, 1}), MemoryKind("abc"));
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<OpaqueSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
}
TEST_P(ShardingSerDesTest, ConcreteShardingRoundTrip) {
auto sharding = ConcreteSharding::Create(
GetDevices({0, 1}), MemoryKind("abc"),
Shape({10, 20}),
{Shape({3, 20}), Shape({7, 20})});
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<ConcreteSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_THAT(out_sharding->shape(), sharding->shape());
EXPECT_THAT(out_sharding->shard_shapes(),
ElementsAreArray(sharding->shard_shapes()));
}
TEST_P(ShardingSerDesTest, ConcreteShardingWithDynamicShapeRoundTrip) {
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10, 20}),
BoundedDynamicShapeTag({false, true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape1,
DynamicShape::Create(Shape({3, 20}),
BoundedDynamicShapeTag({false, true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape2,
DynamicShape::Create(Shape({7, 20}),
BoundedDynamicShapeTag({false, true})));
auto sharding = ConcreteSharding::Create(
GetDevices({0, 1}), MemoryKind("abc"),
dynamic_shape,
{shard_dynamic_shape1, shard_dynamic_shape2});
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<ConcreteSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_THAT(out_sharding->dynamic_shape(), sharding->dynamic_shape());
EXPECT_THAT(out_sharding->shard_dynamic_shapes(),
ElementsAreArray(sharding->shard_dynamic_shapes()));
}
TEST_P(ShardingSerDesTest, ConcreteEvenShardingRoundTrip) {
auto sharding = ConcreteEvenSharding::Create(
GetDevices({0, 1}), MemoryKind("abc"),
Shape({10, 20}),
Shape({5, 20}), true);
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<ConcreteEvenSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_THAT(out_sharding->shape(), sharding->shape());
EXPECT_THAT(out_sharding->shard_shape(), sharding->shard_shape());
EXPECT_THAT(out_sharding->IsFullyReplicated(), sharding->IsFullyReplicated());
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ShardingSerDesTest,
testing::Values(test_util::DeviceTestParam{
2,
2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/sharding_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/sharding_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
acbd2b8c-12d1-4da6-b36c-77c6289018e6 | cpp | tensorflow/tensorflow | ifrt_ir_program_serdes | third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes.cc | third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Bytecode/BytecodeWriter.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/python/ifrt/ir/ifrt_ir_program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/support/module_parsing.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
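// SerDes for IFRT IR programs: writes the MLIR module as MLIR bytecode and
// re-parses it into a fresh MLIRContext on deserialization.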
class IfrtIRProgramSerDes
: public llvm::RTTIExtends<IfrtIRProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::IfrtIRProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const auto& program = llvm::cast<IfrtIRProgram>(serializable);
if (program.mlir_module == nullptr) {
return absl::InvalidArgumentError("Unable to serialize null MLIR module");
}
std::string serialized;
llvm::raw_string_ostream out(serialized);
mlir::BytecodeWriterConfig config;
mlir::BaseScopedDiagnosticHandler diagnostic_handler(
program.mlir_module->getContext());
if (mlir::failed(
mlir::writeBytecodeToFile(program.mlir_module, out, config))) {
return absl::InvalidArgumentError(
absl::StrFormat("Failed to serialize IFRT IR module string: %s",
diagnostic_handler.ConsumeStatus().message()));
}
return serialized;
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSIGN_OR_RETURN(auto module,
support::ParseMlirModuleString(serialized, *context));
return std::make_unique<IfrtIRProgram>(std::move(context),
std::move(module));
}
static char ID;
};
char IfrtIRProgramSerDes::ID = 0;
bool register_ifrt_ir_program_serdes = ([]() {
RegisterSerDes<IfrtIRProgram>(std::make_unique<IfrtIRProgramSerDes>());
}(), true);
}
}
} | #include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "xla/python/ifrt/ir/ifrt_ir_program.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/support/module_parsing.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
using ::tsl::testing::StatusIs;
std::string PrintModule(mlir::ModuleOp module) {
std::string module_str;
llvm::raw_string_ostream os(module_str);
module->print(os, mlir::OpPrintingFlags().enableDebugInfo());
return module_str;
}
TEST(IfrtIRProgramSerDesTest, RoundTrip) {
static constexpr absl::string_view kMlirModuleStr = R"(
!array = !ifrt.array<tensor<2xi32>, #ifrt.sharding_param<1 to [0] on 1>, [0]>
module {
func.func @main(%arg0: !array) -> !array attributes {ifrt.function} {
%0, %ctrl_0 = ifrt.Call @add_one::@main(%arg0) on devices [0]
: (!array) -> !array
return %0 : !array
}
module @add_one {
func.func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
%0 = mhlo.constant dense<1> : tensor<2xi32>
%1 = mhlo.add %arg0, %0 : tensor<2xi32>
return %1 : tensor<2xi32>
}
}
}
)";
Serialized serialized;
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
support::ParseMlirModuleString(kMlirModuleStr, *context));
auto initial_program =
std::make_unique<IfrtIRProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*initial_program));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<IfrtIRProgram> deserialized_program,
Deserialize<IfrtIRProgram>(serialized, nullptr));
EXPECT_EQ(PrintModule(initial_program->mlir_module),
PrintModule(deserialized_program->mlir_module));
}
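// A corrupted payload must fail to deserialize with a parse error rather than
// yield a partially constructed program.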
TEST(IfrtIRProgramSerDesTest, DeserializationError) {
static constexpr absl::string_view kMlirModuleStr = R"(
!array = !ifrt.array<tensor<2xi32>, #ifrt.sharding_param<1 to [0] on 1>, [0]>
module {
func.func @main(%arg0: !array) -> !array attributes {ifrt.function} {
%0, %ctrl_0 = ifrt.Call @add_one::@main(%arg0) on devices [0]
: (!array) -> !array
return %0 : !array
}
module @add_one {
func.func @main(%arg0: tensor<2xi32>) -> tensor<2xi32> {
%0 = mhlo.constant dense<1> : tensor<2xi32>
%1 = mhlo.add %arg0, %0 : tensor<2xi32>
return %1 : tensor<2xi32>
}
}
}
)";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
support::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<IfrtIRProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*program));
}
serialized.set_data("invalid data");
EXPECT_THAT(Deserialize<IfrtIRProgram>(serialized, nullptr),
StatusIs(Not(absl::StatusCode::kOk),
HasSubstr("Failed to parse IFRT IR module string")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/ir/ifrt_ir_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
134ec802-5d04-47ce-83bd-c7083b178e4b | cpp | tensorflow/tensorflow | sharding_conversions | third_party/xla/xla/python/ifrt/support/sharding_conversions.cc | third_party/xla/xla/python/ifrt/support/sharding_conversions_test.cc | #include "xla/python/ifrt/support/sharding_conversions.h"
#include <cstdint>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace ifrt {
namespace support {
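// Converts an IFRT sharding to an XLA OpSharding proto. Only
// ShardingParamSharding is supported; other sharding types are rejected.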
absl::StatusOr<OpSharding> ToOpSharding(const Sharding& sharding) {
if (auto* sharding_param_sharding =
llvm::dyn_cast<xla::ifrt::ShardingParamSharding>(&sharding)) {
return ToOpSharding(sharding_param_sharding->sharding_param(),
sharding_param_sharding->devices());
} else {
return absl::InvalidArgumentError(
"Only conversion from `ShardingParamSharding` to `OpSharding` is "
"supported.");
}
}
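// Converts a ShardingParam plus a device mapping into an OpSharding proto:
// fully replicated params become REPLICATED; otherwise a tiled (OTHER)
// sharding is emitted, with a trailing replicated tile dimension when the
// device count exceeds the number of shards.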
absl::StatusOr<OpSharding> ToOpSharding(
const ShardingParam& sharding_param,
const tsl::RCReference<xla::ifrt::DeviceList>& device_mapping) {
OpSharding op_sharding;
{
bool all_dim_replicated = true;
for (const int64_t dim_shard : sharding_param.dim_shards()) {
if (dim_shard != 1) {
all_dim_replicated = false;
break;
}
}
if (all_dim_replicated) {
op_sharding.set_type(OpSharding::REPLICATED);
return op_sharding;
}
}
op_sharding.set_type(OpSharding::OTHER);
auto* tile_assignment_dims = op_sharding.mutable_tile_assignment_dimensions();
int64_t cum_size = 1;
tile_assignment_dims->Reserve(sharding_param.dim_shards().size() + 1);
for (const int64_t dim_shard : sharding_param.dim_shards()) {
cum_size *= dim_shard;
tile_assignment_dims->Add(dim_shard);
}
int device_count = 1;
for (const int axis_size : sharding_param.minor_to_major().axis_sizes) {
device_count *= axis_size;
}
if (device_count != cum_size) {
op_sharding.set_replicate_on_last_tile_dim(true);
tile_assignment_dims->Add(device_count / cum_size);
}
llvm::SmallVector<int> logical_device_ids;
sharding_param.minor_to_major().ToDeviceList(logical_device_ids);
auto* tile_assignment_devices = op_sharding.mutable_tile_assignment_devices();
tile_assignment_devices->Reserve(logical_device_ids.size());
const absl::Span<Device* const> device_mapping_devices =
device_mapping->devices();
for (const int logical_device_id : logical_device_ids) {
if (logical_device_id < 0 ||
logical_device_id >= device_mapping_devices.size()) {
return absl::OutOfRangeError(
absl::StrCat("Can't map device with logical id ", logical_device_id,
". The logical device id should be within [0, ",
device_mapping_devices.size(), ")."));
}
tile_assignment_devices->Add(
device_mapping_devices[logical_device_id]->Id().value());
}
return op_sharding;
}
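// Converts a ShardingParam to an HloSharding backed by an iota tile
// assignment; a single device yields Replicate() and surplus devices yield a
// PartialTile() with a replicated last dimension.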
absl::StatusOr<HloSharding> ToHloSharding(const ShardingParam& sharding_param) {
auto axis_sizes = sharding_param.minor_to_major().axis_sizes;
llvm::SmallVector<int64_t> reshape_dims;
reshape_dims.reserve(axis_sizes.size());
int device_count = 1;
for (auto axis_size : llvm::reverse(axis_sizes)) {
reshape_dims.push_back(axis_size);
device_count *= axis_size;
}
if (device_count == 1) {
return HloSharding::Replicate();
}
int64_t cum_size = 1;
llvm::SmallVector<int64_t> dims;
dims.reserve(sharding_param.dim_shards().size());
for (const int64_t dim_shard : sharding_param.dim_shards()) {
cum_size *= dim_shard;
dims.push_back(dim_shard);
}
llvm::SmallVector<int, 4> permutation;
int num_axis = sharding_param.minor_to_major().permutation.size();
permutation.reserve(num_axis);
for (const int axis_id :
llvm::reverse(sharding_param.minor_to_major().permutation)) {
permutation.push_back(num_axis - axis_id - 1);
}
if (device_count != cum_size) {
dims.push_back(device_count / cum_size);
return HloSharding::PartialTile(
TileAssignment(dims, reshape_dims, permutation));
} else {
return HloSharding::IotaTile(dims, reshape_dims, permutation);
}
}
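// Best-effort inverse of ToHloSharding: supports replicated, single-device
// maximal, and iota-tiled HloShardings (optionally with one REPLICATED
// subgroup); anything else is rejected.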
absl::StatusOr<ShardingParam> ToShardingParam(const HloSharding& hlo_sharding,
int rank, int num_devices) {
ShardingParam::MinorToMajor minor_to_major;
if (hlo_sharding.IsReplicated() ||
(hlo_sharding.IsTileMaximal() && hlo_sharding.HasUniqueDevice() &&
num_devices == 1)) {
llvm::SmallVector<int64_t> dim_shards(rank, 1);
minor_to_major.permutation.push_back(0);
minor_to_major.axis_sizes.push_back(num_devices);
return ShardingParam(dim_shards, std::move(minor_to_major));
} else if (hlo_sharding.IsTiled()) {
const xla::TileAssignment& tile_assignment = hlo_sharding.tile_assignment();
if (!tile_assignment.iota()) {
return absl::InvalidArgumentError(absl::StrCat(
"Conversion from `HloSharding` without `IotaTileAssignment` is not "
"supported; sharding=",
hlo_sharding.ToString()));
}
if (rank != hlo_sharding.TiledDataRank()) {
return absl::InvalidArgumentError(absl::StrFormat(
"`TiledData` expected to have have %d dimensions, but has %d "
"dimensions; sharding=%s",
rank, hlo_sharding.TiledDataRank(), hlo_sharding.ToString()));
}
if (hlo_sharding.subgroup_types().size() > 1 ||
(hlo_sharding.subgroup_types().size() == 1 &&
hlo_sharding.subgroup_types()[0] != xla::OpSharding::REPLICATED)) {
return absl::InvalidArgumentError(absl::StrCat(
"Unsupported conversion to `ShardingParam` from `HloSharding` that "
"has more than a subgroup or a subgroup that is not REPLICATED; "
"sharding=",
hlo_sharding.ToString()));
}
llvm::SmallVector<int64_t> dim_shards(tile_assignment.dimensions().begin(),
tile_assignment.dimensions().end());
if (hlo_sharding.ReplicateOnLastTileDim() ||
(hlo_sharding.subgroup_types().size() == 1 &&
hlo_sharding.subgroup_types()[0] == xla::OpSharding::REPLICATED)) {
dim_shards.pop_back();
}
if (tile_assignment.iota()->reshape_dims().empty()) {
minor_to_major.permutation.push_back(0);
minor_to_major.axis_sizes.push_back(num_devices);
} else {
for (auto reshape_dim :
llvm::reverse(tile_assignment.iota()->reshape_dims())) {
minor_to_major.axis_sizes.push_back(reshape_dim);
}
int num_axis = tile_assignment.iota()->transpose_perm().size();
for (int axis_id :
llvm::reverse(tile_assignment.iota()->transpose_perm())) {
minor_to_major.permutation.push_back(num_axis - axis_id - 1);
}
}
return ShardingParam(dim_shards, std::move(minor_to_major));
}
return absl::UnimplementedError(
absl::StrCat("Unsupported conversion to `ShardingParam` from "
"`HloSharding`; sharding=",
hlo_sharding.ToString()));
}
}
}
} | #include "xla/python/ifrt/support/sharding_conversions.h"
#include <memory>
#include <numeric>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/mock.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace support {
namespace {
using ::testing::Return;
using ::tsl::testing::StatusIs;
using xla::HloSharding;
absl::StatusOr<HloSharding> ToHloShardingViaOpSharding(
const ShardingParam& sharding_param,
const tsl::RCReference<DeviceList>& device_list) {
TF_ASSIGN_OR_RETURN(xla::OpSharding op_sharding,
ToOpSharding(sharding_param, device_list));
return HloSharding::FromProto(op_sharding);
}
struct ShardingConversionTestClientState {
absl::flat_hash_map<DeviceId, std::unique_ptr<Device>> device_map;
std::vector<Device*> devices;
};
std::shared_ptr<MockClient> MakeTestClient(int num_devices) {
auto state = std::make_shared<ShardingConversionTestClientState>();
state->devices.reserve(num_devices);
for (int i = 0; i < num_devices; ++i) {
auto device = std::make_unique<MockDevice>();
ON_CALL(*device, Id).WillByDefault(Return(DeviceId(i)));
state->devices.push_back(device.get());
state->device_map.insert({DeviceId(i), std::move(device)});
}
auto client = std::make_shared<MockClient>();
ON_CALL(*client, devices)
.WillByDefault(
[state]() -> absl::Span<Device* const> { return state->devices; });
return client;
}
class ShardingConversionsTest : public testing::TestWithParam<int> {
public:
void SetUp() override { client_ = MakeTestClient(GetParam()); }
tsl::RCReference<DeviceList> GetDevices(
absl::Span<const int> device_indices) {
return test_util::GetDevices(client_.get(), device_indices).value();
}
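  // Checks that the ShardingParam and the HloSharding induce the same
  // per-device tiling of `shape`: identical tile origins and tile shapes for
  // every device.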
void AssertSameTiling(const ShardingParam& sharding_param,
const HloSharding& hlo_sharding, const Shape& shape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const Sharding> sharding,
ShardingParamSharding::Create(
sharding_param, device_list, MemoryKind()));
const xla::Shape xla_shape(PrimitiveType::F16, shape.dims(), {}, {});
TF_ASSERT_OK_AND_ASSIGN(const std::vector<IndexDomain> index_domains,
sharding->IndexDomains(shape));
ASSERT_EQ(index_domains.size(),
hlo_sharding.tile_assignment().num_elements());
const xla::Shape xla_tile_shape = hlo_sharding.TileShape(xla_shape);
for (int i = 0; i < index_domains.size(); ++i) {
SCOPED_TRACE(absl::StrCat("on device ", i));
EXPECT_EQ(index_domains[i].origin().elements(),
hlo_sharding.TileOffsetForDevice(xla_shape, i));
EXPECT_EQ(index_domains[i].shape().dims(), xla_tile_shape.dimensions());
}
}
private:
std::shared_ptr<Client> client_;
};
TEST_P(ShardingConversionsTest, Replicated) {
ShardingParam expected_sharding_param{
{1, 1, 1},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(), "{replicated}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 3, 6));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(hlo_iota_sharding, actual_hlo_sharding);
}
TEST_P(ShardingConversionsTest, SingleDeviceReplicated) {
ShardingParam expected_sharding_param{
{1, 1}, {{0}, {1}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param, GetDevices({0})));
EXPECT_EQ(hlo_sharding.ToString(), "{replicated}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 2, 1));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, Permutation) {
ShardingParam expected_sharding_param{
{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[2,1,3]0,3,1,4,2,5}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 3, 6));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, Partial) {
ShardingParam expected_sharding_param{
{2, 1}, {{0, 1}, {2, 3}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3, 4, 5})));
EXPECT_EQ(hlo_sharding.ToString(),
"{devices=[2,1,3]0,1,2,3,4,5 last_tile_dim_replicate}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 2, 6));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(hlo_iota_sharding, actual_hlo_sharding);
}
TEST_P(ShardingConversionsTest, OneDimToTwoAxes) {
ShardingParam expected_sharding_param{
{4}, {{1, 0}, {2, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_iota_sharding,
ToHloSharding(expected_sharding_param));
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({0, 1, 2, 3})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[4]0,2,1,3}");
EXPECT_EQ(hlo_sharding, hlo_iota_sharding);
TF_ASSERT_OK_AND_ASSIGN(auto sharding_param,
ToShardingParam(hlo_iota_sharding, 1, 4));
EXPECT_EQ(expected_sharding_param, sharding_param);
}
TEST_P(ShardingConversionsTest, NonTrivialDeviceAssignment) {
ShardingParam expected_sharding_param{
{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(expected_sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(
const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(expected_sharding_param,
GetDevices({6, 5, 4, 3, 2, 1})));
EXPECT_EQ(hlo_sharding.ToString(), "{devices=[2,1,3]6,3,5,2,4,1}");
}
TEST_P(ShardingConversionsTest, VerifyIncorrectShardings) {
ShardingParam different_permutation_and_axis{
{1, 1}, {{0, 1}, {2}}};
EXPECT_FALSE(different_permutation_and_axis.verify().ok());
ShardingParam too_many_slices{{2, 2},
{{0}, {2}}};
EXPECT_FALSE(too_many_slices.verify().ok());
ShardingParam incorrect_permutation{
{4, 1},
{{0, 1, 1}, {2, 2, 2}}};
EXPECT_FALSE(incorrect_permutation.verify().ok());
}
TEST_P(ShardingConversionsTest, ErrorOnDeviceAssignment) {
ShardingParam sharding_param{{2, 1, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(sharding_param.verify());
EXPECT_THAT(
ToHloShardingViaOpSharding(sharding_param, GetDevices({6, 5, 4, 3, 2})),
StatusIs(absl::StatusCode::kOutOfRange,
::testing::HasSubstr("Can't map device with logical id 5")));
}
TEST_P(ShardingConversionsTest, ShardingParamFullySharded) {
ShardingParam sharding_param{{2, 3},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, ShardingParamWithPermutation) {
ShardingParam sharding_param{{2, 3},
{{1, 0}, {3, 2}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, ShardingParamWithReplication) {
ShardingParam sharding_param{{2, 1},
{{0, 1}, {2, 3}}};
TF_EXPECT_OK(sharding_param.verify());
TF_ASSERT_OK_AND_ASSIGN(const HloSharding hlo_sharding,
ToHloShardingViaOpSharding(
sharding_param, GetDevices({0, 1, 2, 3, 4, 5})));
AssertSameTiling(sharding_param, hlo_sharding, Shape({6, 6}));
}
TEST_P(ShardingConversionsTest, OpShardingReplicated) {
OpSharding op_sharding;
op_sharding.set_type(OpSharding::REPLICATED);
TF_ASSERT_OK_AND_ASSIGN(auto hlo_sharding,
HloSharding::FromProto(op_sharding));
TF_ASSERT_OK_AND_ASSIGN(auto actual, ToShardingParam(hlo_sharding, 2, 6));
ShardingParam expected{{1, 1},
{{0}, {6}}};
TF_EXPECT_OK(expected.verify());
EXPECT_EQ(actual, expected);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ShardingConversionsTest,
testing::Values(7));
struct HloShardingTestStruct {
HloSharding hlo_sharding;
int rank;
int num_devices;
};
class HloShardingToShardingParamTest
: public testing::TestWithParam<HloShardingTestStruct> {
public:
void SetUp() override {
const auto& param = GetParam();
client_ = MakeTestClient(param.num_devices);
}
tsl::RCReference<DeviceList> GetDevices(
absl::Span<const int> device_indices) {
return test_util::GetDevices(client_.get(), device_indices).value();
}
private:
std::shared_ptr<Client> client_;
};
TEST_P(HloShardingToShardingParamTest, HloShardingToShardingParam) {
const auto& param = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
auto sharding_param,
ToShardingParam(param.hlo_sharding, param.rank, param.num_devices));
EXPECT_TRUE(sharding_param.verify().ok());
TF_ASSERT_OK_AND_ASSIGN(auto actual_hlo_sharding,
ToHloSharding(sharding_param));
EXPECT_EQ(param.hlo_sharding, actual_hlo_sharding);
std::vector<int> device_ids(param.num_devices);
std::iota(device_ids.begin(), device_ids.end(), 0);
TF_ASSERT_OK_AND_ASSIGN(
auto hlo_via_op_sharding,
ToHloShardingViaOpSharding(sharding_param,
GetDevices(absl::MakeSpan(device_ids))));
EXPECT_EQ(param.hlo_sharding, hlo_via_op_sharding);
}
INSTANTIATE_TEST_SUITE_P(
HloShardingConversionTests, HloShardingToShardingParamTest,
testing::ValuesIn<HloShardingTestStruct>({
{HloSharding::IotaTile({4, 2}), 2, 8},
{HloSharding::IotaTile({2, 4}, {4, 2}, {1, 0}), 2, 8},
{HloSharding::IotaTile({8, 1}), 2, 8},
{HloSharding::IotaTile({8, 1}, {4, 2}, {1, 0}), 2, 8},
{HloSharding::PartialTile(TileAssignment({4, 1, 2}, {8}, {0})), 2, 8},
{HloSharding::PartialTile(TileAssignment({2, 1, 4}, {4, 2}, {1, 0})), 2,
8},
{HloSharding::PartialTile(TileAssignment({1, 4, 2}, {8}, {0})), 2, 8},
{HloSharding::PartialTile(TileAssignment({1, 2, 4}, {4, 2}, {1, 0})), 2,
8},
{HloSharding::PartialTile(TileAssignment({4, 3, 2}, {2, 3, 4},
{2, 1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({4, 2, 3}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({6, 1, 4}, {24}, {0})), 2, 24},
{HloSharding::PartialTile(TileAssignment({12, 1, 2}, {2, 12}, {1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({8, 1, 3}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({2, 1, 12}, {24}, {0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({3, 1, 8}, {2, 3, 4},
{1, 0, 2})),
2, 24},
{HloSharding::PartialTile(TileAssignment({1, 4, 6}, {6, 4}, {1, 0})), 2,
24},
{HloSharding::PartialTile(TileAssignment({1, 12, 2}, {2, 12}, {1, 0})),
2, 24},
{HloSharding::PartialTile(TileAssignment({3, 2, 1, 4}, {2, 3, 4},
{1, 0, 2})),
3, 24},
{HloSharding::PartialTile(TileAssignment({2, 4, 1, 3}, {2, 3, 4},
{0, 2, 1})),
3, 24},
{HloSharding::PartialTile(TileAssignment({4, 3, 1, 2}, {2, 3, 4},
{2, 1, 0})),
3, 24},
{HloSharding::PartialTile(TileAssignment({12, 1, 1, 2}, {2, 12},
{1, 0})),
3, 24},
}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/support/sharding_conversions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/support/sharding_conversions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d0c9aec6-77ab-4a35-8e60-bb07389258da | cpp | tensorflow/tensorflow | hlo_program_serdes | third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes.cc | third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/Serialization.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/serdes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
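// SerDes for HLO programs: clones the module, serializes it as a versioned
// portable StableHLO artifact, and legalizes StableHLO back to MHLO on
// deserialization.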
class HloProgramSerDes : public llvm::RTTIExtends<HloProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::XlaProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const auto& program = llvm::cast<HloProgram>(serializable);
if (program.mlir_module == nullptr) {
return absl::InvalidArgumentError("Unable to serialize null MLIR module");
}
mlir::OwningOpRef<mlir::ModuleOp> module(
llvm::cast<mlir::ModuleOp>(program.mlir_module->clone()));
TF_ASSIGN_OR_RETURN(std::string serialized,
xla::SerializeUsingVersionedStablehlo(
*module, xla::GetDefaultStablehloVersion()));
return serialized;
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions>) override {
auto context = std::make_unique<mlir::MLIRContext>(
mlir::MLIRContext::Threading::DISABLED);
mlir::BaseScopedDiagnosticHandler diagnostic_handler(context.get());
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::stablehlo::deserializePortableArtifact(serialized, context.get());
if (!module) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(
absl::StrCat("Failed to deserialize StableHLO module;\n\nDetailed "
"error from MLIR: ",
status.message()));
}
mlir::PassManager pm(context.get());
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
if (!mlir::succeeded(pm.run(*module))) {
const absl::Status status = diagnostic_handler.ConsumeStatus();
return absl::InvalidArgumentError(absl::StrCat(
"Failed to legalize StableHLO to MHLO;\n\nDetailed error from MLIR: ",
status.message()));
}
return std::make_unique<HloProgram>(std::move(context), std::move(module));
}
static char ID;
};
char HloProgramSerDes::ID = 0;
bool register_xla_program_serdes = ([]() {
RegisterSerDes<HloProgram>(std::make_unique<HloProgramSerDes>());
}(), true);
}
}
} | #include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/DebugStringHelper.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/serdes.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::HasSubstr;
using ::testing::Not;
using ::tsl::testing::StatusIs;
TEST(HloProgramSerDesTest, RoundTrip) {
static constexpr absl::string_view kMlirModuleStr = R"(
module {
func.func @main(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
%0 = "mhlo.copy"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32>
%1 = mhlo.constant dense<1.000000e+00> : tensor<f32>
%2 = "mhlo.broadcast"(%1) {broadcast_sizes = dense<[2, 3]> : tensor<2xi64>} : (tensor<f32>) -> tensor<2x3xf32>
%3 = mhlo.add %0, %2 : tensor<2x3xf32>
return %3 : tensor<2x3xf32>
}
})";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<HloProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*program));
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloProgram> xla_program,
Deserialize<HloProgram>(serialized, nullptr));
bool has_unsupported_dialect = false;
xla_program->mlir_module->walk([&](mlir::Operation *op) {
if (!llvm::isa<mlir::BuiltinDialect, mlir::func::FuncDialect,
mlir::mhlo::MhloDialect>(op->getDialect())) {
LOG(ERROR) << "Found an op with an unsupported dialect: "
<< mlir::debugString(op);
has_unsupported_dialect = true;
}
});
EXPECT_FALSE(has_unsupported_dialect);
}
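// An op from an unregistered dialect cannot be lowered to StableHLO, so
// serialization is expected to fail with a descriptive error.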
TEST(HloProgramSerDesTest, SerializationError) {
static constexpr absl::string_view kMlirModuleStr = R"(
module {
func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
%0 = "UnknownOp"(%arg0) : (tensor<f32>) -> tensor<f32>
return %0 : tensor<f32>
}
})";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
context->allowUnregisteredDialects();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<HloProgram>(std::move(context), std::move(module));
EXPECT_THAT(Serialize(*program),
StatusIs(Not(absl::StatusCode::kOk),
HasSubstr("Failed to serialize StableHLO")));
}
}
TEST(HloProgramSerDesTest, DeserializationError) {
static constexpr absl::string_view kMlirModuleStr = R"(
module {
func.func @main(%arg0: tensor<f32>) -> tensor<f32> {
return %arg0 : tensor<f32>
}
})";
Serialized serialized;
{
auto context = std::make_unique<mlir::MLIRContext>();
TF_ASSERT_OK_AND_ASSIGN(
mlir::OwningOpRef<mlir::ModuleOp> module,
xla::ParseMlirModuleString(kMlirModuleStr, *context));
auto program =
std::make_unique<HloProgram>(std::move(context), std::move(module));
TF_ASSERT_OK_AND_ASSIGN(serialized, Serialize(*program));
}
serialized.set_data("invalid data");
EXPECT_THAT(Deserialize<HloProgram>(serialized, nullptr),
StatusIs(Not(absl::StatusCode::kOk),
HasSubstr("Failed to deserialize StableHLO module")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/hlo/hlo_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
174c1317-0ac1-4d0d-8fe0-760a2818d5a4 | cpp | tensorflow/tensorflow | stats_calculator | third_party/xla/xla/tsl/util/stats_calculator.cc | third_party/xla/xla/tsl/util/stats_calculator_test.cc | #include "xla/tsl/util/stats_calculator.h"
#include <algorithm>
#include <cstdint>
#include <iomanip>
#include <map>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace tsl {
constexpr int kNodeTypeWidth = 40;
StatsCalculator::StatsCalculator(const StatSummarizerOptions& options)
: options_(options) {}
std::string StatsCalculator::GetShortSummary() const {
std::stringstream stream;
stream << "Timings (microseconds): ";
run_total_us_.OutputToStream(&stream);
stream << std::endl;
stream << "Memory (bytes): ";
memory_.OutputToStream(&stream);
stream << std::endl;
stream << details_.size() << " nodes observed" << std::endl;
return stream.str();
}
std::ostream& InitField(std::ostream& stream, int width) {
stream << "\t" << std::right << std::setw(width) << std::fixed
<< std::setprecision(3);
return stream;
}
std::string StatsCalculator::HeaderString(const std::string& title) const {
std::stringstream stream;
stream << "============================== " << title
<< " ==============================" << std::endl;
if (options_.format_as_csv) {
stream << "node type, first, avg_ms, %, cdf%, mem KB, times called, "
"name";
} else {
InitField(stream, kNodeTypeWidth) << "[node type]";
InitField(stream, 9) << "[first]";
InitField(stream, 9) << "[avg ms]";
InitField(stream, 8) << "[%]";
InitField(stream, 8) << "[cdf%]";
InitField(stream, 10) << "[mem KB]";
InitField(stream, 9) << "[times called]";
stream << "\t"
<< "[Name]";
}
return stream.str();
}
std::string StatsCalculator::ColumnString(const Detail& detail,
const int64_t cumulative_stat_on_node,
const Stat<int64_t>& stat) const {
const double first_time_ms = detail.elapsed_time.first() / 1000.0;
const double avg_time_ms = detail.elapsed_time.avg() / 1000.0;
const double percentage = detail.elapsed_time.sum() * 100.0 / stat.sum();
const double cdf_percentage = (cumulative_stat_on_node * 100.0f) / stat.sum();
const int64_t times_called = detail.times_called / num_runs();
std::stringstream stream;
if (options_.format_as_csv) {
std::string name(detail.name);
std::replace(name.begin(), name.end(), ',', '\t');
stream << detail.type << ", " << first_time_ms << ", " << avg_time_ms
<< ", " << percentage << "%, " << cdf_percentage << "%, "
<< detail.mem_used.newest() / 1000.0 << ", " << times_called << ", "
<< name;
} else {
InitField(stream, kNodeTypeWidth) << detail.type;
InitField(stream, 9) << first_time_ms;
InitField(stream, 9) << avg_time_ms;
InitField(stream, 7) << percentage << "%";
InitField(stream, 7) << cdf_percentage << "%";
InitField(stream, 10) << detail.mem_used.newest() / 1000.0;
InitField(stream, 9) << times_called;
stream << "\t" << detail.name;
}
return stream.str();
}
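// Orders node details by the chosen metric in descending order (run order is
// inverted so earlier nodes come first), comparing fixed-width string
// renderings of each metric value via a max-heap.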
void StatsCalculator::OrderNodesByMetric(
SortingMetric metric, std::vector<const Detail*>* details) const {
std::priority_queue<std::pair<std::string, const Detail*>> sorted_list;
const int num_nodes = details_.size();
for (const auto& det : details_) {
const Detail* detail = &(det.second);
std::stringstream stream;
stream << std::setw(20) << std::right << std::setprecision(10)
<< std::fixed;
switch (metric) {
case BY_NAME:
stream << detail->name;
break;
case BY_RUN_ORDER:
stream << num_nodes - detail->run_order;
break;
case BY_TIME:
stream << detail->elapsed_time.avg();
break;
case BY_MEMORY:
stream << detail->mem_used.avg();
break;
case BY_TYPE:
stream << detail->type;
break;
default:
stream << "";
break;
}
sorted_list.emplace(stream.str(), detail);
}
while (!sorted_list.empty()) {
auto entry = sorted_list.top();
sorted_list.pop();
details->push_back(entry.second);
}
}
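// Aggregates per-node details into per-node-type counts, run-averaged times,
// most recently observed memory use, and run-averaged call counts, and
// accumulates the total time across all nodes.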
void StatsCalculator::ComputeStatsByType(
std::map<std::string, int64_t>* node_type_map_count,
std::map<std::string, int64_t>* node_type_map_time,
std::map<std::string, int64_t>* node_type_map_memory,
std::map<std::string, int64_t>* node_type_map_times_called,
int64_t* accumulated_us) const {
int64_t run_count = run_total_us_.count();
for (const auto& det : details_) {
const std::string node_name = det.first;
const Detail& detail = det.second;
int64_t curr_time_val =
static_cast<int64_t>(detail.elapsed_time.sum() / run_count);
*accumulated_us += curr_time_val;
int64_t curr_memory_val = detail.mem_used.newest();
const std::string& node_type = detail.type;
(*node_type_map_count)[node_type] += 1;
(*node_type_map_time)[node_type] += curr_time_val;
(*node_type_map_memory)[node_type] += curr_memory_val;
(*node_type_map_times_called)[node_type] += detail.times_called / run_count;
}
}
std::string StatsCalculator::GetStatsByNodeType() const {
std::stringstream stream;
stream << "Number of nodes executed: " << details_.size() << std::endl;
stream << "============================== Summary by node type "
"=============================="
<< std::endl;
std::map<std::string, int64_t> node_type_map_count;
std::map<std::string, int64_t> node_type_map_time;
std::map<std::string, int64_t> node_type_map_memory;
std::map<std::string, int64_t> node_type_map_times_called;
int64_t accumulated_us = 0;
ComputeStatsByType(&node_type_map_count, &node_type_map_time,
&node_type_map_memory, &node_type_map_times_called,
&accumulated_us);
std::priority_queue<std::pair<int64_t, std::pair<std::string, int64_t>>>
timings;
for (const auto& node_type : node_type_map_time) {
const int64_t mem_used = node_type_map_memory[node_type.first];
timings.emplace(node_type.second,
std::pair<std::string, int64_t>(node_type.first, mem_used));
}
if (options_.format_as_csv) {
stream << "node type, count, avg_ms, avg %, cdf %, mem KB, times called\n";
} else {
InitField(stream, kNodeTypeWidth) << "[Node type]";
InitField(stream, 9) << "[count]";
InitField(stream, 10) << "[avg ms]";
InitField(stream, 11) << "[avg %]";
InitField(stream, 11) << "[cdf %]";
InitField(stream, 10) << "[mem KB]";
InitField(stream, 10) << "[times called]";
stream << std::endl;
}
float cdf = 0.0f;
while (!timings.empty()) {
auto entry = timings.top();
timings.pop();
const std::string node_type = entry.second.first;
const float memory = entry.second.second / 1000.0f;
const int64_t node_type_total_us = entry.first;
const float time_per_run_ms = node_type_total_us / 1000.0f;
const float percentage =
((entry.first / static_cast<float>(accumulated_us)) * 100.0f);
cdf += percentage;
if (options_.format_as_csv) {
stream << node_type << ", " << node_type_map_count[node_type] << ", "
<< time_per_run_ms << ", " << percentage << "%, " << cdf << "%, "
<< memory << ", " << node_type_map_times_called[node_type]
<< std::endl;
} else {
InitField(stream, kNodeTypeWidth) << node_type;
InitField(stream, 9) << node_type_map_count[node_type];
InitField(stream, 10) << time_per_run_ms;
InitField(stream, 10) << percentage << "%";
InitField(stream, 10) << cdf << "%";
InitField(stream, 10) << memory;
InitField(stream, 9) << node_type_map_times_called[node_type];
stream << std::endl;
}
}
stream << std::endl;
return stream.str();
}
std::string StatsCalculator::GetStatsByMetric(const std::string& title,
SortingMetric sorting_metric,
int num_stats) const {
std::vector<const Detail*> details;
OrderNodesByMetric(sorting_metric, &details);
double cumulative_stat_on_node = 0;
std::stringstream stream;
stream << HeaderString(title) << std::endl;
int stat_num = 0;
for (auto detail : details) {
++stat_num;
if (num_stats > 0 && stat_num > num_stats) {
break;
}
cumulative_stat_on_node += detail->elapsed_time.sum();
stream << ColumnString(*detail, cumulative_stat_on_node, run_total_us_)
<< std::endl;
}
stream << std::endl;
return stream.str();
}
std::string StatsCalculator::GetOutputString() const {
std::stringstream stream;
if (options_.show_run_order) {
stream << GetStatsByMetric("Run Order", BY_RUN_ORDER,
options_.run_order_limit);
}
if (options_.show_time) {
stream << GetStatsByMetric("Top by Computation Time", BY_TIME,
options_.time_limit);
}
if (options_.show_memory) {
stream << GetStatsByMetric("Top by Memory Use", BY_MEMORY,
options_.memory_limit);
}
if (options_.show_type) {
stream << GetStatsByNodeType();
}
if (options_.show_summary) {
stream << GetShortSummary() << std::endl;
}
return stream.str();
}
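// Records one observation for the named node, creating its Detail entry
// (type, name, run order) on first sight and updating the elapsed-time and
// memory statistics and the call count thereafter.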
void StatsCalculator::AddNodeStats(const std::string& name,
const std::string& type, int64_t run_order,
int64_t elapsed_time, int64_t mem_used) {
Detail* detail = nullptr;
if (details_.find(name) == details_.end()) {
details_.insert({name, {}});
detail = &details_.at(name);
detail->type = type;
detail->name = name;
detail->run_order = run_order;
} else {
detail = &details_.at(name);
}
detail->elapsed_time.UpdateStat(elapsed_time);
detail->mem_used.UpdateStat(mem_used);
detail->times_called++;
}
} | #include "xla/tsl/util/stats_calculator.h"
#include <cfloat>
#include <cmath>
#include "tsl/platform/test.h"
namespace tsl {
namespace {
using Detail = StatsCalculator::Detail;
TEST(StatsCalculatorTest, TotalTimeMs) {
auto options = StatSummarizerOptions();
StatsCalculator calc(options);
EXPECT_EQ(0, calc.num_runs());
calc.UpdateRunTotalUs(1);
EXPECT_EQ(1, calc.num_runs());
calc.UpdateRunTotalUs(2);
EXPECT_EQ(2, calc.num_runs());
auto run_time_us = calc.run_total_us();
EXPECT_EQ(1, run_time_us.min());
EXPECT_FLOAT_EQ(1.5, run_time_us.avg());
}
TEST(StatsCalculatorTest, AddNodeStatsUpdate) {
auto options = StatSummarizerOptions();
StatsCalculator calc(options);
EXPECT_TRUE(calc.GetDetails().empty());
const int64_t node1_run_order = 1;
const int64_t run1_start_us = 1;
const int64_t run1_end_us = 2;
const int64_t run1_mem_used = 45;
calc.AddNodeStats("node1", "type_1", node1_run_order,
run1_end_us - run1_start_us, run1_mem_used);
ASSERT_EQ(1, calc.GetDetails().size());
const Detail& detail = calc.GetDetails().at("node1");
EXPECT_EQ(1, detail.times_called);
EXPECT_EQ("node1", detail.name);
EXPECT_EQ("type_1", detail.type);
EXPECT_EQ(node1_run_order, detail.run_order);
const int64_t run2_start_us = 3;
const int64_t run2_end_us = 5;
const int64_t run2_mem_used = 145;
calc.AddNodeStats("node1", "type_1", node1_run_order,
run2_end_us - run2_start_us, run2_mem_used);
EXPECT_EQ(1, calc.GetDetails().size());
EXPECT_EQ(2, detail.times_called);
EXPECT_EQ("node1", detail.name);
EXPECT_EQ("type_1", detail.type);
EXPECT_EQ(node1_run_order, detail.run_order);
EXPECT_EQ((run1_end_us - run1_start_us) + (run2_end_us - run2_start_us),
detail.elapsed_time.sum());
EXPECT_EQ(run1_mem_used + run2_mem_used, detail.mem_used.sum());
}
TEST(StatsCalculatorTest, UpdateStat) {
Stat<double> stat;
EXPECT_TRUE(stat.empty());
EXPECT_TRUE(stat.all_same());
stat.UpdateStat(1);
EXPECT_TRUE(stat.all_same());
stat.UpdateStat(-1.0);
EXPECT_FALSE(stat.all_same());
stat.UpdateStat(100);
stat.UpdateStat(0);
EXPECT_EQ(4, stat.count());
EXPECT_EQ(-1, stat.min());
EXPECT_EQ(100, stat.max());
EXPECT_EQ(25, stat.avg());
EXPECT_EQ(1, stat.first());
EXPECT_EQ(0, stat.newest());
EXPECT_EQ(10002, stat.squared_sum());
EXPECT_EQ(625, stat.avg() * stat.avg());
EXPECT_EQ(7502.0 / 3, stat.sample_variance());
EXPECT_NEAR(50.00666622228147160678152, std::sqrt(stat.sample_variance()),
FLT_EPSILON);
EXPECT_NEAR(7502.0 / 4, stat.variance(), FLT_EPSILON);
EXPECT_NEAR(43.30704330706496060826769, stat.std_deviation(), FLT_EPSILON);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/stats_calculator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/stats_calculator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ff2a112-c796-41ea-bcf0-13b123b8378b | cpp | tensorflow/tensorflow | device_name_utils | third_party/xla/xla/tsl/util/device_name_utils.cc | third_party/xla/xla/tsl/util/device_name_utils_test.cc | #include "xla/tsl/util/device_name_utils.h"
#include <algorithm>
#include "tsl/platform/errors.h"
namespace tsl {
static bool IsAlpha(char c) {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
static bool IsAlphaNumOrUnderscore(char c) {
return IsAlpha(c) || (c >= '0' && c <= '9') || c == '_';
}
static bool IsJobName(absl::string_view in) {
return !in.empty() && IsAlpha(in.front()) &&
std::all_of(in.begin(), in.end(), IsAlphaNumOrUnderscore);
}
static bool ConsumePrefix(absl::string_view* in, string* out,
absl::string_view prefix_terminators) {
if (in->empty() || !IsAlpha(in->front())) return false;
const auto end_it =
std::find_first_of(in->begin(), in->end(), prefix_terminators.begin(),
prefix_terminators.end());
if (!std::all_of(in->begin(), end_it, IsAlphaNumOrUnderscore)) {
return false;
}
out->assign(in->begin(), end_it);
in->remove_prefix(end_it - in->begin());
return true;
}
static bool ConsumeJobName(absl::string_view* in, string* job) {
return ConsumePrefix(in, job, "/");
}
static bool ConsumeDeviceType(absl::string_view* in, string* device_type) {
return ConsumePrefix(in, device_type, "/:");
}
static bool ConsumeNumber(absl::string_view* in, int* val) {
uint64 tmp;
if (str_util::ConsumeLeadingDigits(in, &tmp)) {
*val = tmp;
return true;
} else {
return false;
}
}
static string DeviceName(const string& job, int replica, int task,
const string& device_prefix, const string& device_type,
int id) {
CHECK(IsJobName(job)) << job;
CHECK_LE(0, replica);
CHECK_LE(0, task);
CHECK(!device_type.empty());
CHECK_LE(0, id);
return strings::StrCat("/job:", job, "/replica:", replica, "/task:", task,
device_prefix, device_type, ":", id);
}
string DeviceNameUtils::FullName(const string& job, int replica, int task,
const string& type, int id) {
return DeviceName(job, replica, task, "/device:", type, id);
}
namespace {
string LegacyName(const string& job, int replica, int task, const string& type,
int id) {
return DeviceName(job, replica, task, "/", absl::AsciiStrToLower(type), id);
}
}
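// Parses a full or partial device name such as
// "/job:worker/replica:0/task:1/device:GPU:2". Components that are absent or
// given as "*" leave the corresponding has_* field unset.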
bool DeviceNameUtils::ParseFullName(absl::string_view fullname, ParsedName* p) {
p->Clear();
if (fullname == "/") {
return true;
}
while (!fullname.empty()) {
bool progress = false;
if (absl::ConsumePrefix(&fullname, "/job:")) {
p->has_job = !absl::ConsumePrefix(&fullname, "*");
if (p->has_job && !ConsumeJobName(&fullname, &p->job)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/replica:")) {
p->has_replica = !absl::ConsumePrefix(&fullname, "*");
if (p->has_replica && !ConsumeNumber(&fullname, &p->replica)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/task:")) {
p->has_task = !absl::ConsumePrefix(&fullname, "*");
if (p->has_task && !ConsumeNumber(&fullname, &p->task)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/device:")) {
p->has_type = !absl::ConsumePrefix(&fullname, "*");
if (p->has_type && !ConsumeDeviceType(&fullname, &p->type)) {
return false;
}
if (!absl::ConsumePrefix(&fullname, ":")) {
p->has_id = false;
} else {
p->has_id = !absl::ConsumePrefix(&fullname, "*");
if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {
return false;
}
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/cpu:") ||
absl::ConsumePrefix(&fullname, "/CPU:")) {
p->has_type = true;
p->type = "CPU";
p->has_id = !absl::ConsumePrefix(&fullname, "*");
if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {
return false;
}
progress = true;
}
if (absl::ConsumePrefix(&fullname, "/gpu:") ||
absl::ConsumePrefix(&fullname, "/GPU:")) {
p->has_type = true;
p->type = "GPU";
p->has_id = !absl::ConsumePrefix(&fullname, "*");
if (p->has_id && !ConsumeNumber(&fullname, &p->id)) {
return false;
}
progress = true;
}
if (!progress) {
return false;
}
}
return true;
}
bool DeviceNameUtils::ParseFullOrLocalName(absl::string_view fullname,
ParsedName* p) {
return ParseFullName(fullname, p) || ParseLocalName(fullname, p);
}
namespace {
void CompleteName(const DeviceNameUtils::ParsedName& parsed_basename,
DeviceNameUtils::ParsedName* parsed_name) {
if (!parsed_name->has_job) {
parsed_name->job = parsed_basename.job;
parsed_name->has_job = true;
}
if (!parsed_name->has_replica) {
parsed_name->replica = parsed_basename.replica;
parsed_name->has_replica = true;
}
if (!parsed_name->has_task) {
parsed_name->task = parsed_basename.task;
parsed_name->has_task = true;
}
if (!parsed_name->has_type) {
parsed_name->type = parsed_basename.type;
parsed_name->has_type = true;
}
if (!parsed_name->has_id) {
parsed_name->id = parsed_basename.id;
parsed_name->has_id = true;
}
}
}
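// Canonicalizes `fullname` (given in full or local form) by filling in any
// missing components from the fully specified `basename`.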
absl::Status DeviceNameUtils::CanonicalizeDeviceName(absl::string_view fullname,
absl::string_view basename,
string* canonical_name) {
*canonical_name = "";
ParsedName parsed_basename;
if (!ParseFullName(basename, &parsed_basename)) {
return errors::InvalidArgument("Could not parse basename: ", basename,
" into a device specification.");
}
if (!(parsed_basename.has_job && parsed_basename.has_replica &&
parsed_basename.has_task && parsed_basename.has_type &&
parsed_basename.has_id)) {
return errors::InvalidArgument("Basename: ", basename,
" should be fully "
"specified.");
}
ParsedName parsed_name;
if (ParseLocalName(fullname, &parsed_name)) {
CompleteName(parsed_basename, &parsed_name);
*canonical_name = ParsedNameToString(parsed_name);
return absl::OkStatus();
}
if (ParseFullName(fullname, &parsed_name)) {
CompleteName(parsed_basename, &parsed_name);
*canonical_name = ParsedNameToString(parsed_name);
return absl::OkStatus();
}
return errors::InvalidArgument("Could not parse ", fullname,
" into a device "
"specification.");
}
string DeviceNameUtils::ParsedNameToString(const ParsedName& pn) {
string buf;
if (pn.has_job) strings::StrAppend(&buf, "/job:", pn.job);
if (pn.has_replica) strings::StrAppend(&buf, "/replica:", pn.replica);
if (pn.has_task) strings::StrAppend(&buf, "/task:", pn.task);
if (pn.has_type) {
strings::StrAppend(&buf, "/device:", pn.type, ":");
if (pn.has_id) {
strings::StrAppend(&buf, pn.id);
} else {
strings::StrAppend(&buf, "*");
}
}
return buf;
}
bool DeviceNameUtils::IsSpecification(const ParsedName& less_specific,
const ParsedName& more_specific) {
if (less_specific.has_job &&
(!more_specific.has_job || (less_specific.job != more_specific.job))) {
return false;
}
if (less_specific.has_replica &&
(!more_specific.has_replica ||
(less_specific.replica != more_specific.replica))) {
return false;
}
if (less_specific.has_task &&
(!more_specific.has_task || (less_specific.task != more_specific.task))) {
return false;
}
if (less_specific.has_type &&
(!more_specific.has_type || (less_specific.type != more_specific.type))) {
return false;
}
if (less_specific.has_id &&
(!more_specific.has_id || (less_specific.id != more_specific.id))) {
return false;
}
return true;
}
bool DeviceNameUtils::AreCompatibleDevNames(const ParsedName& a,
const ParsedName& b) {
if (a.has_job && b.has_job && (a.job != b.job)) {
return false;
}
if (a.has_replica && b.has_replica && (a.replica != b.replica)) {
return false;
}
if (a.has_task && b.has_task && (a.task != b.task)) {
return false;
}
if (a.has_type && b.has_type && (a.type != b.type)) {
return false;
}
if (a.has_id && b.has_id && (a.id != b.id)) {
return false;
}
return true;
}
void DeviceNameUtils::EnsureSpecification(ParsedName* more_specific,
const ParsedName& less_specific) {
if (less_specific.has_job) {
more_specific->has_job = true;
more_specific->job = less_specific.job;
}
if (less_specific.has_replica) {
more_specific->has_replica = true;
more_specific->replica = less_specific.replica;
}
if (less_specific.has_task) {
more_specific->has_task = true;
more_specific->task = less_specific.task;
}
if (less_specific.has_type) {
more_specific->has_type = true;
more_specific->type = less_specific.type;
}
if (less_specific.has_id) {
more_specific->has_id = true;
more_specific->id = less_specific.id;
}
}
bool DeviceNameUtils::IsCompleteSpecification(const ParsedName& pattern,
const ParsedName& name) {
CHECK(name.has_job && name.has_replica && name.has_task && name.has_type &&
name.has_id);
if (pattern.has_job && (pattern.job != name.job)) return false;
if (pattern.has_replica && (pattern.replica != name.replica)) return false;
if (pattern.has_task && (pattern.task != name.task)) return false;
if (pattern.has_type && (pattern.type != name.type)) return false;
if (pattern.has_id && (pattern.id != name.id)) return false;
return true;
}
namespace {
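// Merges fields of `other` into `target`. Conflicting jobs, replicas, or
// tasks are always an error; conflicting types or ids are an error unless
// allow_soft_placement is set, in which case they are cleared or, with
// override_conflicts, taken from `other`.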
absl::Status MergeDevNamesImpl(DeviceNameUtils::ParsedName* target,
const DeviceNameUtils::ParsedName& other,
bool allow_soft_placement,
bool override_conflicts) {
const auto& ParsedNameToString = DeviceNameUtils::ParsedNameToString;
if (other.has_job) {
if (target->has_job && target->job != other.job) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible jobs: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else {
target->has_job = other.has_job;
target->job = other.job;
}
}
if (other.has_replica) {
if (target->has_replica && target->replica != other.replica) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible replicas: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else {
target->has_replica = other.has_replica;
target->replica = other.replica;
}
}
if (other.has_task) {
if (target->has_task && target->task != other.task) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible tasks: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else {
target->has_task = other.has_task;
target->task = other.task;
}
}
if (other.has_type) {
if (target->has_type && target->type != other.type) {
if (!allow_soft_placement) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible types: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else if (override_conflicts) {
target->type = other.type;
} else {
target->has_id = false;
target->has_type = false;
return absl::OkStatus();
}
} else {
target->has_type = other.has_type;
target->type = other.type;
}
}
if (other.has_id) {
if (target->has_id && target->id != other.id) {
if (!allow_soft_placement) {
return errors::InvalidArgument(
"Cannot merge devices with incompatible ids: '",
ParsedNameToString(*target), "' and '", ParsedNameToString(other),
"'");
} else if (override_conflicts) {
target->id = other.id;
} else {
target->has_id = false;
return absl::OkStatus();
}
} else {
target->has_id = other.has_id;
target->id = other.id;
}
}
return absl::OkStatus();
}
}
absl::Status DeviceNameUtils::MergeDevNames(ParsedName* target,
const ParsedName& other,
bool allow_soft_placement) {
  return MergeDevNamesImpl(target, other, allow_soft_placement,
                           /*override_conflicts=*/false);
}
absl::Status DeviceNameUtils::MergeOverrideDevNames(ParsedName* target,
const ParsedName& other) {
  return MergeDevNamesImpl(target, other, /*allow_soft_placement=*/true,
                           /*override_conflicts=*/true);
}
void DeviceNameUtils::MergeUnsetDevNames(ParsedName* target,
const ParsedName& other) {
if (other.has_job && !target->has_job) {
target->has_job = other.has_job;
target->job = other.job;
}
if (other.has_replica && !target->has_replica) {
target->has_replica = other.has_replica;
target->replica = other.replica;
}
if (other.has_task && !target->has_task) {
target->has_task = other.has_task;
target->task = other.task;
}
if (other.has_type && !target->has_type) {
target->has_type = other.has_type;
target->type = other.type;
}
if (other.has_id && !target->has_id) {
target->has_id = other.has_id;
target->id = other.id;
}
}
bool DeviceNameUtils::IsSameAddressSpace(const ParsedName& a,
const ParsedName& b) {
return (a.has_job && b.has_job && (a.job == b.job)) &&
(a.has_replica && b.has_replica && (a.replica == b.replica)) &&
(a.has_task && b.has_task && (a.task == b.task));
}
bool DeviceNameUtils::IsSameAddressSpace(absl::string_view src,
absl::string_view dst) {
ParsedName x;
ParsedName y;
return ParseFullName(src, &x) && ParseFullName(dst, &y) &&
IsSameAddressSpace(x, y);
}
bool DeviceNameUtils::IsDifferentAddressSpace(const ParsedName& a,
const ParsedName& b) {
return (a.has_job && b.has_job && (a.job != b.job)) ||
(a.has_replica && b.has_replica && (a.replica != b.replica)) ||
(a.has_task && b.has_task && (a.task != b.task));
}
const DeviceNameUtils::ParsedName DeviceNameUtils::AddressSpace(
const ParsedName& name) {
ParsedName address_space;
address_space.has_job = name.has_job;
address_space.has_replica = name.has_replica;
address_space.has_task = name.has_task;
address_space.job = name.job;
address_space.replica = name.replica;
address_space.task = name.task;
return address_space;
}
string DeviceNameUtils::LocalName(absl::string_view type, int id) {
return strings::StrCat("/device:", type, ":", id);
}
namespace {
string LegacyLocalName(absl::string_view type, int id) {
return strings::StrCat(type, ":", id);
}
}
string DeviceNameUtils::LocalName(absl::string_view fullname) {
ParsedName x;
CHECK(ParseFullName(fullname, &x)) << fullname;
return LocalName(x.type, x.id);
}
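// Parses a local device string of the form "<type>:<id>", e.g. "CPU:0".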
bool DeviceNameUtils::ParseLocalName(absl::string_view name, ParsedName* p) {
if (!ConsumeDeviceType(&name, &p->type)) {
return false;
}
p->has_type = true;
if (!absl::ConsumePrefix(&name, ":")) {
return false;
}
if (!ConsumeNumber(&name, &p->id)) {
return false;
}
p->has_id = true;
return name.empty();
}
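// Splits a full device name into its task prefix ("/job:j/replica:r/task:t")
// and its local device part ("TYPE:id"); fails if type or id is missing.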
bool DeviceNameUtils::SplitDeviceName(absl::string_view name, string* task,
string* device) {
ParsedName pn;
if (ParseFullName(name, &pn) && pn.has_type && pn.has_id) {
task->clear();
    // Pre-size: 5 = strlen("/job:"), 9 = strlen("/replica:"),
    // 6 = strlen("/task:"); 4 is an estimate of the digits that follow.
    task->reserve(
        (pn.has_job ? (5 + pn.job.size()) : 0) +
        (pn.has_replica ? (9 + 4) : 0) +
        (pn.has_task ? (6 + 4) : 0));
if (pn.has_job) {
strings::StrAppend(task, "/job:", pn.job);
}
if (pn.has_replica) {
strings::StrAppend(task, "/replica:", pn.replica);
}
if (pn.has_task) {
strings::StrAppend(task, "/task:", pn.task);
}
device->clear();
strings::StrAppend(device, pn.type, ":", pn.id);
return true;
}
return false;
}
bool DeviceNameUtils::GetTaskName(const ParsedName& pn, string* task) {
if (pn.has_job && pn.has_replica && pn.has_task) {
task->clear();
    // Pre-size: 5 = strlen("/job:"), 9 = strlen("/replica:"),
    // 6 = strlen("/task:"); 4 is an estimate of the digits that follow.
    task->reserve((5 + pn.job.size()) +
                  (9 + 4) +
                  (6 + 4));
strings::StrAppend(task, "/job:", pn.job);
strings::StrAppend(task, "/replica:", pn.replica);
strings::StrAppend(task, "/task:", pn.task);
return true;
}
return false;
}
std::vector<string> DeviceNameUtils::GetNamesForDeviceMappings(
const ParsedName& pn) {
if (pn.has_job && pn.has_replica && pn.has_task && pn.has_type && pn.has_id) {
return {
DeviceNameUtils::FullName(pn.job, pn.replica, pn.task, pn.type, pn.id),
LegacyName(pn.job, pn.replica, pn.task, pn.type, pn.id)};
} else {
return {};
}
}
std::vector<string> DeviceNameUtils::GetLocalNamesForDeviceMappings(
const ParsedName& pn) {
if (pn.has_type && pn.has_id) {
return {DeviceNameUtils::LocalName(pn.type, pn.id),
LegacyLocalName(pn.type, pn.id)};
} else {
return {};
}
}
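// Rewrites `device_name` to address device CPU:0 on the same
// job/replica/task.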
absl::Status DeviceNameUtils::DeviceNameToCpuDeviceName(
const string& device_name, string* host_device_name) {
DeviceNameUtils::ParsedName device;
if (!DeviceNameUtils::ParseFullName(device_name, &device)) {
return errors::Internal("Could not parse device name ", device_name);
}
device.type = "CPU";
device.has_type = true;
device.id = 0;
device.has_id = true;
*host_device_name = DeviceNameUtils::ParsedNameToString(device);
return absl::OkStatus();
}
std::ostream& operator<<(std::ostream& os,
const DeviceNameUtils::ParsedName& x) {
os << DeviceNameUtils::ParsedNameToString(x);
return os;
}
} | #include "xla/tsl/util/device_name_utils.h"
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
bool RoundTripParsedName(const string& original, const string& expected) {
DeviceNameUtils::ParsedName p;
if (!DeviceNameUtils::ParseFullName(original, &p)) {
return false;
}
string round_tripped = DeviceNameUtils::ParsedNameToString(p);
return (round_tripped == expected);
}
enum NamePart { kJob = 0x01, kReplica = 0x02, kTask = 0x04, kDevice = 0x08 };
bool RoundTripPartialName(int parts_to_test, const std::vector<string>& parts,
bool explicitDevice) {
string original, expected;
if (parts_to_test & kJob) {
strings::StrAppend(&original, "/job:", parts[0]);
strings::StrAppend(&expected, "/job:", parts[0]);
}
if (parts_to_test & kReplica) {
strings::StrAppend(&original, "/replica:", parts[1]);
strings::StrAppend(&expected, "/replica:", parts[1]);
}
if (parts_to_test & kTask) {
strings::StrAppend(&original, "/task:", parts[2]);
strings::StrAppend(&expected, "/task:", parts[2]);
}
if (parts_to_test & kDevice) {
if (explicitDevice) {
strings::StrAppend(&original, "/device:", parts[3]);
strings::StrAppend(&expected, "/device:", parts[3]);
} else {
strings::StrAppend(&original, "/", parts[3]);
strings::StrAppend(&expected,
"/device:", absl::AsciiStrToUpper(parts[3]));
}
}
return RoundTripParsedName(original, expected);
}
}
TEST(DeviceNameUtilsTest, Basic) {
EXPECT_EQ(DeviceNameUtils::FullName("hello", 1, 2, "CPU", 3),
"/job:hello/replica:1/task:2/device:CPU:3");
{
DeviceNameUtils::ParsedName p;
EXPECT_FALSE(DeviceNameUtils::ParseFullName("foobar", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:123/replica:1/task:2/device:GPU:3", &p));
EXPECT_FALSE(
DeviceNameUtils::ParseFullName("/job:123/replica:1/task:2/gpu:", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:123/replica:1/task:2/device:gpu:", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:-1/task:2/device:GPU:3", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:1/task:-2/device:GPU:3", &p));
EXPECT_FALSE(
DeviceNameUtils::ParseFullName("/job:foo/replica:1/task:2/bar:3", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:1/task:2/device:GPU:3/extra", &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:foo/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_TRUE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.job, "foo");
EXPECT_EQ(p.replica, 1);
EXPECT_EQ(p.task, 2);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 3);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:foo_bar/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName(
"/job:foo_bar/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_TRUE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.job, "foo_bar");
EXPECT_EQ(p.replica, 1);
EXPECT_EQ(p.task, 2);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 3);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:foo_bar/replica:1/task:2/device:GPU:3", &p));
EXPECT_TRUE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_TRUE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.job, "foo_bar");
EXPECT_EQ(p.replica, 1);
EXPECT_EQ(p.task, 2);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 3);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName("/job:*/replica:4/gpu:*", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(
DeviceNameUtils::ParseFullName("/job:*/replica:4/device:GPU:*", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(
DeviceNameUtils::ParseFullName("/job:*/device:GPU/replica:4", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(
"/job:*/replica:4/device:myspecialdevice:13", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "myspecialdevice");
EXPECT_EQ(p.id, 13);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName("/", &p));
EXPECT_FALSE(p.has_job);
EXPECT_FALSE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_FALSE(p.has_type);
EXPECT_FALSE(p.has_id);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(
DeviceNameUtils::ParseFullName("/job:*/replica:4/device:GPU:5", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_TRUE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
EXPECT_EQ(p.id, 5);
}
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseFullName("/gpu:*/job:*/replica:4", &p));
EXPECT_FALSE(p.has_job);
EXPECT_TRUE(p.has_replica);
EXPECT_FALSE(p.has_task);
EXPECT_TRUE(p.has_type);
EXPECT_FALSE(p.has_id);
EXPECT_EQ(p.replica, 4);
EXPECT_EQ(p.type, "GPU");
}
EXPECT_TRUE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:foo/replica:1/task:2/device:GPU:4"));
EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:foo/replica:1/task:3/device:GPU:4"));
EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:foo/replica:10/task:2/device:GPU:4"));
EXPECT_FALSE(DeviceNameUtils::IsSameAddressSpace(
"/job:foo/replica:1/task:2/cpu:3",
"/job:bar/replica:1/task:2/device:GPU:4"));
EXPECT_EQ(DeviceNameUtils::LocalName("CPU", 1), "/device:CPU:1");
EXPECT_EQ(DeviceNameUtils::LocalName("GPU", 2), "/device:GPU:2");
EXPECT_EQ(DeviceNameUtils::LocalName("MySpecialDevice", 13),
"/device:MySpecialDevice:13");
EXPECT_EQ(
DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/device:CPU:3"),
"/device:CPU:3");
EXPECT_EQ(DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/cpu:3"),
"/device:CPU:3");
EXPECT_EQ(
DeviceNameUtils::LocalName("/job:foo/replica:1/task:2/device:abc:73"),
"/device:abc:73");
{
DeviceNameUtils::ParsedName p;
EXPECT_TRUE(DeviceNameUtils::ParseLocalName("CPU:10", &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullOrLocalName("CPU:10", &p));
EXPECT_EQ(p.type, "CPU");
EXPECT_EQ(p.id, 10);
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("cpu:abc", &p));
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("abc:", &p));
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("abc", &p));
EXPECT_FALSE(DeviceNameUtils::ParseLocalName("myspecialdevice", &p));
EXPECT_FALSE(DeviceNameUtils::ParseFullOrLocalName("myspecialdevice", &p));
}
{
for (int i = 0; i < 0x10; ++i) {
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "CPU:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "GPU:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "cpu:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "gpu:3"},
false));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "CPU:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "GPU:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "cpu:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "gpu:3"},
true));
EXPECT_TRUE(RoundTripPartialName(i, {"foo", "3", "2", "someDevice:3"},
true));
}
}
{
DeviceNameUtils::ParsedName x, y;
DeviceNameUtils::ParseFullName("/job:work/replica:1/task:3/device:GPU:*",
&x);
DeviceNameUtils::ParseFullName("/device:CPU:*", &y);
EXPECT_FALSE(DeviceNameUtils::AreCompatibleDevNames(x, y));
}
{
DeviceNameUtils::ParsedName x, y;
DeviceNameUtils::ParseFullName("/job:work/replica:1/task:3", &x);
DeviceNameUtils::ParseFullName("/device:CPU:*", &y);
EXPECT_TRUE(DeviceNameUtils::AreCompatibleDevNames(x, y));
}
}
static bool IsCSHelper(absl::string_view pattern, absl::string_view actual) {
DeviceNameUtils::ParsedName p, a;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a));
return DeviceNameUtils::IsCompleteSpecification(p, a);
}
TEST(DeviceNameUtilsTest, IsCompleteSpecification) {
EXPECT_TRUE(IsCSHelper("/job:*", "/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsCSHelper("/job:*/replica:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(
IsCSHelper("/job:*/task:*", "/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsCSHelper("/job:*/replica:*/task:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsCSHelper("/job:*/replica:*/gpu:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(
IsCSHelper("/cpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(
IsCSHelper("/device:GPU:2", "/job:worker/replica:1/task:2/device:GPU:1"));
EXPECT_TRUE(
IsCSHelper("/gpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
}
static bool IsSpecHelper(absl::string_view pattern, absl::string_view actual) {
DeviceNameUtils::ParsedName p, a;
EXPECT_TRUE(DeviceNameUtils::ParseFullName(pattern, &p));
EXPECT_TRUE(DeviceNameUtils::ParseFullName(actual, &a));
return DeviceNameUtils::IsSpecification(p, a);
}
TEST(DeviceNameUtilsTest, IsSpecification) {
EXPECT_TRUE(
IsSpecHelper("/job:*", "/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work/replica:1/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work/replica:1"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/replica:1"));
EXPECT_TRUE(IsSpecHelper("/job:*", "/job:work"));
EXPECT_TRUE(IsSpecHelper("/job:*/replica:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/gpu:*",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/device:GPU:3",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:1/task:2",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/job:work/replica:*/task:2",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/task:*", "/job:*/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/task:2", "/job:*/replica:1/task:2/device:GPU:3"));
EXPECT_TRUE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2/cpu:1"));
EXPECT_TRUE(IsSpecHelper("/cpu:0", "/cpu:0"));
EXPECT_TRUE(
IsSpecHelper("/gpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(
IsSpecHelper("/job:worker/replica:1/task:2/device:GPU:3", "/gpu:*"));
EXPECT_FALSE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2"));
EXPECT_FALSE(IsSpecHelper("/cpu:*", "/job:*/replica:1/task:2/device:GPU:1"));
EXPECT_FALSE(
IsSpecHelper("/cpu:*", "/job:worker/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(IsSpecHelper("/device:GPU:2",
"/job:worker/replica:1/task:2/device:GPU:1"));
EXPECT_FALSE(IsSpecHelper("/job:work/replica:*/task:0",
"/job:work/replica:1/task:2/device:GPU:3"));
EXPECT_FALSE(IsSpecHelper("/job:work/replica:0/task:2",
"/job:work/replica:*/task:2/device:GPU:3"));
}
TEST(DeviceNameUtilsTest, SplitDeviceName) {
string task;
string device;
EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(
"/job:foo/replica:1/task:2/cpu:1", &task, &device));
EXPECT_EQ("/job:foo/replica:1/task:2", task);
EXPECT_EQ("CPU:1", device);
EXPECT_TRUE(DeviceNameUtils::SplitDeviceName(
"/job:foo/cpu:1/task:2/replica:1", &task, &device));
EXPECT_EQ("/job:foo/replica:1/task:2", task);
EXPECT_EQ("CPU:1", device);
EXPECT_TRUE(
DeviceNameUtils::SplitDeviceName("/device:GPU:3", &task, &device));
EXPECT_EQ("", task);
EXPECT_EQ("GPU:3", device);
EXPECT_FALSE(DeviceNameUtils::SplitDeviceName("gpu:3", &task, &device));
EXPECT_FALSE(DeviceNameUtils::SplitDeviceName("/job:foo/task:2/replica:1",
&task, &device));
EXPECT_TRUE(DeviceNameUtils::SplitDeviceName("/device:myspecialdevice:3",
&task, &device));
EXPECT_EQ("", task);
EXPECT_EQ("myspecialdevice:3", device);
}
static DeviceNameUtils::ParsedName Name(const string& str) {
DeviceNameUtils::ParsedName ret;
CHECK(DeviceNameUtils::ParseFullName(str, &ret)) << "Invalid name: " << str;
return ret;
}
static void MergeDevNamesHelperImpl(const string& name_a, const string& name_b,
const string& expected_merge_name,
bool allow_soft_placement) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_a, Name(name_b),
allow_soft_placement));
DeviceNameUtils::ParsedName target_b = Name(name_b);
TF_EXPECT_OK(DeviceNameUtils::MergeDevNames(&target_b, Name(name_a),
allow_soft_placement));
EXPECT_EQ(target_a, target_b);
EXPECT_EQ(target_a, Name(expected_merge_name));
EXPECT_EQ(target_b, Name(expected_merge_name));
}
static void MergeDevNamesHelper(const string& name_a, const string& name_b,
const string& expected_merge_name) {
MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, false);
}
static void MergeDevNamesHelperAllowSoftPlacement(
const string& name_a, const string& name_b,
const string& expected_merge_name) {
MergeDevNamesHelperImpl(name_a, name_b, expected_merge_name, true);
}
static void MergeDevNamesError(const string& name_a, const string& name_b,
const string& expected_error_substr) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
absl::Status s = DeviceNameUtils::MergeDevNames(&target_a, Name(name_b));
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(), expected_error_substr)) << s;
}
static void MergeOverrideHelper(const string& target, const string& name,
const string& expected_merge_name) {
DeviceNameUtils::ParsedName parsed_target = Name(target);
TF_EXPECT_OK(
DeviceNameUtils::MergeOverrideDevNames(&parsed_target, Name(name)));
DeviceNameUtils::ParsedName parsed_expected = Name(expected_merge_name);
EXPECT_EQ(parsed_target, parsed_expected)
<< "parsed_target: " << DeviceNameUtils::ParsedNameToString(parsed_target)
<< " expected_name: "
<< DeviceNameUtils::ParsedNameToString(parsed_expected);
}
static void MergeUnsetDevNamesHelper(const string& name_a, const string& name_b,
const string& expected_merge_name_ab,
const string& expected_merge_name_ba) {
DeviceNameUtils::ParsedName target_a = Name(name_a);
DeviceNameUtils::MergeUnsetDevNames(&target_a, Name(name_b));
EXPECT_EQ(target_a, Name(expected_merge_name_ab));
DeviceNameUtils::ParsedName target_b = Name(name_b);
DeviceNameUtils::MergeUnsetDevNames(&target_b, Name(name_a));
EXPECT_EQ(target_b, Name(expected_merge_name_ba));
}
TEST(DeviceNameUtilsTest, MergeDevNames) {
MergeDevNamesHelper("", "", "");
MergeDevNamesHelper("/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1");
MergeDevNamesHelper("", "/job:foo", "/job:foo");
MergeDevNamesHelper("", "/replica:2", "/replica:2");
MergeDevNamesHelper("", "/task:7", "/task:7");
MergeDevNamesHelper("", "/device:GPU:1", "/device:GPU:1");
MergeDevNamesHelper("/job:foo", "/task:7", "/job:foo/task:7");
MergeDevNamesHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1");
MergeDevNamesHelper("/job:foo/replica:0", "/replica:0/task:1",
"/job:foo/replica:0/task:1");
MergeDevNamesHelper("", "/gpu:*", "/gpu:*");
MergeDevNamesHelper("/gpu:*", "/gpu:*", "/gpu:*");
MergeDevNamesHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1");
MergeDevNamesError("/job:foo", "/job:bar", "incompatible jobs");
MergeDevNamesError("/replica:0", "/replica:1", "incompatible replicas");
MergeDevNamesError("/task:0", "/task:1", "incompatible tasks");
MergeDevNamesError("/gpu:*", "/cpu:*", "incompatible types");
MergeDevNamesError("/device:GPU:0", "/device:GPU:1", "incompatible ids");
}
TEST(DeviceNameUtilsTest, MergeDevNamesAllowSoftPlacement) {
MergeDevNamesHelperAllowSoftPlacement("/gpu:*", "/cpu:1", "");
MergeDevNamesHelperAllowSoftPlacement("/cpu:*", "/device:GPU:1", "");
MergeDevNamesHelperAllowSoftPlacement("/device:GPU:1", "/device:GPU:2",
"/device:GPU:*");
}
TEST(DeviceNameUtilsTest, MergeOverrideDevNames) {
MergeOverrideHelper("", "", "");
MergeOverrideHelper("/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1");
MergeOverrideHelper("", "/job:foo", "/job:foo");
MergeOverrideHelper("", "/replica:2", "/replica:2");
MergeOverrideHelper("", "/task:7", "/task:7");
MergeOverrideHelper("", "/device:GPU:1", "/device:GPU:1");
MergeOverrideHelper("/job:foo", "/task:7", "/job:foo/task:7");
MergeOverrideHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1");
MergeOverrideHelper("/job:foo/replica:0", "/replica:0/task:1",
"/job:foo/replica:0/task:1");
MergeOverrideHelper("", "/gpu:*", "/gpu:*");
MergeOverrideHelper("/gpu:*", "/gpu:*", "/gpu:*");
MergeOverrideHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1");
MergeOverrideHelper("/gpu:0", "/cpu:1", "/cpu:1");
MergeOverrideHelper("/gpu:*", "/cpu:1", "/cpu:1");
MergeOverrideHelper("/cpu:*", "/device:GPU:1", "/gpu:1");
MergeOverrideHelper("/device:GPU:1", "/device:GPU:2", "/device:GPU:2");
MergeOverrideHelper("/job:foo/CPU:*", "/device:GPU:1", "/job:foo/GPU:1");
MergeOverrideHelper("/cpu:*", "/job:foo/device:GPU:1", "/job:foo/GPU:1");
MergeOverrideHelper("/task:0/cpu:*", "/device:GPU:1", "/task:0/GPU:1");
MergeOverrideHelper("/cpu:*", "/task:0/device:GPU:1", "/task:0/GPU:1");
}
TEST(DeviceNameUtilsTest, MergeUnsetDevNames) {
MergeUnsetDevNamesHelper("", "", "", "");
MergeUnsetDevNamesHelper(
"/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1",
"/job:foo/replica:1/task:2/cpu:1", "/job:foo/replica:1/task:2/cpu:1");
MergeUnsetDevNamesHelper("", "/job:foo", "/job:foo", "/job:foo");
MergeUnsetDevNamesHelper("", "/replica:2", "/replica:2", "/replica:2");
MergeUnsetDevNamesHelper("", "/task:7", "/task:7", "/task:7");
MergeUnsetDevNamesHelper("", "/device:GPU:1", "/device:GPU:1",
"/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo", "/task:7", "/job:foo/task:7",
"/job:foo/task:7");
MergeUnsetDevNamesHelper("/job:foo", "/device:GPU:1", "/job:foo/device:GPU:1",
"/job:foo/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo/replica:0", "/replica:0/task:1",
"/job:foo/replica:0/task:1",
"/job:foo/replica:0/task:1");
MergeUnsetDevNamesHelper("", "/gpu:*", "/gpu:*", "/gpu:*");
MergeUnsetDevNamesHelper("/gpu:*", "/gpu:*", "/gpu:*", "/gpu:*");
MergeUnsetDevNamesHelper("/device:GPU:1", "/gpu:*", "/device:GPU:1",
"/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo", "/job:bar", "/job:foo", "/job:bar");
MergeUnsetDevNamesHelper("/replica:0", "/replica:1", "/replica:0",
"/replica:1");
MergeUnsetDevNamesHelper("/task:0", "/task:1", "/task:0", "/task:1");
MergeUnsetDevNamesHelper("/gpu:*", "/cpu:*", "/gpu:*", "/cpu:*");
MergeUnsetDevNamesHelper("/device:GPU:0", "/device:GPU:1", "/device:GPU:0",
"/device:GPU:1");
MergeUnsetDevNamesHelper("/job:foo/device:GPU", "/job:bar",
"/job:foo/device:GPU", "/job:bar/device:GPU");
}
TEST(DeviceNameUtilsTest, GetNamesForDeviceMappings) {
DeviceNameUtils::ParsedName p =
Name("/job:foo/replica:10/task:0/device:GPU:1");
EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), ","),
"/job:foo/replica:10/task:0/device:GPU:1,"
"/job:foo/replica:10/task:0/gpu:1");
p.has_task = false;
EXPECT_EQ(absl::StrJoin(DeviceNameUtils::GetNamesForDeviceMappings(p), ","),
"");
}
TEST(DeviceNameUtilsTest, CanonicalizeDeviceName) {
string canonical_name;
{
string basename = "/job:foo/replica:10/task:0/device:CPU:0";
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/replica:10/task:0/device:CPU:1", basename, &canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/task:0/replica:10/device:CPU:1", basename, &canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/task:0/replica:10/cpu:1", basename, &canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:1", canonical_name);
TF_EXPECT_OK(DeviceNameUtils::CanonicalizeDeviceName("CPU:0", basename,
&canonical_name));
EXPECT_EQ("/job:foo/replica:10/task:0/device:CPU:0", canonical_name);
absl::Status s = DeviceNameUtils::CanonicalizeDeviceName(
"/job:foo/task:0/replica/cpu:1", basename, &canonical_name);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_EQ("", canonical_name);
}
{
string fullname = "/device:CPU:0";
absl::Status s = DeviceNameUtils::CanonicalizeDeviceName(
fullname, "/device:CPU:0", &canonical_name);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_EQ("", canonical_name);
s = DeviceNameUtils::CanonicalizeDeviceName(
fullname, "/job:foo/task:0/replica/cpu:1", &canonical_name);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_EQ("", canonical_name);
}
}
TEST(DeviceNameUtilsTest, CompareFullNames) {
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/cpu:0", "/job:foo/replica:0/task:0/cpu:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:1",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:1/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:1/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:goo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:GPU:0",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:CPU:1"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:1/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:1/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:goo/replica:0/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/job:foo/replica:0/task:0/device:GPU:0"));
EXPECT_FALSE(
DeviceNameUtils::CompareFullNames("/device:CPU:1", "unparseablename"));
EXPECT_TRUE(
DeviceNameUtils::CompareFullNames("unparseablename", "/device:CPU:1"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/replica:0/task:0/device:CPU:1",
"/job:foo/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(DeviceNameUtils::CompareFullNames(
"/job:foo/replica:0/task:0/device:CPU:0",
"/replica:0/task:0/device:CPU:0"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames(
"/replica:0/task:0/device:CPU:0", "/replica:0/task:0/device:CPU:1"));
EXPECT_TRUE(DeviceNameUtils::CompareFullNames("/task:0/device:CPU:0",
"/task:0/device:CPU:1"));
EXPECT_TRUE(
DeviceNameUtils::CompareFullNames("/device:CPU:0", "/device:CPU:1"));
}
static void BM_ParseFullName(::testing::benchmark::State& state) {
DeviceNameUtils::ParsedName p;
for (auto s : state) {
DeviceNameUtils::ParseFullName("/job:worker/replica:3/task:0/cpu:0", &p);
}
}
BENCHMARK(BM_ParseFullName);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/device_name_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd61695f-2245-4ebc-8116-3b853ed9f474 | cpp | tensorflow/tensorflow | command_line_flags | tensorflow/compiler/mlir/lite/tools/command_line_flags.cc | tensorflow/lite/tools/command_line_flags_test.cc | #include "tensorflow/compiler/mlir/lite/tools/command_line_flags.h"
#include <algorithm>
#include <cstring>
#include <functional>
#include <iomanip>
#include <numeric>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/strings/match.h"
namespace mlir {
namespace {
template <typename T>
std::string ToString(T val) {
std::ostringstream stream;
stream << val;
return stream.str();
}
template <>
std::string ToString(bool val) {
return val ? "true" : "false";
}
template <>
std::string ToString(const std::string& val) {
return val;
}
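// Matches `arg` against "--<flag>=<value>" (or treats the whole argument as
// the value for positional flags) and forwards the value to parse_func.
// Returns true if the argument named this flag; *value_parsing_ok reports
// whether the value itself parsed successfully.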
bool ParseFlag(const std::string& arg, int argv_position,
const std::string& flag, bool positional,
const std::function<bool(const std::string&, int argv_position)>&
parse_func,
bool* value_parsing_ok) {
if (positional) {
*value_parsing_ok = parse_func(arg, argv_position);
return true;
}
*value_parsing_ok = true;
std::string flag_prefix = "--" + flag + "=";
if (!absl::StartsWith(arg, flag_prefix)) {
return false;
}
bool has_value = arg.size() >= flag_prefix.size();
*value_parsing_ok = has_value;
if (has_value) {
*value_parsing_ok =
parse_func(arg.substr(flag_prefix.size()), argv_position);
}
return true;
}
template <typename T>
bool ParseFlag(const std::string& flag_value, int argv_position,
const std::function<void(const T&, int)>& hook) {
std::istringstream stream(flag_value);
T read_value;
stream >> read_value;
if (!stream.eof() && !stream.good()) {
return false;
}
hook(read_value, argv_position);
return true;
}
template <>
bool ParseFlag(const std::string& flag_value, int argv_position,
const std::function<void(const bool&, int)>& hook) {
if (flag_value != "true" && flag_value != "false" && flag_value != "0" &&
flag_value != "1") {
return false;
}
hook(flag_value == "true" || flag_value == "1", argv_position);
return true;
}
template <typename T>
bool ParseFlag(const std::string& flag_value, int argv_position,
const std::function<void(const std::string&, int)>& hook) {
hook(flag_value, argv_position);
return true;
}
}
#define CONSTRUCTOR_IMPLEMENTATION(flag_T, default_value_T, flag_enum_val) \
Flag::Flag(const char* name, \
             const std::function<void(const flag_T& /*flag_value*/,         \
                                      int /*argv_position*/)>& hook,        \
default_value_T default_value, const std::string& usage_text, \
FlagType flag_type) \
: name_(name), \
type_(flag_enum_val), \
value_hook_([hook](const std::string& flag_value, int argv_position) { \
return ParseFlag<flag_T>(flag_value, argv_position, hook); \
}), \
default_for_display_(ToString<default_value_T>(default_value)), \
usage_text_(usage_text), \
flag_type_(flag_type) {}
CONSTRUCTOR_IMPLEMENTATION(int32_t, int32_t, TYPE_INT32)
CONSTRUCTOR_IMPLEMENTATION(int64_t, int64_t, TYPE_INT64)
CONSTRUCTOR_IMPLEMENTATION(float, float, TYPE_FLOAT)
CONSTRUCTOR_IMPLEMENTATION(bool, bool, TYPE_BOOL)
CONSTRUCTOR_IMPLEMENTATION(std::string, const std::string&, TYPE_STRING)
#undef CONSTRUCTOR_IMPLEMENTATION
bool Flag::Parse(const std::string& arg, int argv_position,
bool* value_parsing_ok) const {
return ParseFlag(
arg, argv_position, name_, flag_type_ == kPositional,
[&](const std::string& read_value, int argv_position) {
return value_hook_(read_value, argv_position);
},
value_parsing_ok);
}
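// Parses *argc/argv against flag_list. Positional flags consume the leading
// arguments first, duplicate flag names reuse the first matched argument, and
// consumed arguments are removed from argv. Returns false on parse failures,
// on a missing required flag, or when "--help" remains unconsumed.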
bool Flags::Parse(int* argc, const char** argv,
const std::vector<Flag>& flag_list) {
bool result = true;
std::vector<bool> unknown_argvs(*argc, true);
std::unordered_map<std::string, int> processed_flags;
std::vector<int> sorted_idx(flag_list.size());
std::iota(std::begin(sorted_idx), std::end(sorted_idx), 0);
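  // Process flags ordered by type so that positional flags are handled before
  // named ones.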
std::sort(sorted_idx.begin(), sorted_idx.end(), [&flag_list](int a, int b) {
return flag_list[a].GetFlagType() < flag_list[b].GetFlagType();
});
int positional_count = 0;
for (int idx = 0; idx < sorted_idx.size(); ++idx) {
const Flag& flag = flag_list[sorted_idx[idx]];
const auto it = processed_flags.find(flag.name_);
if (it != processed_flags.end()) {
#ifndef NDEBUG
LOG(WARNING) << "Duplicate flags: " << flag.name_;
#endif
if (it->second != -1) {
bool value_parsing_ok;
flag.Parse(argv[it->second], it->second, &value_parsing_ok);
if (!value_parsing_ok) {
LOG(ERROR) << "Failed to parse flag '" << flag.name_
<< "' against argv '" << argv[it->second] << "'";
result = false;
}
continue;
} else if (flag.flag_type_ == Flag::kRequired) {
LOG(ERROR) << "Required flag not provided: " << flag.name_;
result = false;
break;
}
}
if (flag.flag_type_ == Flag::kPositional) {
if (++positional_count >= *argc) {
LOG(ERROR) << "Too few command line arguments.";
return false;
}
bool value_parsing_ok;
flag.Parse(argv[positional_count], positional_count, &value_parsing_ok);
if (!value_parsing_ok) {
LOG(ERROR) << "Failed to parse positional flag: " << flag.name_;
return false;
}
unknown_argvs[positional_count] = false;
processed_flags[flag.name_] = positional_count;
continue;
}
bool was_found = false;
for (int i = positional_count + 1; i < *argc; ++i) {
if (!unknown_argvs[i]) continue;
bool value_parsing_ok;
was_found = flag.Parse(argv[i], i, &value_parsing_ok);
if (!value_parsing_ok) {
LOG(ERROR) << "Failed to parse flag '" << flag.name_
<< "' against argv '" << argv[i] << "'";
result = false;
}
if (was_found) {
unknown_argvs[i] = false;
processed_flags[flag.name_] = i;
break;
}
}
if (was_found) continue;
processed_flags[flag.name_] = -1;
if (flag.flag_type_ == Flag::kRequired) {
LOG(ERROR) << "Required flag not provided: " << flag.name_;
result = false;
break;
}
}
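  // Compact argv in place, keeping only the arguments that no flag consumed.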
int dst = 1;
for (int i = 1; i < *argc; ++i) {
if (unknown_argvs[i]) {
argv[dst++] = argv[i];
}
}
*argc = dst;
return result && (*argc < 2 || std::strcmp(argv[1], "--help") != 0);
}
} | #include "tensorflow/lite/tools/command_line_flags.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace {
TEST(CommandLineFlagsTest, BasicUsage) {
int some_int32 = 10;
int some_int1 = 8;
int some_int2 = 9;
int64_t some_int64 = 21474836470;
bool some_switch = false;
std::string some_name = "something_a";
float some_float = -23.23f;
float float_1 = -23.23f;
bool some_bool = false;
bool some_numeric_bool = true;
const char* argv_strings[] = {"program_name",
"12.2",
"--some_int32=20",
"--some_int2=5",
"--some_int64=214748364700",
"--some_switch=true",
"--some_name=somethingelse",
"--some_float=42.0",
"--some_bool=true",
"--some_numeric_bool=0"};
int argc = 10;
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{
Flag::CreateFlag("some_int32", &some_int32, "some int32"),
Flag::CreateFlag("some_int64", &some_int64, "some int64"),
Flag::CreateFlag("some_switch", &some_switch, "some switch"),
Flag::CreateFlag("some_name", &some_name, "some name"),
Flag::CreateFlag("some_float", &some_float, "some float"),
Flag::CreateFlag("some_bool", &some_bool, "some bool"),
Flag::CreateFlag("some_numeric_bool", &some_numeric_bool,
"some numeric bool"),
Flag::CreateFlag("some_int1", &some_int1, "some int"),
Flag::CreateFlag("some_int2", &some_int2, "some int",
Flag::kRequired),
Flag::CreateFlag("float_1", &float_1, "some float",
Flag::kPositional),
});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(20, some_int32);
EXPECT_EQ(8, some_int1);
EXPECT_EQ(5, some_int2);
EXPECT_EQ(214748364700, some_int64);
EXPECT_TRUE(some_switch);
EXPECT_EQ("somethingelse", some_name);
EXPECT_NEAR(42.0f, some_float, 1e-5f);
EXPECT_NEAR(12.2f, float_1, 1e-5f);
EXPECT_TRUE(some_bool);
EXPECT_FALSE(some_numeric_bool);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, EmptyStringFlag) {
int argc = 2;
std::string some_string = "invalid";
const char* argv_strings[] = {"program_name", "--some_string="};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_string", &some_string, "some string")});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(some_string, "");
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, BadIntValue) {
int some_int = 10;
int argc = 2;
const char* argv_strings[] = {"program_name", "--some_int=notanumber"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int, "some int")});
EXPECT_FALSE(parsed_ok);
EXPECT_EQ(10, some_int);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, BadBoolValue) {
bool some_switch = false;
int argc = 2;
const char* argv_strings[] = {"program_name", "--some_switch=notabool"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_switch", &some_switch, "some switch")});
EXPECT_FALSE(parsed_ok);
EXPECT_FALSE(some_switch);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, BadFloatValue) {
float some_float = -23.23f;
int argc = 2;
const char* argv_strings[] = {"program_name", "--some_float=notanumber"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_float", &some_float, "some float")});
EXPECT_FALSE(parsed_ok);
EXPECT_NEAR(-23.23f, some_float, 1e-5f);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, RequiredFlagNotFound) {
float some_float = -23.23f;
int argc = 2;
const char* argv_strings[] = {"program_name", "--flag=12"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_flag", &some_float, "", Flag::kRequired)});
EXPECT_FALSE(parsed_ok);
EXPECT_NEAR(-23.23f, some_float, 1e-5f);
EXPECT_EQ(argc, 2);
}
TEST(CommandLineFlagsTest, NoArguments) {
float some_float = -23.23f;
int argc = 1;
const char* argv_strings[] = {"program_name"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_flag", &some_float, "", Flag::kRequired)});
EXPECT_FALSE(parsed_ok);
EXPECT_NEAR(-23.23f, some_float, 1e-5f);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, NotEnoughArguments) {
float some_float = -23.23f;
int argc = 1;
const char* argv_strings[] = {"program_name"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_flag", &some_float, "", Flag::kPositional)});
EXPECT_FALSE(parsed_ok);
EXPECT_NEAR(-23.23f, some_float, 1e-5f);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, PositionalFlagFailed) {
float some_float = -23.23f;
int argc = 2;
const char* argv_strings[] = {"program_name", "string"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_flag", &some_float, "", Flag::kPositional)});
EXPECT_FALSE(parsed_ok);
EXPECT_NEAR(-23.23f, some_float, 1e-5f);
EXPECT_EQ(argc, 2);
}
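// Matches `str` against `pat`, treating any run of whitespace in either
// string as interchangeable.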
static bool MatchWithAnyWhitespace(const std::string& str,
const std::string& pat) {
bool matching = true;
int pat_i = 0;
for (int str_i = 0; str_i != str.size() && matching; str_i++) {
if (isspace(str[str_i])) {
matching = (pat_i != pat.size() && isspace(pat[pat_i]));
} else {
while (pat_i != pat.size() && isspace(pat[pat_i])) {
pat_i++;
}
matching = (pat_i != pat.size() && str[str_i] == pat[pat_i++]);
}
}
while (pat_i != pat.size() && isspace(pat[pat_i])) {
pat_i++;
}
return (matching && pat_i == pat.size());
}
TEST(CommandLineFlagsTest, UsageString) {
int some_int = 10;
int64_t some_int64 = 21474836470;
bool some_switch = false;
std::string some_name = "something";
int some_int2 = 4;
const std::string tool_name = "some_tool_name";
std::string usage = Flags::Usage(
tool_name,
{Flag::CreateFlag("some_int", &some_int, "some int"),
Flag::CreateFlag("some_int64", &some_int64, "some int64"),
Flag::CreateFlag("some_switch", &some_switch, "some switch"),
Flag::CreateFlag("some_name", &some_name, "some name", Flag::kRequired),
Flag::CreateFlag("some_int2", &some_int2, "some int",
Flag::kPositional)});
const char* expected_usage =
" usage: some_tool_name <some_int2> <flags>\n"
"Where:\n"
"some_int2\tint32\trequired\tsome int\n"
"Flags:\n"
"--some_name=something\tstring\trequired\tsome name\n"
"--some_int=10\tint32\toptional\tsome int\n"
"--some_int64=21474836470\tint64\toptional\tsome int64\n"
"--some_switch=false\tbool\toptional\tsome switch\n";
ASSERT_EQ(MatchWithAnyWhitespace(usage, expected_usage), true) << usage;
usage = Flags::Usage(tool_name, {});
ASSERT_EQ(MatchWithAnyWhitespace(usage, " usage: some_tool_name\n"), true)
<< usage;
}
TEST(CommandLineFlagsTest, DuplicateArgsParsableValues) {
int some_int = -23;
int argc = 3;
const char* argv_strings[] = {"program_name", "--some_int=1", "--some_int=2"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int, "some int")});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(1, some_int);
EXPECT_EQ(argc, 2);
EXPECT_EQ("--some_int=2", argv_strings[1]);
}
TEST(CommandLineFlagsTest, DuplicateArgsBadValueAppearFirst) {
int some_int = -23;
int argc = 3;
const char* argv_strings[] = {"program_name", "--some_int=value",
"--some_int=1"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int, "some int")});
EXPECT_FALSE(parsed_ok);
EXPECT_EQ(-23, some_int);
EXPECT_EQ(argc, 2);
EXPECT_EQ("--some_int=1", argv_strings[1]);
}
TEST(CommandLineFlagsTest, DuplicateArgsBadValueAppearSecondly) {
int some_int = -23;
int argc = 3;
const char* argv_strings[] = {"program_name", "--some_int=1",
"--some_int=value"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int, "some int")});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(1, some_int);
EXPECT_EQ(argc, 2);
EXPECT_EQ("--some_int=value", argv_strings[1]);
}
TEST(CommandLineFlagsTest, DuplicateFlags) {
int some_int1 = -23;
int some_int2 = -23;
int argc = 2;
const char* argv_strings[] = {"program_name", "--some_int=1"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int1, "some int1"),
Flag::CreateFlag("some_int", &some_int2, "some int2")});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(1, some_int1);
EXPECT_EQ(1, some_int2);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, DuplicateFlagsNotFound) {
int some_int1 = -23;
int some_int2 = -23;
int argc = 2;
const char* argv_strings[] = {"program_name", "--some_float=1.0"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int1, "some int1", Flag::kOptional),
Flag::CreateFlag("some_int", &some_int2, "some int2", Flag::kRequired)});
EXPECT_FALSE(parsed_ok);
EXPECT_EQ(-23, some_int1);
EXPECT_EQ(-23, some_int2);
EXPECT_EQ(argc, 2);
}
TEST(CommandLineFlagsTest, DuplicateFlagNamesButDifferentTypes) {
int some_int = -23;
bool some_bool = true;
int argc = 2;
const char* argv_strings[] = {"program_name", "--some_val=20"};
bool parsed_ok =
Flags::Parse(&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_val", &some_int, "some val-int"),
Flag::CreateFlag("some_val", &some_bool, "some val-bool")});
EXPECT_FALSE(parsed_ok);
EXPECT_EQ(20, some_int);
EXPECT_TRUE(some_bool);
EXPECT_EQ(argc, 1);
}
TEST(CommandLineFlagsTest, DuplicateFlagsAndArgs) {
int some_int1 = -23;
int some_int2 = -23;
int argc = 3;
const char* argv_strings[] = {"program_name", "--some_int=1", "--some_int=2"};
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{Flag::CreateFlag("some_int", &some_int1, "flag1: bind with some_int1"),
Flag::CreateFlag("some_int", &some_int2, "flag2: bind with some_int2")});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(1, some_int1);
EXPECT_EQ(1, some_int2);
EXPECT_EQ(argc, 2);
}
TEST(CommandLineFlagsTest, ArgsToString) {
int argc = 3;
const char* argv_strings[] = {"program_name", "--some_int=1", "--some_int=2"};
std::string args =
Flags::ArgsToString(argc, reinterpret_cast<const char**>(argv_strings));
EXPECT_EQ("--some_int=1 --some_int=2", args);
}
TEST(CommandLineFlagsTest, ArgvPositions) {
tools::ToolParams params;
params.AddParam("some_int", tools::ToolParam::Create<int>(13));
params.AddParam("some_float", tools::ToolParam::Create<float>(17.0f));
params.AddParam("some_bool", tools::ToolParam::Create<bool>(true));
const char* argv_strings[] = {"program_name", "--some_float=42.0",
"--some_bool=false", "--some_int=5"};
int argc = 4;
tools::ToolParams* const params_ptr = ¶ms;
bool parsed_ok = Flags::Parse(
&argc, reinterpret_cast<const char**>(argv_strings),
{
Flag(
"some_int",
[params_ptr](const int& val, int argv_position) {
params_ptr->Set<int>("some_int", val, argv_position);
},
13, "some int", Flag::kOptional),
Flag(
"some_float",
[params_ptr](const float& val, int argv_position) {
params_ptr->Set<float>("some_float", val, argv_position);
},
17.0f, "some float", Flag::kOptional),
Flag(
"some_bool",
[params_ptr](const bool& val, int argv_position) {
params_ptr->Set<bool>("some_bool", val, argv_position);
},
true, "some bool", Flag::kOptional),
});
EXPECT_TRUE(parsed_ok);
EXPECT_EQ(5, params.Get<int>("some_int"));
EXPECT_NEAR(42.0f, params.Get<float>("some_float"), 1e-5f);
EXPECT_FALSE(params.Get<bool>("some_bool"));
EXPECT_EQ(3, params.GetPosition<int>("some_int"));
EXPECT_EQ(1, params.GetPosition<float>("some_float"));
EXPECT_EQ(2, params.GetPosition<bool>("some_bool"));
EXPECT_EQ(argc, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/tools/command_line_flags.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/command_line_flags_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7706d990-1619-4cbd-8817-82820269533e | cpp | tensorflow/tensorflow | reporter | third_party/xla/xla/tsl/util/reporter.cc | tensorflow/core/util/reporter_test.cc | #include "xla/tsl/util/reporter.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/str_util.h"
namespace tsl {
TestReportFile::TestReportFile(const string& fname, const string& test_name)
: closed_(true), fname_(fname), test_name_(test_name) {}
absl::Status TestReportFile::Append(const string& content) {
if (closed_) return absl::OkStatus();
return log_file_->Append(content);
}
absl::Status TestReportFile::Close() {
if (closed_) return absl::OkStatus();
closed_ = true;
return log_file_->Close();
}
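// Creates the report file. This is a no-op when no base file name was given;
// otherwise the test name has '/' replaced by "__" and is appended to the
// base name, and initialization fails if that file already exists.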
absl::Status TestReportFile::Initialize() {
if (fname_.empty()) {
return absl::OkStatus();
}
string mangled_fname = strings::StrCat(
fname_, absl::StrJoin(str_util::Split(test_name_, '/'), "__"));
Env* env = Env::Default();
if (env->FileExists(mangled_fname).ok()) {
return errors::InvalidArgument(
"Cannot create TestReportFile, file exists: ", mangled_fname);
}
TF_RETURN_IF_ERROR(env->NewWritableFile(mangled_fname, &log_file_));
TF_RETURN_IF_ERROR(log_file_->Flush());
closed_ = false;
return absl::OkStatus();
}
TestReporter::TestReporter(const string& fname, const string& test_name)
: report_file_(fname, test_name) {
benchmark_entry_.set_name(test_name);
}
absl::Status TestReporter::Close() {
if (report_file_.IsClosed()) return absl::OkStatus();
tensorflow::BenchmarkEntries entries;
*entries.add_entry() = benchmark_entry_;
TF_RETURN_IF_ERROR(report_file_.Append(entries.SerializeAsString()));
benchmark_entry_.Clear();
return report_file_.Close();
}
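// Records a single benchmark result. cpu_time and wall_time are stored per
// iteration (divided by iters); nothing is recorded once the report file is
// closed.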
absl::Status TestReporter::Benchmark(int64_t iters, double cpu_time,
double wall_time, double throughput) {
if (report_file_.IsClosed()) return absl::OkStatus();
benchmark_entry_.set_iters(iters);
benchmark_entry_.set_cpu_time(cpu_time / iters);
benchmark_entry_.set_wall_time(wall_time / iters);
benchmark_entry_.set_throughput(throughput);
return absl::OkStatus();
}
absl::Status TestReporter::SetProperty(const string& name,
const string& value) {
if (report_file_.IsClosed()) return absl::OkStatus();
(*benchmark_entry_.mutable_extras())[name].set_string_value(value);
return absl::OkStatus();
}
absl::Status TestReporter::SetProperty(const string& name, double value) {
if (report_file_.IsClosed()) return absl::OkStatus();
(*benchmark_entry_.mutable_extras())[name].set_double_value(value);
return absl::OkStatus();
}
absl::Status TestReporter::AddMetric(const string& name, double value) {
if (report_file_.IsClosed()) return absl::OkStatus();
auto* metric = benchmark_entry_.add_metrics();
metric->set_name(name);
metric->set_value(value);
return absl::OkStatus();
}
absl::Status TestReporter::Initialize() { return report_file_.Initialize(); }
} | #define _XOPEN_SOURCE
#include <cstdlib>
#include "tensorflow/core/util/reporter.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< s << " does not contain " << expected;
}
TEST(TestReporter, NoLogging) {
TestReporter test_reporter("b1");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.Close());
}
TEST(TestReporter, UsesEnv) {
const char* old_env = std::getenv(TestReporter::kTestReporterEnv);
setenv(TestReporter::kTestReporterEnv, "/cant/find/me:!", 1);
CHECK_EQ(string(std::getenv(TestReporter::kTestReporterEnv)),
string("/cant/find/me:!"));
TestReporter test_reporter("b1");
Status s = test_reporter.Initialize();
ExpectHasSubstr(s.ToString(), "/cant/find/me");
unsetenv(TestReporter::kTestReporterEnv);
CHECK_EQ(std::getenv(TestReporter::kTestReporterEnv), nullptr);
TestReporter test_reporter_empty("b1");
s = test_reporter_empty.Initialize();
TF_EXPECT_OK(s);
s = test_reporter_empty.Close();
TF_EXPECT_OK(s);
if (old_env == nullptr) {
unsetenv(TestReporter::kTestReporterEnv);
} else {
setenv(TestReporter::kTestReporterEnv, old_env, 1);
}
}
TEST(TestReporter, CreateTwiceFails) {
{
TestReporter test_reporter(
strings::StrCat(testing::TmpDir(), "/test_reporter_dupe"), "t1");
TF_EXPECT_OK(test_reporter.Initialize());
}
{
TestReporter test_reporter(
strings::StrCat(testing::TmpDir(), "/test_reporter_dupe"), "t1");
Status s = test_reporter.Initialize();
ExpectHasSubstr(s.ToString(), "file exists:");
}
}
TEST(TestReporter, CreateCloseCreateAgainSkipsSecond) {
TestReporter test_reporter(
strings::StrCat(testing::TmpDir(), "/test_reporter_create_close"), "t1");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.Close());
TF_EXPECT_OK(test_reporter.Benchmark(1, 1.0, 2.0, 3.0));
TF_EXPECT_OK(test_reporter.Close());
Status s = test_reporter.Initialize();
ExpectHasSubstr(s.ToString(), "file exists:");
}
TEST(TestReporter, Benchmark) {
string fname =
strings::StrCat(testing::TmpDir(), "/test_reporter_benchmarks_");
TestReporter test_reporter(fname, "b1/2/3");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.Benchmark(1, 1.0, 2.0, 3.0));
TF_EXPECT_OK(test_reporter.Close());
string expected_fname = strings::StrCat(fname, "b1__2__3");
string read;
TF_EXPECT_OK(ReadFileToString(Env::Default(), expected_fname, &read));
BenchmarkEntries benchmark_entries;
ASSERT_TRUE(benchmark_entries.ParseFromString(read));
ASSERT_EQ(1, benchmark_entries.entry_size());
const BenchmarkEntry& benchmark_entry = benchmark_entries.entry(0);
EXPECT_EQ(benchmark_entry.name(), "b1/2/3");
EXPECT_EQ(benchmark_entry.iters(), 1);
EXPECT_EQ(benchmark_entry.cpu_time(), 1.0);
EXPECT_EQ(benchmark_entry.wall_time(), 2.0);
EXPECT_EQ(benchmark_entry.throughput(), 3.0);
}
TEST(TestReporter, SetProperties) {
string fname =
strings::StrCat(testing::TmpDir(), "/test_reporter_benchmarks_");
TestReporter test_reporter(fname, "b2/3/4");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.SetProperty("string_prop", "abc"));
TF_EXPECT_OK(test_reporter.SetProperty("double_prop", 4.0));
TF_EXPECT_OK(test_reporter.Close());
string expected_fname = strings::StrCat(fname, "b2__3__4");
string read;
TF_EXPECT_OK(ReadFileToString(Env::Default(), expected_fname, &read));
BenchmarkEntries benchmark_entries;
ASSERT_TRUE(benchmark_entries.ParseFromString(read));
ASSERT_EQ(1, benchmark_entries.entry_size());
const BenchmarkEntry& benchmark_entry = benchmark_entries.entry(0);
const auto& extras = benchmark_entry.extras();
ASSERT_EQ(2, extras.size());
EXPECT_EQ("abc", extras.at("string_prop").string_value());
EXPECT_EQ(4.0, extras.at("double_prop").double_value());
}
TEST(TestReporter, AddMetrics) {
string fname =
strings::StrCat(testing::TmpDir(), "/test_reporter_benchmarks_");
TestReporter test_reporter(fname, "b3/4/5");
TF_EXPECT_OK(test_reporter.Initialize());
TF_EXPECT_OK(test_reporter.AddMetric("metric1", 2.0));
TF_EXPECT_OK(test_reporter.AddMetric("metric2", 3.0));
TF_EXPECT_OK(test_reporter.Close());
string expected_fname = strings::StrCat(fname, "b3__4__5");
string read;
TF_EXPECT_OK(ReadFileToString(Env::Default(), expected_fname, &read));
BenchmarkEntries benchmark_entries;
ASSERT_TRUE(benchmark_entries.ParseFromString(read));
ASSERT_EQ(1, benchmark_entries.entry_size());
const BenchmarkEntry& benchmark_entry = benchmark_entries.entry(0);
const auto& metrics = benchmark_entry.metrics();
ASSERT_EQ(2, metrics.size());
EXPECT_EQ("metric1", metrics.at(0).name());
EXPECT_EQ(2.0, metrics.at(0).value());
EXPECT_EQ("metric2", metrics.at(1).name());
EXPECT_EQ(3.0, metrics.at(1).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/util/reporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/reporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa7caee5-32af-45de-b39f-cbd70c5eace5 | cpp | tensorflow/tensorflow | determinism | tensorflow/python/util/determinism.cc | third_party/xla/xla/service/gpu/determinism_test.cc | #include "tensorflow/core/util/determinism.h"
#include "pybind11/pybind11.h"
PYBIND11_MODULE(_pywrap_determinism, m) {
m.def("enable", &tensorflow::EnableOpDeterminism);
m.def("is_enabled", &tensorflow::OpDeterminismRequired);
} | #include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_timer.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class DeterminismTest : public GpuCodegenTest {
public:
DeterminismTest() : debug_options_(HloTestBase::GetDebugOptionsForTest()) {
debug_options_.set_xla_gpu_exclude_nondeterministic_ops(true);
se::gpu::GpuTimer::ReturnRandomDurationsForTesting();
}
void AssertDeterminism(absl::string_view hlo_string, int num_runs = 10) {
std::vector<Literal> fake_arguments;
std::vector<Literal*> fake_arguments_ptrs;
std::optional<Literal> canonical_output;
for (int i = 0; i < num_runs; ++i) {
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
if (i == 0) {
fake_arguments = MakeFakeArguments(module.get()).value();
for (Literal& literal : fake_arguments) {
fake_arguments_ptrs.push_back(&literal);
}
}
TF_ASSERT_OK_AND_ASSIGN(Literal output,
Execute(std::move(module), fake_arguments_ptrs));
if (!canonical_output.has_value()) {
canonical_output = std::move(output);
} else {
ASSERT_TRUE(LiteralTestUtil::Equal(*canonical_output, output));
}
}
}
DebugOptions GetDebugOptionsForTest() override { return debug_options_; }
DebugOptions debug_options_;
bool IsVoltaOrLater() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability()
.IsAtLeastVolta();
}
bool IsRocm() const {
return std::holds_alternative<stream_executor::RocmComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
bool HasHipblasLt() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.rocm_compute_capability()
.has_hipblaslt();
}
};
TEST_F(DeterminismTest, CublasDot) {
constexpr absl::string_view kHloText = R"(
ENTRY e {
p0 = f32[128,128] parameter(0)
p1 = f32[128,128] parameter(1)
ROOT d = f32[128,128] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
if (IsRocm()) {
if (!HasHipblasLt()) {
GTEST_SKIP() << "No hipblas-lt support on this architecture!";
}
debug_options_.set_xla_gpu_enable_triton_gemm(false);
}
debug_options_.set_xla_gpu_triton_fusion_level(0);
MatchOptimizedHlo(kHloText, R"(; CHECK: custom_call_target="__cublas$gemm")");
AssertDeterminism(kHloText);
debug_options_.set_xla_gpu_enable_cublaslt(true);
MatchOptimizedHlo(kHloText,
R"(; CHECK: custom_call_target="__cublas$lt$matmul")");
AssertDeterminism(kHloText);
}
TEST_F(DeterminismTest, DeterministicTritonGemmUsesDefaultConfig) {
if (!IsVoltaOrLater()) {
GTEST_SKIP() << "Triton is not supported on non-NVIDIA and "
"pre-Volta NVIDIA GPUs.";
}
constexpr absl::string_view kHloText = R"(
ENTRY e {
p0 = bf16[128,128] parameter(0)
p0_convert = f32[128,128] convert(p0)
p1 = f32[128,128] parameter(1)
ROOT d = f32[128,128] dot(p0_convert, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
debug_options_.set_xla_gpu_deterministic_ops(true);
AutotunerUtil::ClearAutotuneResults();
MatchOptimizedHlo(kHloText, R"(
CHECK: __triton_gemm
CHECK: {"block_m":"32","block_n":"32","block_k":"32","split_k":"1","num_stages":"1","num_warps":"4","num_ctas":"1"}
)");
AssertDeterminism(kHloText, 3);
}
TEST_F(DeterminismTest, ExcludingNonDeterministicOpsDoesNotDisableAutotuning) {
if (!IsVoltaOrLater()) {
GTEST_SKIP() << "Triton is not supported on non-NVIDIA and "
"pre-Volta NVIDIA GPUs.";
}
debug_options_.set_xla_gpu_cublas_fallback(false);
ASSERT_TRUE(debug_options_.xla_gpu_exclude_nondeterministic_ops());
ASSERT_FALSE(debug_options_.xla_gpu_deterministic_ops());
AutotunerUtil::ClearAutotuneResults();
MatchOptimizedHlo(R"(
ENTRY e {
p0 = bf16[128,128] parameter(0)
p0_convert = f32[128,128] convert(p0)
p1 = f32[128,128] parameter(1)
ROOT d = f32[128,128] dot(p0_convert, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})",
R"(
CHECK: __triton_gemm
CHECK-NOT: {"block_m":"32","block_n":"32","block_k":"32","split_k":"1","num_stages":"1","num_warps":"4","num_ctas":"1"}
)");
}
TEST_F(DeterminismTest, Conv) {
constexpr absl::string_view kHloText = R"(
ENTRY e {
input = f32[16,3,64,64] parameter(0)
filter = f32[3,3,3,64] parameter(1)
conv = f32[16,64,64,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
})";
AssertDeterminism(kHloText);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/util/determinism.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/determinism_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
44ee2e2d-d074-4a87-b125-2bf59679e221 | cpp | tensorflow/tensorflow | tracking_allocator | third_party/xla/xla/tsl/framework/tracking_allocator.cc | tensorflow/core/framework/tracking_allocator_test.cc | #include "xla/tsl/framework/tracking_allocator.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
namespace tsl {
TrackingAllocator::TrackingAllocator(Allocator* allocator, bool track_sizes)
: allocator_(allocator),
ref_(1),
allocated_(0),
high_watermark_(0),
total_bytes_(0),
track_sizes_locally_(track_sizes && !allocator_->TracksAllocationSizes()),
next_allocation_id_(0) {}
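// Allocates through the wrapped allocator and records the allocation. The
// allocated size comes from the wrapped allocator when it tracks sizes, from
// a local map when track_sizes_locally_ is set, and otherwise only the
// requested byte count is accumulated. Every live allocation holds a
// reference on this tracker.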
void* TrackingAllocator::AllocateRaw(
size_t alignment, size_t num_bytes,
const AllocationAttributes& allocation_attr) {
void* ptr = allocator_->AllocateRaw(alignment, num_bytes, allocation_attr);
if (nullptr == ptr) {
return ptr;
}
if (allocator_->TracksAllocationSizes()) {
size_t allocated_bytes = allocator_->AllocatedSize(ptr);
{
mutex_lock lock(mu_);
allocated_ += allocated_bytes;
high_watermark_ = std::max(high_watermark_, allocated_);
total_bytes_ += allocated_bytes;
allocations_.emplace_back(allocated_bytes, Env::Default()->NowMicros());
++ref_;
}
} else if (track_sizes_locally_) {
size_t allocated_bytes = allocator_->AllocatedSizeSlow(ptr);
allocated_bytes = std::max(num_bytes, allocated_bytes);
mutex_lock lock(mu_);
next_allocation_id_ += 1;
Chunk chunk = {num_bytes, allocated_bytes, next_allocation_id_};
in_use_.emplace(std::make_pair(ptr, chunk));
allocated_ += allocated_bytes;
high_watermark_ = std::max(high_watermark_, allocated_);
total_bytes_ += allocated_bytes;
allocations_.emplace_back(allocated_bytes, Env::Default()->NowMicros());
++ref_;
} else {
mutex_lock lock(mu_);
total_bytes_ += num_bytes;
allocations_.emplace_back(num_bytes, Env::Default()->NowMicros());
++ref_;
}
return ptr;
}
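// Frees the pointer through the wrapped allocator, updates the in-use byte
// accounting, and deletes this tracker once the last reference is released.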
void TrackingAllocator::DeallocateRaw(void* ptr) {
if (nullptr == ptr) {
return;
}
bool should_delete;
bool tracks_allocation_sizes = allocator_->TracksAllocationSizes();
size_t allocated_bytes = 0;
if (tracks_allocation_sizes) {
allocated_bytes = allocator_->AllocatedSize(ptr);
} else if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto itr = in_use_.find(ptr);
if (itr != in_use_.end()) {
tracks_allocation_sizes = true;
allocated_bytes = (*itr).second.allocated_size;
in_use_.erase(itr);
}
}
Allocator* allocator = allocator_;
{
mutex_lock lock(mu_);
if (tracks_allocation_sizes) {
CHECK_GE(allocated_, allocated_bytes);
allocated_ -= allocated_bytes;
allocations_.emplace_back(-allocated_bytes, Env::Default()->NowMicros());
}
should_delete = UnRef();
}
allocator->DeallocateRaw(ptr);
if (should_delete) {
delete this;
}
}
bool TrackingAllocator::TracksAllocationSizes() const {
return track_sizes_locally_ || allocator_->TracksAllocationSizes();
}
size_t TrackingAllocator::RequestedSize(const void* ptr) const {
if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto it = in_use_.find(ptr);
if (it != in_use_.end()) {
return (*it).second.requested_size;
}
return 0;
} else {
return allocator_->RequestedSize(ptr);
}
}
size_t TrackingAllocator::AllocatedSize(const void* ptr) const {
if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto it = in_use_.find(ptr);
if (it != in_use_.end()) {
return (*it).second.allocated_size;
}
return 0;
} else {
return allocator_->AllocatedSize(ptr);
}
}
int64_t TrackingAllocator::AllocationId(const void* ptr) const {
if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto it = in_use_.find(ptr);
if (it != in_use_.end()) {
return (*it).second.allocation_id;
}
return 0;
} else {
return allocator_->AllocationId(ptr);
}
}
absl::optional<AllocatorStats> TrackingAllocator::GetStats() {
return allocator_->GetStats();
}
bool TrackingAllocator::ClearStats() { return allocator_->ClearStats(); }
std::tuple<size_t, size_t, size_t> TrackingAllocator::GetSizes() {
size_t high_watermark;
size_t total_bytes;
size_t still_live_bytes;
{
mutex_lock lock(mu_);
high_watermark = high_watermark_;
total_bytes = total_bytes_;
still_live_bytes = allocated_;
}
return std::make_tuple(total_bytes, high_watermark, still_live_bytes);
}
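// Returns the accumulated allocation records and releases the owner's
// reference; the tracker deletes itself once all outstanding allocations
// have also been freed.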
absl::InlinedVector<AllocRecord, 4UL> TrackingAllocator::GetRecordsAndUnRef() {
bool should_delete;
absl::InlinedVector<AllocRecord, 4UL> allocations;
{
mutex_lock lock(mu_);
allocations.swap(allocations_);
should_delete = UnRef();
}
if (should_delete) {
delete this;
}
return allocations;
}
absl::InlinedVector<AllocRecord, 4UL> TrackingAllocator::GetCurrentRecords() {
absl::InlinedVector<AllocRecord, 4UL> allocations;
{
mutex_lock lock(mu_);
for (const AllocRecord& alloc : allocations_) {
allocations.push_back(alloc);
}
}
return allocations;
}
bool TrackingAllocator::UnRef() {
CHECK_GE(ref_, 1);
--ref_;
return (ref_ == 0);
}
} | #include "tensorflow/core/framework/tracking_allocator.h"
#include <unordered_map>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
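// Test allocator that remembers the requested size of every live allocation
// so that size queries made through TrackingAllocator can be verified.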
class TestableSizeTrackingAllocator : public Allocator {
public:
string Name() override { return "test"; }
void* AllocateRaw(size_t , size_t num_bytes) override {
void* ptr = port::Malloc(num_bytes);
size_map_[ptr] = num_bytes;
return ptr;
}
void DeallocateRaw(void* ptr) override {
const auto& iter = size_map_.find(ptr);
EXPECT_NE(size_map_.end(), iter);
size_map_.erase(iter);
port::Free(ptr);
}
bool TracksAllocationSizes() const override { return true; }
size_t RequestedSize(const void* ptr) const override {
const auto& iter = size_map_.find(ptr);
EXPECT_NE(size_map_.end(), iter);
return iter->second;
}
absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
private:
std::unordered_map<const void*, size_t> size_map_;
};
class NoMemoryAllocator : public Allocator {
public:
string Name() override { return "test"; }
void* AllocateRaw(size_t , size_t num_bytes) override {
return nullptr;
}
void DeallocateRaw(void* ptr) override {}
bool TracksAllocationSizes() const override { return true; }
absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
};
TEST(TrackingAllocatorTest, SimpleNoTracking) {
Allocator* a = cpu_allocator();
EXPECT_FALSE(a->TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(a, false);
void* p1 = ta->AllocateRaw(4, 4);
ta->DeallocateRaw(p1);
void* p2 = ta->AllocateRaw(4, 12);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(16, std::get<0>(sizes));
EXPECT_EQ(0, std::get<1>(sizes));
EXPECT_EQ(0, std::get<2>(sizes));
ta->DeallocateRaw(p2);
auto records = ta->GetRecordsAndUnRef();
EXPECT_EQ(4, records[0].alloc_bytes);
EXPECT_EQ(12, records[1].alloc_bytes);
ta = new TrackingAllocator(a, true);
p1 = ta->AllocateRaw(4, 4);
EXPECT_EQ(4, ta->RequestedSize(p1));
EXPECT_LE(4, ta->AllocatedSize(p1));
EXPECT_EQ(1, ta->AllocationId(p1));
ta->DeallocateRaw(p1);
p2 = ta->AllocateRaw(4, 12);
EXPECT_EQ(12, ta->RequestedSize(p2));
EXPECT_LE(12, ta->AllocatedSize(p2));
EXPECT_EQ(2, ta->AllocationId(p2));
sizes = ta->GetSizes();
EXPECT_LE(16, std::get<0>(sizes));
EXPECT_LE(12, std::get<1>(sizes));
EXPECT_LE(12, std::get<2>(sizes));
ta->DeallocateRaw(p2);
records = ta->GetRecordsAndUnRef();
EXPECT_LE(4, records[0].alloc_bytes);
EXPECT_GE(-4, records[1].alloc_bytes);
EXPECT_LE(12, records[2].alloc_bytes);
EXPECT_GE(-12, records[3].alloc_bytes);
}
TEST(TrackingAllocatorTest, SimpleTracking) {
TestableSizeTrackingAllocator a = TestableSizeTrackingAllocator();
EXPECT_TRUE(a.TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(&a, false);
void* p1 = ta->AllocateRaw(4, 12);
ta->DeallocateRaw(p1);
void* p2 = ta->AllocateRaw(4, 4);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(16, std::get<0>(sizes));
EXPECT_EQ(12, std::get<1>(sizes));
EXPECT_EQ(4, std::get<2>(sizes));
ta->DeallocateRaw(p2);
auto records = ta->GetRecordsAndUnRef();
EXPECT_EQ(12, records[0].alloc_bytes);
EXPECT_EQ(-12, records[1].alloc_bytes);
EXPECT_EQ(4, records[2].alloc_bytes);
EXPECT_EQ(-4, records[3].alloc_bytes);
}
TEST(TrackingAllocatorTest, OutOfMemory) {
NoMemoryAllocator a;
EXPECT_TRUE(a.TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(&a, false);
void* p1 = ta->AllocateRaw(4, 12);
EXPECT_EQ(nullptr, p1);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(0, std::get<0>(sizes));
EXPECT_EQ(0, std::get<1>(sizes));
EXPECT_EQ(0, std::get<2>(sizes));
EXPECT_EQ(0, ta->GetRecordsAndUnRef().size());
}
TEST(TrackingAllocatorTest, FreeNullPtr) {
NoMemoryAllocator a;
EXPECT_TRUE(a.TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(&a, false);
ta->DeallocateRaw(nullptr);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(0, std::get<0>(sizes));
EXPECT_EQ(0, std::get<1>(sizes));
EXPECT_EQ(0, std::get<2>(sizes));
EXPECT_EQ(0, ta->GetRecordsAndUnRef().size());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/tracking_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tracking_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99662d93-902f-4bd4-a780-65a01fee4b6b | cpp | tensorflow/tensorflow | allocator | third_party/xla/xla/tsl/framework/allocator.cc | tensorflow/core/framework/allocator_test.cc | #include "xla/tsl/framework/allocator.h"
#include <atomic>
#include "xla/tsl/framework/allocator_registry.h"
#include "xla/tsl/framework/tracking_allocator.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/types.h"
namespace tsl {
string AllocatorStats::DebugString() const {
return strings::Printf(
"Limit: %20lld\n"
"InUse: %20lld\n"
"MaxInUse: %20lld\n"
"NumAllocs: %20lld\n"
"MaxAllocSize: %20lld\n"
"Reserved: %20lld\n"
"PeakReserved: %20lld\n"
"LargestFreeBlock: %20lld\n",
static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
static_cast<long long>(this->bytes_in_use),
static_cast<long long>(this->peak_bytes_in_use),
static_cast<long long>(this->num_allocs),
static_cast<long long>(this->largest_alloc_size),
static_cast<long long>(this->bytes_reserved),
static_cast<long long>(this->peak_bytes_reserved),
static_cast<long long>(this->largest_free_block_bytes));
}
constexpr size_t Allocator::kAllocatorAlignment;
Allocator::~Allocator() {}
static bool cpu_allocator_collect_full_stats = false;
void EnableCPUAllocatorFullStats() { cpu_allocator_collect_full_stats = true; }
bool CPUAllocatorFullStatsEnabled() { return cpu_allocator_collect_full_stats; }
string AllocatorAttributes::DebugString() const {
return strings::StrCat("AllocatorAttributes(on_host=", on_host(),
" nic_compatible=", nic_compatible(),
" gpu_compatible=", gpu_compatible(), ")");
}
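// Returns the process-wide base CPU allocator from the registry, wrapping it
// in a TrackingAllocator when full allocation statistics were enabled before
// first use.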
Allocator* cpu_allocator_base() {
static Allocator* cpu_alloc =
AllocatorFactoryRegistry::singleton()->GetAllocator();
if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
cpu_alloc = new TrackingAllocator(cpu_alloc, true);
}
return cpu_alloc;
}
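// Returns the CPU allocator for the given NUMA node through the registered
// ProcessState, falling back to the base allocator when no process state is
// registered.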
Allocator* cpu_allocator(int numa_node) {
static ProcessStateInterface* ps =
AllocatorFactoryRegistry::singleton()->process_state();
if (ps) {
return ps->GetCPUAllocator(numa_node);
} else {
return cpu_allocator_base();
}
}
SubAllocator::SubAllocator(const std::vector<Visitor>& alloc_visitors,
const std::vector<Visitor>& free_visitors)
: alloc_visitors_(alloc_visitors), free_visitors_(free_visitors) {}
void SubAllocator::VisitAlloc(void* ptr, int index, size_t num_bytes) {
for (const auto& v : alloc_visitors_) {
v(ptr, index, num_bytes);
}
}
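// Free visitors are invoked in reverse registration order, mirroring the
// forward order used for allocation visitors.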
void SubAllocator::VisitFree(void* ptr, int index, size_t num_bytes) {
for (int i = free_visitors_.size() - 1; i >= 0; --i) {
free_visitors_[i](ptr, index, num_bytes);
}
}
} | #include "tensorflow/core/framework/allocator.h"
#include <algorithm>
#include <vector>
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tensorflow/core/framework/typed_allocator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/profiler/lib/profiler_session.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
static void CheckStats(Allocator* a, int64_t num_allocs, int64_t bytes_in_use,
int64_t peak_bytes_in_use, int64_t largest_alloc_size) {
absl::optional<AllocatorStats> stats = a->GetStats();
EXPECT_TRUE(stats);
if (!stats) {
return;
}
LOG(INFO) << "Alloc stats: \n" << stats->DebugString();
#if defined(PLATFORM_GOOGLE) && defined(NDEBUG)
static const int64 kSlop = 5 * 1024;
EXPECT_GT(stats->bytes_in_use, bytes_in_use - kSlop);
EXPECT_LT(stats->bytes_in_use, bytes_in_use + kSlop);
EXPECT_GT(stats->peak_bytes_in_use, peak_bytes_in_use - kSlop);
EXPECT_LT(stats->peak_bytes_in_use, peak_bytes_in_use + kSlop);
EXPECT_EQ(stats->num_allocs, num_allocs);
EXPECT_EQ(stats->largest_alloc_size, largest_alloc_size);
#endif
}
TEST(AllocatorAttributesTest, AllCombos) {
for (bool on_host : {false, true}) {
for (bool nic_compatible : {false, true}) {
for (bool gpu_compatible : {false, true}) {
AllocatorAttributes aa;
aa.set_on_host(on_host);
aa.set_nic_compatible(nic_compatible);
aa.set_gpu_compatible(gpu_compatible);
EXPECT_EQ(on_host, aa.on_host());
EXPECT_EQ(nic_compatible, aa.nic_compatible());
EXPECT_EQ(gpu_compatible, aa.gpu_compatible());
}
}
}
}
TEST(AllocatorAttributesTest, IsEqualOrLessRestrictiveThan) {
AllocatorAttributes a, b;
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(b));
b.set_gpu_compatible(true);
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_FALSE(b.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(b));
a.set_nic_compatible(true);
EXPECT_FALSE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_FALSE(b.IsEqualOrLessRestrictiveThan(a));
a.set_gpu_compatible(true);
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(a));
EXPECT_FALSE(a.IsEqualOrLessRestrictiveThan(b));
}
TEST(AllocatorAttributesTest, Merge) {
AllocatorAttributes a, b;
EXPECT_EQ(a.value, 0);
EXPECT_EQ(b.value, 0);
EXPECT_FALSE(a.nic_compatible());
EXPECT_FALSE(b.nic_compatible());
b.set_nic_compatible(true);
a.Merge(b);
EXPECT_TRUE(a.nic_compatible());
EXPECT_TRUE(b.nic_compatible());
EXPECT_EQ(a.scope_id, 0);
EXPECT_EQ(b.scope_id, 0);
a.scope_id = 1;
a.Merge(b);
EXPECT_EQ(a.scope_id, 1);
EXPECT_EQ(b.scope_id, 0);
a.scope_id = 1;
b.scope_id = 0;
b.Merge(a);
EXPECT_EQ(a.scope_id, 1);
EXPECT_EQ(b.scope_id, 1);
a.scope_id = 2;
b.scope_id = 2;
a.Merge(b);
EXPECT_EQ(a.scope_id, 2);
EXPECT_EQ(b.scope_id, 2);
}
TEST(AllocatorAttributesDeathTest, MergeDifferentScopeIds) {
AllocatorAttributes a, b;
a.scope_id = 3;
b.scope_id = 4;
EXPECT_DEATH({ a.Merge(b); }, "");
}
TEST(CPUAllocatorTest, Simple) {
EnableCPUAllocatorStats();
Allocator* a = cpu_allocator();
std::vector<void*> ptrs;
for (int s = 1; s < 1024; s++) {
void* raw = a->AllocateRaw(1, s);
ptrs.push_back(raw);
}
std::sort(ptrs.begin(), ptrs.end());
CheckStats(a, 1023, 552640, 552640, 1024);
for (size_t i = 0; i < ptrs.size(); i++) {
if (i > 0) {
CHECK_NE(ptrs[i], ptrs[i - 1]);
}
a->DeallocateRaw(ptrs[i]);
}
CheckStats(a, 1023, 0, 552640, 1024);
float* t1 = TypedAllocator::Allocate<float>(a, 1024, {});
double* t2 = TypedAllocator::Allocate<double>(a, 1048576, {});
CheckStats(a, 1025, 1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double));
TypedAllocator::Deallocate(a, t1, 1024);
TypedAllocator::Deallocate(a, t2, 1048576);
CheckStats(a, 1025, 0, 1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double));
CHECK(a->ClearStats());
CheckStats(a, 0, 0, 0, 0);
DisableCPUAllocatorStats();
}
struct TestStruct {
int x;
};
TEST(CPUAllocatorTest, CheckStructSize) { CHECK_GT(sizeof(TestStruct), 1); }
TEST(CPUAllocatorTest, AllocateOverflowMaxSizeT) {
Allocator* a = cpu_allocator();
size_t count_to_allocate = std::numeric_limits<size_t>::max();
TestStruct* const test_pointer =
TypedAllocator::Allocate<TestStruct>(a, count_to_allocate, {});
CHECK_EQ(test_pointer, reinterpret_cast<TestStruct*>(NULL));
}
TEST(CPUAllocatorTest, AllocateOverflowSmallest) {
Allocator* a = cpu_allocator();
const size_t count_to_allocate =
(std::numeric_limits<size_t>::max() / sizeof(TestStruct)) + 1;
TestStruct* const test_pointer =
TypedAllocator::Allocate<TestStruct>(a, count_to_allocate, {});
CHECK_EQ(test_pointer, reinterpret_cast<TestStruct*>(NULL));
}
TEST(CPUAllocatorTest, Sizes) {
Allocator* a = cpu_allocator();
EXPECT_EQ(false, a->TracksAllocationSizes());
}
TEST(CPUAllocatorTest, ProfilerReporting) {
void* p = port::AlignedMalloc(8, 1);
const std::size_t alloc_size = port::MallocExtension_GetAllocatedSize(p);
port::AlignedFree(p);
if (alloc_size == 0) {
LOG(WARNING) << "Skipping Memory Debugging test. It requires "
<< "port::MallocExtension_GetAllocatedSize to work.";
return;
}
EnableCPUAllocatorStats();
Allocator* a = cpu_allocator();
void* p1 = a->AllocateRaw(1, 16);
std::unique_ptr<ProfilerSession> profiler =
tensorflow::ProfilerSession::Create(
tensorflow::ProfilerSession::DefaultOptions());
void* p2 = a->AllocateRaw(1, 32);
a->DeallocateRaw(p1);
tensorflow::profiler::XSpace xspace;
EXPECT_EQ(absl::OkStatus(), profiler->CollectData(&xspace));
const auto plane = ::tsl::profiler::FindPlaneWithName(
xspace, ::tensorflow::profiler::kHostThreadsPlaneName);
::tensorflow::profiler::XPlaneVisitor xplane(plane);
ASSERT_EQ(plane->name(), ::tensorflow::profiler::kHostThreadsPlaneName)
<< "XSpace: " << xspace.DebugString();
ASSERT_EQ(plane->event_metadata_size(), 2)
<< "XSpace: " << xspace.DebugString();
const auto& line = plane->lines(0);
ASSERT_EQ(line.events_size(), 2) << "XSpace: " << xspace.DebugString();
const auto& events = line.events();
::tensorflow::profiler::XEventVisitor e0(&xplane, &line, &events[0]);
EXPECT_EQ(e0.Name(), "MemoryAllocation")
<< "XSpace: " << xspace.DebugString();
{
absl::optional<std::string> bytes_allocated, peak_bytes_in_use,
requested_bytes, allocation_bytes;
e0.ForEachStat([&](const ::tensorflow::profiler::XStatVisitor& stat) {
LOG(ERROR) << "STAT " << stat.Name() << ": " << stat.ToString();
if (stat.Name() == "bytes_allocated") {
bytes_allocated = stat.ToString();
} else if (stat.Name() == "peak_bytes_in_use") {
peak_bytes_in_use = stat.ToString();
} else if (stat.Name() == "requested_bytes") {
requested_bytes = stat.ToString();
} else if (stat.Name() == "allocation_bytes") {
allocation_bytes = stat.ToString();
}
});
ASSERT_TRUE(bytes_allocated && peak_bytes_in_use && requested_bytes &&
allocation_bytes)
<< "XSpace: " << xspace.DebugString();
EXPECT_EQ(*bytes_allocated, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*peak_bytes_in_use, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*requested_bytes, "32") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*allocation_bytes, "32") << "XSpace: " << xspace.DebugString();
}
::tensorflow::profiler::XEventVisitor e1(&xplane, &line, &events[1]);
EXPECT_EQ(e1.Name(), "MemoryDeallocation")
<< "XSpace: " << xspace.DebugString();
{
absl::optional<std::string> bytes_allocated, peak_bytes_in_use,
allocation_bytes;
e1.ForEachStat([&](const ::tensorflow::profiler::XStatVisitor& stat) {
if (stat.Name() == "bytes_allocated") {
bytes_allocated = stat.ToString();
} else if (stat.Name() == "peak_bytes_in_use") {
peak_bytes_in_use = stat.ToString();
} else if (stat.Name() == "allocation_bytes") {
allocation_bytes = stat.ToString();
}
});
ASSERT_TRUE(bytes_allocated && peak_bytes_in_use && allocation_bytes)
<< "XSpace: " << xspace.DebugString();
EXPECT_EQ(*bytes_allocated, "32") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*peak_bytes_in_use, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*allocation_bytes, "16") << "XSpace: " << xspace.DebugString();
}
a->DeallocateRaw(p2);
DisableCPUAllocatorStats();
}
namespace {
AllocatorAttributes DeviceAllocatorAttribute() {
AllocatorAttributes attr;
attr.value |= (0x1 << 24);
return attr;
}
bool HasDeviceAllocatorAttribute(const AllocatorAttributes& attr) {
return attr.value & (0x1 << 24);
}
}
TEST(CustomAllocatorAttributes, TestSetterAndGetter) {
AllocatorAttributes attr = DeviceAllocatorAttribute();
EXPECT_TRUE(HasDeviceAllocatorAttribute(attr));
EXPECT_FALSE(HasDeviceAllocatorAttribute(AllocatorAttributes()));
}
static void BM_Allocation(::testing::benchmark::State& state) {
const int arg = state.range(0);
Allocator* a = cpu_allocator();
std::vector<int> sizes = {256, 4096, 16384, 524288, 512, 1048576};
int size_index = 0;
if (arg) EnableCPUAllocatorStats();
for (auto s : state) {
int bytes = sizes[size_index++ % sizes.size()];
void* p = a->AllocateRaw(1, bytes);
a->DeallocateRaw(p);
}
if (arg) DisableCPUAllocatorStats();
}
BENCHMARK(BM_Allocation)->Arg(0)->Arg(1);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241962ce-9e32-45be-80a8-91426c83ee4c | cpp | tensorflow/tensorflow | device_id_manager | third_party/xla/xla/tsl/framework/device_id_manager.cc | tensorflow/core/common_runtime/device/device_id_manager_test.cc | #include "xla/tsl/framework/device_id_manager.h"
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tsl {
namespace {
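// Process-wide singleton mapping TensorFlow device ids to the platform
// (physical) device ids backing them, keyed by device type. Mapping an
// existing TF id to a different platform id is rejected.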
class TfToPlatformDeviceIdMap {
public:
static TfToPlatformDeviceIdMap* singleton() {
static auto* id_map = new TfToPlatformDeviceIdMap;
return id_map;
}
absl::Status Insert(const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId platform_device_id)
TF_LOCKS_EXCLUDED(mu_) {
std::pair<IdMapType::iterator, bool> result;
{
mutex_lock lock(mu_);
TypeIdMapType::iterator device_id_map_iter =
id_map_.insert({type.type_string(), IdMapType()}).first;
result = device_id_map_iter->second.insert(
{tf_device_id.value(), platform_device_id.value()});
}
if (!result.second && platform_device_id.value() != result.first->second) {
return errors::AlreadyExists(
"TensorFlow device (", type, ":", tf_device_id.value(),
") is being mapped to multiple devices (", platform_device_id.value(),
" now, and ", result.first->second,
" previously), which is not supported. "
"This may be the result of providing different ",
        type, " configurations (ConfigProto.gpu_options, for example ",
        "different visible_device_list) when creating multiple Sessions in ",
        "the same process. This is not currently supported, see ",
        "https:");
}
return absl::OkStatus();
}
bool Find(const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId* platform_device_id) const TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
auto type_id_map_iter = id_map_.find(type.type_string());
if (type_id_map_iter == id_map_.end()) return false;
auto id_map_iter = type_id_map_iter->second.find(tf_device_id.value());
if (id_map_iter == type_id_map_iter->second.end()) return false;
*platform_device_id = id_map_iter->second;
return true;
}
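  // Returns every TF device id of `type` currently mapped to
  // `platform_device_id`; fails if the device type was never registered.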
absl::StatusOr<std::vector<TfDeviceId>> GetTfDevicesOnPlatform(
const DeviceType& type, PlatformDeviceId platform_device_id) const
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
auto type_id_map_iter = id_map_.find(type.type_string());
if (type_id_map_iter == id_map_.end()) {
return absl::NotFoundError(
absl::StrCat("TensorFlow device type: ", type.type_string(),
" was not registered"));
}
std::vector<TfDeviceId> tf_device_ids;
for (const auto& [tf_device, platform_device] : type_id_map_iter->second) {
if (platform_device == platform_device_id.value()) {
tf_device_ids.push_back(TfDeviceId(tf_device));
}
}
return tf_device_ids;
}
private:
TfToPlatformDeviceIdMap() = default;
void TestOnlyReset() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
id_map_.clear();
}
using IdMapType = std::unordered_map<int32, int32>;
using TypeIdMapType = std::unordered_map<std::string, IdMapType>;
mutable mutex mu_;
TypeIdMapType id_map_ TF_GUARDED_BY(mu_);
friend class ::tsl::DeviceIdManager;
TfToPlatformDeviceIdMap(const TfToPlatformDeviceIdMap&) = delete;
void operator=(const TfToPlatformDeviceIdMap&) = delete;
};
}
absl::Status DeviceIdManager::InsertTfPlatformDeviceIdPair(
const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId platform_device_id) {
return TfToPlatformDeviceIdMap::singleton()->Insert(type, tf_device_id,
platform_device_id);
}
absl::Status DeviceIdManager::TfToPlatformDeviceId(
const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId* platform_device_id) {
if (TfToPlatformDeviceIdMap::singleton()->Find(type, tf_device_id,
platform_device_id)) {
return absl::OkStatus();
}
return errors::NotFound("TensorFlow device ", type, ":", tf_device_id.value(),
" was not registered");
}
absl::StatusOr<std::vector<TfDeviceId>> DeviceIdManager::GetTfDevicesOnPlatform(
const DeviceType& type, PlatformDeviceId platform_device_id) {
return TfToPlatformDeviceIdMap::singleton()->GetTfDevicesOnPlatform(
type, platform_device_id);
}
void DeviceIdManager::TestOnlyReset() {
TfToPlatformDeviceIdMap::singleton()->TestOnlyReset();
}
} | #include "tensorflow/core/common_runtime/device/device_id_manager.h"
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/device/device_id.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
PlatformDeviceId TfToPlatformDeviceId(const DeviceType& type, TfDeviceId tf) {
PlatformDeviceId platform_device_id;
TF_CHECK_OK(
DeviceIdManager::TfToPlatformDeviceId(type, tf, &platform_device_id));
return platform_device_id;
}
TEST(DeviceIdManagerTest, Basics) {
DeviceType device_type("GPU");
TfDeviceId key_0(0);
PlatformDeviceId value_0(0);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_0,
value_0));
EXPECT_EQ(value_0, TfToPlatformDeviceId(device_type, key_0));
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_0,
value_0));
EXPECT_EQ(value_0, TfToPlatformDeviceId(device_type, key_0));
TfDeviceId key_1(3);
PlatformDeviceId value_1(2);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_1,
value_1));
EXPECT_EQ(value_1, TfToPlatformDeviceId(device_type, key_1));
TfDeviceId key_2(10);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_2,
value_1));
EXPECT_EQ(value_1, TfToPlatformDeviceId(device_type, key_2));
ASSERT_FALSE(
DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_2, value_0)
.ok());
ASSERT_FALSE(DeviceIdManager::TfToPlatformDeviceId(device_type,
TfDeviceId(100), &value_0)
.ok());
}
TEST(DeviceIdManagerTest, TwoDevices) {
DeviceType device_type0("GPU");
TfDeviceId key_0(0);
PlatformDeviceId value_0(0);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type0,
key_0, value_0));
DeviceType device_type1("XPU");
TfDeviceId key_1(2);
PlatformDeviceId value_1(3);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type1,
key_1, value_1));
EXPECT_EQ(value_0, TfToPlatformDeviceId(device_type0, key_0));
EXPECT_EQ(value_1, TfToPlatformDeviceId(device_type1, key_1));
ASSERT_FALSE(
DeviceIdManager::TfToPlatformDeviceId(device_type0, key_1, &value_0)
.ok());
ASSERT_FALSE(
DeviceIdManager::TfToPlatformDeviceId(device_type1, key_0, &value_1)
.ok());
ASSERT_FALSE(
DeviceIdManager::TfToPlatformDeviceId("FOO", key_0, &value_0).ok());
}
TEST(DeviceIdManagerTest, GetTfDevicesOnSamePlatform) {
DeviceType device_gpu("GPU");
TfDeviceId tf_device_0(0);
PlatformDeviceId platform_0(0);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
device_gpu, tf_device_0, platform_0));
TfDeviceId tf_device_1(1);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
device_gpu, tf_device_1, platform_0));
DeviceType device_xpu("XPU");
TfDeviceId tf_device_2(2);
PlatformDeviceId platform_1(3);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
device_xpu, tf_device_2, platform_1));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<TfDeviceId> tf_device_ids_gpu,
DeviceIdManager::GetTfDevicesOnPlatform(device_gpu, platform_0));
EXPECT_THAT(tf_device_ids_gpu,
UnorderedElementsAre(tf_device_0, tf_device_1));
TF_ASSERT_OK_AND_ASSIGN(
tf_device_ids_gpu,
DeviceIdManager::GetTfDevicesOnPlatform(device_gpu, platform_1));
EXPECT_THAT(tf_device_ids_gpu, IsEmpty());
TF_ASSERT_OK_AND_ASSIGN(
std::vector<TfDeviceId> tf_device_ids_xpu,
DeviceIdManager::GetTfDevicesOnPlatform(device_xpu, platform_1));
EXPECT_THAT(tf_device_ids_xpu, UnorderedElementsAre(tf_device_2));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/device_id_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_id_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b92c6bd-6885-430a-9126-bdb0a1b79412 | cpp | tensorflow/tensorflow | cancellation | third_party/xla/xla/tsl/framework/cancellation.cc | third_party/xla/xla/tsl/framework/cancellation_test.cc | #include "xla/tsl/framework/cancellation.h"
#include <forward_list>
#include "absl/memory/memory.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace tsl {
const CancellationToken CancellationManager::kInvalidToken = -1;
CancellationManager::CancellationManager()
: is_cancelling_(false),
is_cancelled_(false),
next_cancellation_token_(0) {}
CancellationManager::CancellationManager(CancellationManager* parent)
: is_cancelling_(false), next_cancellation_token_(0), parent_(parent) {
is_cancelled_ = parent->RegisterChild(this);
}
void CancellationManager::StartCancel() {
StartCancelWithStatus(absl::OkStatus());
}
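// Runs every registered callback exactly once and then cancels all child
// managers. Callbacks registered with error logging also log a warning when
// `status` is not OK. Repeated or concurrent cancellation requests are
// ignored.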
void CancellationManager::StartCancelWithStatus(const absl::Status& status) {
gtl::FlatMap<CancellationToken, CallbackConfiguration> callbacks_to_run;
std::forward_list<CancellationManager*> children_to_cancel;
Notification* cancelled_notification = nullptr;
{
mutex_lock l(mu_);
if (is_cancelled_.load(std::memory_order_relaxed) || is_cancelling_) {
return;
}
is_cancelling_ = true;
if (state_) {
std::swap(state_->callbacks, callbacks_to_run);
CancellationManager* child = state_->first_child;
while (child != nullptr) {
children_to_cancel.push_front(child);
child->is_removed_from_parent_ = true;
child = child->next_sibling_;
}
state_->first_child = nullptr;
cancelled_notification = &state_->cancelled_notification;
}
}
for (auto key_and_value : callbacks_to_run) {
CallbackConfiguration& config = key_and_value.second;
if (!status.ok() && config.log_error) {
LOG(WARNING) << "Cancellation callback \"" << config.name
<< "\" is triggered due to a "
<< (StatusGroup::IsDerived(status) ? "derived" : "root")
<< " error: " << status.ToString();
}
config.callback();
}
for (CancellationManager* child : children_to_cancel) {
child->StartCancelWithStatus(status);
}
{
mutex_lock l(mu_);
is_cancelling_ = false;
is_cancelled_.store(true, std::memory_order_release);
}
if (cancelled_notification) {
cancelled_notification->Notify();
}
}
bool CancellationManager::RegisterCallback(CancellationToken token,
CancelCallback callback) {
return RegisterCallbackConfig(
token, CallbackConfiguration{callback, "", false});
}
bool CancellationManager::RegisterCallbackWithErrorLogging(
CancellationToken token, CancelCallback callback,
absl::string_view callback_name) {
return RegisterCallbackConfig(
token, CallbackConfiguration{callback, std::string(callback_name), true});
}
bool CancellationManager::RegisterCallbackConfig(CancellationToken token,
CallbackConfiguration config) {
DCHECK_LT(token, next_cancellation_token_) << "Invalid cancellation token";
mutex_lock l(mu_);
bool should_register = !is_cancelled_ && !is_cancelling_;
if (should_register) {
if (!state_) {
state_ = absl::make_unique<State>();
}
std::swap(state_->callbacks[token], config);
}
return should_register;
}
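// Removes the callback if cancellation has not started and returns true. If
// cancellation is in progress, blocks until all callbacks have finished and
// returns false; returns false immediately when already cancelled.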
bool CancellationManager::DeregisterCallback(CancellationToken token) {
mu_.lock();
if (is_cancelled_) {
mu_.unlock();
return false;
} else if (is_cancelling_) {
Notification* cancelled_notification =
state_ ? &state_->cancelled_notification : nullptr;
mu_.unlock();
if (cancelled_notification) {
cancelled_notification->WaitForNotification();
}
return false;
} else {
if (state_) {
state_->callbacks.erase(token);
}
mu_.unlock();
return true;
}
}
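// Links `child` into this manager's intrusive child list so that cancelling
// the parent also cancels the child. Returns true, leaving the child
// unlinked, when this manager is already cancelled or cancelling.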
bool CancellationManager::RegisterChild(CancellationManager* child) {
mutex_lock l(mu_);
if (is_cancelled_.load(std::memory_order_relaxed) || is_cancelling_) {
child->is_removed_from_parent_ = true;
return true;
}
if (!state_) {
state_ = absl::make_unique<State>();
}
CancellationManager* current_head = state_->first_child;
state_->first_child = child;
child->prev_sibling_ = nullptr;
child->next_sibling_ = current_head;
if (current_head) {
current_head->prev_sibling_ = child;
}
return false;
}
void CancellationManager::DeregisterChild(CancellationManager* child) {
DCHECK_EQ(child->parent_, this);
Notification* cancelled_notification = nullptr;
{
mutex_lock l(mu_);
if (!child->is_removed_from_parent_) {
DCHECK(state_);
if (child->prev_sibling_ == nullptr) {
DCHECK_EQ(state_->first_child, child);
state_->first_child = child->next_sibling_;
} else {
child->prev_sibling_->next_sibling_ = child->next_sibling_;
}
if (child->next_sibling_ != nullptr) {
child->next_sibling_->prev_sibling_ = child->prev_sibling_;
}
child->is_removed_from_parent_ = true;
}
if (is_cancelling_) {
cancelled_notification = &state_->cancelled_notification;
}
}
if (cancelled_notification) {
cancelled_notification->WaitForNotification();
}
}
bool CancellationManager::TryDeregisterCallback(CancellationToken token) {
mutex_lock lock(mu_);
if (is_cancelled_ || is_cancelling_) {
return false;
} else {
if (state_) {
state_->callbacks.erase(token);
}
return true;
}
}
CancellationManager::~CancellationManager() {
if (parent_) {
parent_->DeregisterChild(this);
}
if (state_) {
StartCancel();
}
}
bool CancellationManager::IsCancelling() {
mutex_lock lock(mu_);
return is_cancelling_;
}
absl::Status RegisterCancellationCallback(
CancellationManager* cancellation_manager, CancelCallback callback,
std::function<void()>* deregister_fn) {
if (cancellation_manager) {
CancellationToken token = cancellation_manager->get_cancellation_token();
if (!cancellation_manager->RegisterCallback(token, std::move(callback))) {
return errors::Cancelled("Operation was cancelled");
}
*deregister_fn = [cancellation_manager, token]() {
cancellation_manager->DeregisterCallback(token);
};
} else {
VLOG(1) << "Cancellation manager is not set. Cancellation callback will "
"not be registered.";
*deregister_fn = []() {};
}
return absl::OkStatus();
}
} | #include "xla/tsl/framework/cancellation.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <random>
#include <vector>
#include "tsl/platform/notification.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
TEST(Cancellation, SimpleNoCancel) {
bool is_cancelled = false;
CancellationManager* manager = new CancellationManager();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
bool deregistered = manager->DeregisterCallback(token);
EXPECT_TRUE(deregistered);
delete manager;
EXPECT_FALSE(is_cancelled);
}
TEST(Cancellation, SimpleCancel) {
bool is_cancelled = false;
CancellationManager* manager = new CancellationManager();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
manager->StartCancel();
EXPECT_TRUE(is_cancelled);
delete manager;
}
TEST(Cancellation, StartCancelTriggersAllCallbacks) {
bool is_cancelled_1 = false;
bool is_cancelled_2 = false;
auto manager = std::make_unique<CancellationManager>();
auto token_1 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallbackWithErrorLogging(
token_1, [&is_cancelled_1]() { is_cancelled_1 = true; }, "TestCallback"));
auto token_2 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallback(
token_2, [&is_cancelled_2]() { is_cancelled_2 = true; }));
manager->StartCancel();
EXPECT_TRUE(is_cancelled_1);
EXPECT_TRUE(is_cancelled_2);
}
TEST(Cancellation, StartCancelWithStatusTriggersAllCallbacks) {
bool is_cancelled_1 = false;
bool is_cancelled_2 = false;
auto manager = std::make_unique<CancellationManager>();
auto token_1 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallbackWithErrorLogging(
token_1, [&is_cancelled_1]() { is_cancelled_1 = true; }, "TestCallback"));
auto token_2 = manager->get_cancellation_token();
EXPECT_TRUE(manager->RegisterCallback(
token_2, [&is_cancelled_2]() { is_cancelled_2 = true; }));
manager->StartCancelWithStatus(absl::OkStatus());
EXPECT_TRUE(is_cancelled_1);
EXPECT_TRUE(is_cancelled_2);
}
TEST(Cancellation, CancelBeforeRegister) {
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
manager->StartCancel();
bool registered = manager->RegisterCallback(token, nullptr);
EXPECT_FALSE(registered);
}
TEST(Cancellation, DeregisterAfterCancel) {
bool is_cancelled = false;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
manager->StartCancel();
EXPECT_TRUE(is_cancelled);
bool deregistered = manager->DeregisterCallback(token);
EXPECT_FALSE(deregistered);
}
TEST(Cancellation, CancelMultiple) {
bool is_cancelled_1 = false, is_cancelled_2 = false, is_cancelled_3 = false;
auto manager = std::make_unique<CancellationManager>();
auto token_1 = manager->get_cancellation_token();
bool registered_1 = manager->RegisterCallback(
token_1, [&is_cancelled_1]() { is_cancelled_1 = true; });
EXPECT_TRUE(registered_1);
auto token_2 = manager->get_cancellation_token();
bool registered_2 = manager->RegisterCallback(
token_2, [&is_cancelled_2]() { is_cancelled_2 = true; });
EXPECT_TRUE(registered_2);
EXPECT_FALSE(is_cancelled_1);
EXPECT_FALSE(is_cancelled_2);
manager->StartCancel();
EXPECT_TRUE(is_cancelled_1);
EXPECT_TRUE(is_cancelled_2);
EXPECT_FALSE(is_cancelled_3);
auto token_3 = manager->get_cancellation_token();
bool registered_3 = manager->RegisterCallback(
token_3, [&is_cancelled_3]() { is_cancelled_3 = true; });
EXPECT_FALSE(registered_3);
EXPECT_FALSE(is_cancelled_3);
}
TEST(Cancellation, IsCancelled) {
auto cm = std::make_unique<CancellationManager>();
thread::ThreadPool w(Env::Default(), "test", 4);
std::vector<Notification> done(8);
for (size_t i = 0; i < done.size(); ++i) {
Notification* n = &done[i];
w.Schedule([n, &cm]() {
while (!cm->IsCancelled()) {
}
ASSERT_FALSE(cm->IsCancelling());
n->Notify();
});
}
Env::Default()->SleepForMicroseconds(1000000 );
cm->StartCancel();
for (size_t i = 0; i < done.size(); ++i) {
done[i].WaitForNotification();
}
}
TEST(Cancellation, IsCancelling) {
CancellationManager cm;
Notification started_cancelling;
Notification can_finish_cancel;
Notification cancel_done;
thread::ThreadPool w(Env::Default(), "test", 1);
auto token = cm.get_cancellation_token();
ASSERT_TRUE(
cm.RegisterCallback(token, [&started_cancelling, &can_finish_cancel]() {
started_cancelling.Notify();
can_finish_cancel.WaitForNotification();
}));
w.Schedule([&cm, &cancel_done]() {
cm.StartCancel();
cancel_done.Notify();
});
started_cancelling.WaitForNotification();
ASSERT_TRUE(cm.IsCancelling());
can_finish_cancel.Notify();
cancel_done.WaitForNotification();
ASSERT_FALSE(cm.IsCancelling());
ASSERT_TRUE(cm.IsCancelled());
}
TEST(Cancellation, TryDeregisterWithoutCancel) {
bool is_cancelled = false;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
bool deregistered = manager->TryDeregisterCallback(token);
EXPECT_TRUE(deregistered);
EXPECT_FALSE(is_cancelled);
}
TEST(Cancellation, TryDeregisterAfterCancel) {
bool is_cancelled = false;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(
token, [&is_cancelled]() { is_cancelled = true; });
EXPECT_TRUE(registered);
manager->StartCancel();
EXPECT_TRUE(is_cancelled);
bool deregistered = manager->TryDeregisterCallback(token);
EXPECT_FALSE(deregistered);
}
TEST(Cancellation, TryDeregisterDuringCancel) {
Notification cancel_started, finish_callback, cancel_complete;
auto manager = std::make_unique<CancellationManager>();
auto token = manager->get_cancellation_token();
bool registered = manager->RegisterCallback(token, [&]() {
cancel_started.Notify();
finish_callback.WaitForNotification();
});
EXPECT_TRUE(registered);
thread::ThreadPool w(Env::Default(), "test", 1);
w.Schedule([&]() {
manager->StartCancel();
cancel_complete.Notify();
});
cancel_started.WaitForNotification();
bool deregistered = manager->TryDeregisterCallback(token);
EXPECT_FALSE(deregistered);
finish_callback.Notify();
cancel_complete.WaitForNotification();
}
TEST(Cancellation, Parent_CancelManyChildren) {
CancellationManager parent;
std::vector<std::unique_ptr<CancellationManager>> children;
for (size_t i = 0; i < 5; ++i) {
children.push_back(absl::make_unique<CancellationManager>(&parent));
EXPECT_FALSE(children.back()->IsCancelled());
}
parent.StartCancel();
for (auto& child : children) {
EXPECT_TRUE(child->IsCancelled());
}
}
TEST(Cancellation, Parent_NotCancelled) {
CancellationManager parent;
{
CancellationManager child(&parent);
child.StartCancel();
EXPECT_TRUE(child.IsCancelled());
}
EXPECT_FALSE(parent.IsCancelled());
}
TEST(Cancellation, Parent_AlreadyCancelled) {
CancellationManager parent;
parent.StartCancel();
EXPECT_TRUE(parent.IsCancelled());
CancellationManager child(&parent);
EXPECT_TRUE(child.IsCancelled());
}
TEST(Cancellation, Parent_RandomDestructionOrder) {
CancellationManager parent;
std::random_device rd;
std::mt19937 g(rd());
for (int rounds = 0; rounds < 100; ++rounds) {
std::vector<std::unique_ptr<CancellationManager>> children;
std::uniform_int_distribution<int> dist(1, 9);
const size_t round_size = dist(rd);
for (size_t i = 0; i < round_size; ++i) {
children.push_back(absl::make_unique<CancellationManager>(&parent));
EXPECT_FALSE(children.back()->IsCancelled());
}
std::vector<size_t> destruction_order(round_size);
std::iota(destruction_order.begin(), destruction_order.end(), 0);
std::shuffle(destruction_order.begin(), destruction_order.end(), g);
for (size_t index : destruction_order) {
children[index].reset();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/cancellation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/cancellation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
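A minimal sketch (not part of the record above) of the CancellationManager flow the tests exercise: register a callback against a token, start cancellation, and observe the cancelled state. The include path is assumed from the record's cancellation.cc path; only calls that appear in the tests are used.

#include <iostream>

#include "xla/tsl/framework/cancellation.h"

int main() {
  tsl::CancellationManager manager;
  auto token = manager.get_cancellation_token();
  bool fired = false;
  // Registration succeeds only while cancellation has not started yet.
  bool registered =
      manager.RegisterCallback(token, [&fired]() { fired = true; });
  if (registered) {
    manager.StartCancel();  // Runs the registered callback before returning.
  }
  std::cout << "fired=" << fired << " cancelled=" << manager.IsCancelled()
            << std::endl;
  return 0;
}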
89a83e9a-b2e7-4752-bf5f-15838cc840ee | cpp | tensorflow/tensorflow | device_id_utils | third_party/xla/xla/tsl/framework/device_id_utils.cc | third_party/xla/xla/tsl/framework/device_id_utils_test.cc | #include "xla/tsl/framework/device_id_utils.h"
#include <numeric>
#include <set>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "xla/tsl/framework/device_id.h"
#include "xla/tsl/framework/device_id_manager.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/str_util.h"
namespace tsl {
void CheckValidTfDeviceId(const DeviceType& type,
const int visible_device_count,
const TfDeviceId tf_device_id) {
PlatformDeviceId platform_device_id;
TF_CHECK_OK(DeviceIdManager::TfToPlatformDeviceId(type, tf_device_id,
&platform_device_id));
CHECK_LT(platform_device_id.value(), visible_device_count)
<< "platform_device_id is outside discovered device range."
<< " TF " << type << " id: " << tf_device_id << ", platform " << type
<< " id: " << platform_device_id
<< ", visible device count: " << visible_device_count;
}
absl::Status ParseVisibleDeviceList(
const std::string& visible_device_list, const int visible_device_count,
std::vector<PlatformDeviceId>* visible_device_order) {
visible_device_order->clear();
if (visible_device_list.empty()) {
visible_device_order->resize(visible_device_count);
std::iota(visible_device_order->begin(), visible_device_order->end(), 0);
} else {
const std::vector<std::string> order_str =
tsl::str_util::Split(visible_device_list, ',');
for (const std::string& platform_device_id_str : order_str) {
int32_t platform_device_id;
if (!absl::SimpleAtoi(platform_device_id_str, &platform_device_id)) {
return tsl::errors::InvalidArgument(
"Could not parse entry in 'visible_device_list': '",
platform_device_id_str,
"'. visible_device_list = ", visible_device_list);
}
if (platform_device_id < 0 ||
platform_device_id >= visible_device_count) {
return tsl::errors::InvalidArgument(
"'visible_device_list' listed an invalid Device id '",
platform_device_id, "' but visible device count is ",
visible_device_count);
}
visible_device_order->push_back(
tsl::PlatformDeviceId(platform_device_id));
}
}
std::set<PlatformDeviceId> visible_device_set(visible_device_order->begin(),
visible_device_order->end());
if (visible_device_set.size() != visible_device_order->size()) {
return tsl::errors::InvalidArgument(
"visible_device_list contained a duplicate entry: ",
visible_device_list);
}
return absl::OkStatus();
}
absl::StatusOr<size_t> GetNumberTfDevicesAndConfigurePlatformDeviceId(
const absl::flat_hash_map<std::string, int64_t>&
session_option_device_counts,
absl::string_view device_type, absl::string_view visible_device_list,
const int visible_device_count) {
size_t num_tf_devices = INT_MAX;
const auto iter = session_option_device_counts.find(device_type);
if (iter != session_option_device_counts.end()) {
num_tf_devices = iter->second;
}
if (num_tf_devices == 0) {
return 0;
}
std::vector<PlatformDeviceId> visible_device_order;
TF_RETURN_IF_ERROR(ParseVisibleDeviceList(std::string(visible_device_list),
visible_device_count,
&visible_device_order));
if (num_tf_devices > visible_device_order.size()) {
num_tf_devices = visible_device_order.size();
}
for (int i = 0; i < num_tf_devices; ++i) {
const PlatformDeviceId platform_device_id = visible_device_order[i];
const TfDeviceId tf_device_id(i);
TF_RETURN_IF_ERROR(tsl::DeviceIdManager::InsertTfPlatformDeviceIdPair(
DeviceType(device_type), tf_device_id, platform_device_id));
}
return num_tf_devices;
}
absl::StatusOr<int> GetPlatformDeviceIdFromDeviceParsedName(
const DeviceNameUtils::ParsedName& device_name,
const DeviceType& device_type) {
const TfDeviceId tf_device_id(GetDeviceIdFromDeviceParsedName(device_name));
PlatformDeviceId platform_device_id;
absl::Status platform_id_status = DeviceIdManager::TfToPlatformDeviceId(
device_type, tf_device_id, &platform_device_id);
if (platform_id_status.ok()) {
return platform_device_id.value();
}
return platform_id_status;
}
int GetDeviceIdFromDeviceParsedName(
const DeviceNameUtils::ParsedName& device_name) {
return device_name.id;
}
} | #include "xla/tsl/framework/device_id_utils.h"
#include <string_view>
#include <vector>
#include "xla/tsl/framework/device_id_manager.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tsl/platform/status_matchers.h"
namespace tsl {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
constexpr std::string_view kTestDeviceType = "CPU";
PlatformDeviceId TfToPlatformDeviceId(TfDeviceId tf_device_id) {
PlatformDeviceId platform_device_id;
TF_CHECK_OK(DeviceIdManager::TfToPlatformDeviceId(
DeviceType(kTestDeviceType), tf_device_id, &platform_device_id));
return platform_device_id;
}
TEST(DeviceIdUtilsTest, CheckValidTfDeviceIdPass) {
TfDeviceId tf_device_id(0);
PlatformDeviceId platform_device_id(1);
TF_EXPECT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
DeviceType(kTestDeviceType), tf_device_id, platform_device_id));
tsl::CheckValidTfDeviceId("CPU", 2, tf_device_id);
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, CheckValidTfDeviceIdNotFound) {
TfDeviceId tf_device_id(0);
EXPECT_DEATH(
      tsl::CheckValidTfDeviceId(DeviceType(kTestDeviceType),
                                /*visible_device_count=*/2, tf_device_id),
"NOT_FOUND: TensorFlow device CPU:0 was not registered");
}
TEST(DeviceIdUtilsTest, CheckValidTfDeviceIdOutsideVisibleDeviceRange) {
TfDeviceId tf_device_id(0);
PlatformDeviceId platform_device_id(1);
TF_EXPECT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
DeviceType(kTestDeviceType), tf_device_id, platform_device_id));
  EXPECT_DEATH(tsl::CheckValidTfDeviceId("CPU", /*visible_device_count=*/1,
                                         tf_device_id),
"platform_device_id is outside discovered device range.");
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, ParseEmptyVisibleDeviceList) {
std::vector<PlatformDeviceId> visible_device_order;
TF_EXPECT_OK(ParseVisibleDeviceList("", 2, &visible_device_order));
PlatformDeviceId platform_device_id0(0), platform_device_id1(1);
std::vector<PlatformDeviceId> expected = {platform_device_id0,
platform_device_id1};
EXPECT_EQ(visible_device_order, expected);
}
TEST(DeviceIdUtilsTest, ParseVisibleDeviceList) {
std::vector<PlatformDeviceId> visible_device_order;
TF_EXPECT_OK(ParseVisibleDeviceList("2,1", 3, &visible_device_order));
PlatformDeviceId platform_device_id2(2), platform_device_id1(1);
std::vector<PlatformDeviceId> expected = {platform_device_id2,
platform_device_id1};
EXPECT_EQ(visible_device_order, expected);
}
TEST(DeviceIdUtilsTest, ParseInvalidVisibleDeviceList) {
std::vector<PlatformDeviceId> visible_device_order;
EXPECT_THAT(
ParseVisibleDeviceList("3,1", 3, &visible_device_order),
StatusIs(tensorflow::error::INVALID_ARGUMENT,
HasSubstr("'visible_device_list' listed an invalid Device id "
"'3' but visible device count is 3")));
}
TEST(DeviceIdUtilsTest, ParseDuplicateVisibleDeviceList) {
std::vector<PlatformDeviceId> visible_device_order;
EXPECT_THAT(
ParseVisibleDeviceList("1,1", 3, &visible_device_order),
StatusIs(
tensorflow::error::INVALID_ARGUMENT,
HasSubstr("visible_device_list contained a duplicate entry: 1,1")));
}
TEST(DeviceIdUtilsTest, GetNumberTfDevicesDefault) {
TF_ASSERT_OK_AND_ASSIGN(size_t num_tf_device,
GetNumberTfDevicesAndConfigurePlatformDeviceId(
{}, kTestDeviceType, "", 2));
EXPECT_EQ(num_tf_device, 2);
TfDeviceId tf_device_id_0(0);
PlatformDeviceId expected_0(0);
EXPECT_EQ(expected_0, TfToPlatformDeviceId(tf_device_id_0));
TfDeviceId tf_device_id_1(1);
PlatformDeviceId expected_1(1);
EXPECT_EQ(expected_1, TfToPlatformDeviceId(tf_device_id_1));
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, GetNumberTfDevicesWithVisibleDeviceList) {
TF_ASSERT_OK_AND_ASSIGN(size_t num_tf_device,
GetNumberTfDevicesAndConfigurePlatformDeviceId(
{}, kTestDeviceType, "2,0", 3));
EXPECT_EQ(num_tf_device, 2);
TfDeviceId tf_device_id_0(0);
PlatformDeviceId expected_2(2);
EXPECT_EQ(expected_2, TfToPlatformDeviceId(tf_device_id_0));
TfDeviceId tf_device_id_1(1);
PlatformDeviceId expected_0(0);
EXPECT_EQ(expected_0, TfToPlatformDeviceId(tf_device_id_1));
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, GetNumberTfDevicesWithSessionOptionDeviceCount) {
TF_ASSERT_OK_AND_ASSIGN(
size_t num_tf_device,
GetNumberTfDevicesAndConfigurePlatformDeviceId(
{{std::string(kTestDeviceType), 2}}, kTestDeviceType, "1,0,2", 3));
EXPECT_EQ(num_tf_device, 2);
TfDeviceId tf_device_id_0(0);
PlatformDeviceId expected_1(1);
EXPECT_EQ(expected_1, TfToPlatformDeviceId(tf_device_id_0));
TfDeviceId tf_device_id_1(1);
PlatformDeviceId expected_0(0);
EXPECT_EQ(expected_0, TfToPlatformDeviceId(tf_device_id_1));
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, GetPlatformDeviceId) {
TfDeviceId tf_device_id(0);
PlatformDeviceId platform_device_id(1);
TF_EXPECT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
DeviceType(kTestDeviceType), tf_device_id, platform_device_id));
DeviceNameUtils::ParsedName device_name;
device_name.id = 0;
TF_ASSERT_OK_AND_ASSIGN(int device_id,
GetPlatformDeviceIdFromDeviceParsedName(
device_name, DeviceType(kTestDeviceType)));
EXPECT_EQ(device_id, 1);
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, GetPlatformDeviceIdNotFound) {
DeviceNameUtils::ParsedName device_name;
device_name.id = 0;
EXPECT_THAT(
GetPlatformDeviceIdFromDeviceParsedName(device_name,
DeviceType(kTestDeviceType)),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("TensorFlow device CPU:0 was not registered")));
}
TEST(DeviceIdUtilsTest, GetDeviceIdWithPlatformDeviceId) {
TfDeviceId tf_device_id(0);
PlatformDeviceId platform_device_id(1);
TF_EXPECT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
DeviceType(kTestDeviceType), tf_device_id, platform_device_id));
DeviceNameUtils::ParsedName device_name;
device_name.id = 0;
EXPECT_EQ(GetDeviceIdFromDeviceParsedName(device_name), 0);
DeviceIdManager::TestOnlyReset();
}
TEST(DeviceIdUtilsTest, GetDeviceIdWithoutPlatformDeviceId) {
DeviceNameUtils::ParsedName device_name;
device_name.id = 0;
EXPECT_EQ(GetDeviceIdFromDeviceParsedName(device_name), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/device_id_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/device_id_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
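A short sketch of ParseVisibleDeviceList from the record above. The header path is assumed to mirror the device_id_utils.cc path; the argument values follow the ParseVisibleDeviceList test.

#include <iostream>
#include <vector>

#include "absl/status/status.h"
#include "xla/tsl/framework/device_id_utils.h"

int main() {
  std::vector<tsl::PlatformDeviceId> order;
  // "2,0" keeps platform devices 2 and 0, in that order, out of 3 visible.
  absl::Status s =
      tsl::ParseVisibleDeviceList("2,0", /*visible_device_count=*/3, &order);
  if (s.ok()) {
    for (const tsl::PlatformDeviceId& id : order) {
      std::cout << id.value() << " ";  // prints: 2 0
    }
    std::cout << std::endl;
  }
  return 0;
}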
78838912-1249-42c4-bc35-dbab34023c54 | cpp | tensorflow/tensorflow | async_value | third_party/xla/xla/tsl/concurrency/async_value.cc | third_party/xla/xla/tsl/concurrency/async_value_test.cc | #include "xla/tsl/concurrency/async_value.h"
#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/span.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/logging.h"
namespace tsl {
// A node in the singly linked list of waiter callbacks attached to an
// AsyncValue that is not yet available.
class NotifierListNode {
public:
explicit NotifierListNode(absl::AnyInvocable<void()> notification)
: next_(nullptr), notification_(std::move(notification)) {}
private:
friend class AsyncValue;
NotifierListNode* next_;
absl::AnyInvocable<void()> notification_;
};
uint16_t AsyncValue::CreateTypeInfoAndReturnTypeIdImpl(
const TypeInfo& type_info) {
size_t type_id = GetTypeInfoTableSingleton()->emplace_back(type_info) + 1;
DCHECK(type_id < std::numeric_limits<uint16_t>::max())
<< "Too many different AsyncValue types.";
return type_id;
}
AsyncValue::TypeInfoTable* AsyncValue::GetTypeInfoTableSingleton() {
constexpr int kInitialCapacity = 64;
static auto* type_info_table = new TypeInfoTable(kInitialCapacity);
return type_info_table;
}
std::atomic<size_t> AsyncValue::total_allocated_async_values_;
void AsyncValue::NotifyAvailable(State available_state) {
DCHECK((kind() == Kind::kConcrete || kind() == Kind::kIndirect))
<< "Should only be used by ConcreteAsyncValue or IndirectAsyncValue";
DCHECK(available_state == State::kConcrete ||
available_state == State::kError);
auto old_value = waiters_and_state_.exchange(
WaitersAndState(nullptr, available_state), std::memory_order_acq_rel);
DCHECK(old_value.state() == State::kUnconstructed ||
old_value.state() == State::kConstructed);
RunWaiters(old_value.waiter());
}
void AsyncValue::RunWaiters(NotifierListNode* list) {
while (list) {
NotifierListNode* node = list;
node->notification_();
list = node->next_;
delete node;
}
}
void AsyncValue::EnqueueWaiter(absl::AnyInvocable<void()> waiter,
WaitersAndState old_value) {
auto* node = new NotifierListNode(std::move(waiter));
auto old_state = old_value.state();
node->next_ = old_value.waiter();
auto new_value = WaitersAndState(node, old_state);
while (!waiters_and_state_.compare_exchange_weak(old_value, new_value,
std::memory_order_acq_rel,
std::memory_order_acquire)) {
if (old_value.state() == State::kConcrete ||
old_value.state() == State::kError) {
DCHECK(old_value.waiter() == nullptr);
node->notification_();
delete node;
return;
}
node->next_ = old_value.waiter();
}
DCHECK(old_value.state() == State::kUnconstructed ||
old_value.state() == State::kConstructed);
}
void AsyncValue::SetError(absl::Status status) {
DCHECK(!status.ok());
if (kind() == Kind::kConcrete) {
GetTypeInfo().set_error(this, std::move(status));
} else {
DCHECK(kind() == Kind::kIndirect);
auto error_av = MakeErrorAsyncValueRef(std::move(status));
static_cast<IndirectAsyncValue*>(this)->ForwardTo(std::move(error_av));
}
}
// Points this indirect value at `value`. If `value` is already concrete or an
// error, its state is forwarded immediately (unwrapping one level of
// indirection); otherwise forwarding is deferred until `value` is available.
void IndirectAsyncValue::ForwardTo(RCReference<AsyncValue> value) {
DCHECK(IsUnavailable());
auto s = value->state();
if (s == State::kConcrete || s == State::kError) {
DCHECK(!value_) << "IndirectAsyncValue::ForwardTo is called more than once";
auto* concrete_value = value.release();
if (concrete_value->kind() == Kind::kIndirect) {
auto* indirect_value = static_cast<IndirectAsyncValue*>(concrete_value);
concrete_value = indirect_value->value_;
DCHECK(concrete_value != nullptr);
DCHECK(concrete_value->kind() == Kind::kConcrete);
concrete_value->AddRef();
indirect_value->DropRef();
}
DCHECK(type_id_ == kUnknownTypeId || type_id_ == concrete_value->type_id_ ||
concrete_value->IsType<DummyValueForErrorAsyncValue>())
<< "IndirectAsyncValue::ForwardTo value has an unexpected type id";
value_ = concrete_value;
type_id_ = concrete_value->type_id_;
NotifyAvailable(s);
} else {
AsyncValue* av = value.get();
av->AndThen([self = FormRef(this), value = std::move(value)]() mutable {
self->ForwardTo(std::move(value));
});
}
}
void BlockUntilReady(AsyncValue* async_value) {
if (ABSL_PREDICT_TRUE(async_value->IsAvailable())) return;
absl::BlockingCounter cnt(1);
async_value->AndThen([&] { cnt.DecrementCount(); });
cnt.Wait();
}
void RunWhenReady(absl::Span<AsyncValue* const> values,
absl::AnyInvocable<void()> callee) {
absl::InlinedVector<AsyncValue*, 4> unavailable_values;
for (auto i : values) {
if (!i->IsAvailable()) unavailable_values.push_back(i);
}
if (unavailable_values.empty()) return callee();
if (unavailable_values.size() == 1) {
unavailable_values[0]->AndThen(
[callee = std::move(callee)]() mutable { callee(); });
return;
}
struct CounterAndCallee {
std::atomic<size_t> counter;
absl::AnyInvocable<void()> callee;
};
auto* data =
new CounterAndCallee{{unavailable_values.size()}, std::move(callee)};
for (auto* val : unavailable_values) {
val->AndThen([data]() {
if (data->counter.fetch_sub(1) != 1) return;
data->callee();
delete data;
});
}
}
void RunWhenReady(absl::Span<RCReference<AsyncValue> const> values,
absl::AnyInvocable<void()> callee) {
absl::InlinedVector<AsyncValue*, 8> pointers;
pointers.reserve(values.size());
for (const auto& ref : values) {
pointers.push_back(ref.get());
}
RunWhenReady(pointers, std::move(callee));
}
} | #include "xla/tsl/concurrency/async_value.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(AsyncValueTest, ConstructedToError) {
AsyncValue* value = MakeConstructedAsyncValueRef<int32_t>(123).release();
bool callback_triggered = false;
EXPECT_TRUE(value->IsConstructed());
EXPECT_FALSE(value->IsConcrete());
EXPECT_FALSE(value->IsAvailable());
value->AndThen([&] { callback_triggered = true; });
EXPECT_FALSE(callback_triggered);
value->SetError(absl::InternalError("test error"));
EXPECT_TRUE(callback_triggered);
EXPECT_TRUE(value->IsAvailable());
EXPECT_FALSE(value->IsConcrete());
EXPECT_TRUE(value->IsError());
value->DropRef();
}
TEST(AsyncValueTest, ConstructedToConcrete) {
AsyncValue* value = MakeConstructedAsyncValueRef<int32_t>(123).release();
EXPECT_TRUE(value->IsConstructed());
EXPECT_FALSE(value->IsConcrete());
EXPECT_FALSE(value->IsAvailable());
value->AndThen([] {});
value->SetStateConcrete();
EXPECT_TRUE(value->IsAvailable());
EXPECT_TRUE(value->IsConcrete());
EXPECT_FALSE(value->IsError());
EXPECT_EQ(123, value->get<int32_t>());
value->DropRef();
}
TEST(AsyncValueTest, UnconstructedEmplace) {
AsyncValue* value = MakeUnconstructedAsyncValueRef<int32_t>().release();
EXPECT_FALSE(value->IsConstructed());
EXPECT_FALSE(value->IsConcrete());
EXPECT_FALSE(value->IsAvailable());
value->AndThen([] {});
value->emplace<int32_t>(123);
EXPECT_FALSE(value->IsConstructed());
EXPECT_TRUE(value->IsAvailable());
EXPECT_TRUE(value->IsConcrete());
EXPECT_EQ(123, value->get<int32_t>());
value->DropRef();
}
TEST(AsyncValueTest, AddAndDropRef) {
AsyncValue* value = MakeConstructedAsyncValueRef<int32_t>(123).release();
value->AndThen([] {});
value->SetStateConcrete();
EXPECT_TRUE(value->IsConcrete());
EXPECT_TRUE(value->IsUnique());
value->AddRef();
EXPECT_FALSE(value->IsUnique());
EXPECT_EQ(123, value->get<int32_t>());
value->DropRef();
EXPECT_TRUE(value->IsUnique());
value->DropRef();
}
TEST(AsyncValueTest, KeepPayloadOnError) {
int payload_value = 0;
struct Payload : AsyncPayload::KeepOnError {
explicit Payload(int* value) : value{value} { *value = 1; }
~Payload() { *value = 2; }
int* value;
};
{
AsyncValueRef<Payload> value =
MakeConstructedAsyncValueRef<Payload>(&payload_value);
EXPECT_EQ(1, *value->value);
value.SetStateConcrete();
EXPECT_EQ(1, *value->value);
EXPECT_TRUE(!value.IsError());
}
EXPECT_EQ(2, payload_value);
{
AsyncValueRef<Payload> value =
MakeConstructedAsyncValueRef<Payload>(&payload_value);
EXPECT_TRUE(!value.IsError());
value.SetError(absl::InternalError("error"));
EXPECT_EQ(1, *value->value);
EXPECT_TRUE(value.IsError());
EXPECT_EQ("error", value.GetError().message());
}
EXPECT_EQ(2, payload_value);
}
TEST(AsyncValueTest, StackAllocatedAsyncValue) {
int32_t counter = 0;
class Payload {
public:
explicit Payload(int32_t& counter) : counter_{counter} { counter_++; }
~Payload() { counter_++; }
int32_t count() const { return counter_; }
private:
int32_t& counter_;
};
internal::AsyncValueStorage<Payload> storage;
AsyncValueOwningRef<Payload> owner =
MakeConstructedAsyncValueRef<Payload>(storage, counter);
AsyncValuePtr<Payload> ptr = owner.AsPtr();
AsyncValue* value = ptr.value();
EXPECT_TRUE(value->IsConstructed());
EXPECT_FALSE(value->IsAvailable());
EXPECT_EQ(1, counter);
EXPECT_EQ(1, ptr->count());
ptr.SetStateConcrete();
EXPECT_TRUE(ptr.IsAvailable());
  // Move the owning ref into a temporary heap wrapper that is destroyed
  // immediately; the payload destructor must run exactly once (counter == 2).
  std::make_unique<AsyncValueOwningRef<Payload>>(std::move(owner));
EXPECT_EQ(2, counter);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
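A small sketch of the waiting primitives implemented above (RunWhenReady and BlockUntilReady) together with the AsyncValueRef helpers used by the test file; header paths are taken from the record's own includes.

#include <cstdint>
#include <iostream>

#include "absl/types/span.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/async_value_ref.h"

int main() {
  // Constructed but not yet available.
  auto ref = tsl::MakeConstructedAsyncValueRef<int32_t>(42);
  tsl::AsyncValue* value = ref.GetAsyncValue();

  bool ran = false;
  // Deferred: the value is not available yet, so the callback is queued.
  tsl::RunWhenReady(absl::MakeConstSpan({value}), [&ran] { ran = true; });
  std::cout << "before: " << ran << std::endl;  // 0

  ref.SetStateConcrete();       // Publish the value; queued waiters run now.
  tsl::BlockUntilReady(value);  // Returns immediately, already available.
  std::cout << "after: " << ran << " value=" << ref.get() << std::endl;  // 1 42
  return 0;
}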
28137463-1133-408c-a2a4-1261817f14ef | cpp | tensorflow/tensorflow | async_value_ref | third_party/xla/xla/tsl/concurrency/async_value_ref.cc | third_party/xla/xla/tsl/concurrency/async_value_ref_test.cc | #include "xla/tsl/concurrency/async_value_ref.h"
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/logging.h"
namespace tsl {
RCReference<IndirectAsyncValue> MakeIndirectAsyncValue() {
return TakeRef(internal::AllocateAndConstruct<IndirectAsyncValue>());
}
RCReference<ErrorAsyncValue> MakeErrorAsyncValueRef(absl::Status status) {
CHECK(!status.ok()) << "status must be an error";
return TakeRef(
internal::AllocateAndConstruct<ErrorAsyncValue>(std::move(status)));
}
RCReference<ErrorAsyncValue> MakeErrorAsyncValueRef(std::string_view message) {
return MakeErrorAsyncValueRef(absl::InternalError(message));
}
} | #include "xla/tsl/concurrency/async_value_ref.h"
#include <any>
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
class WrappedInt32 {
public:
explicit WrappedInt32(int32_t value) : value_(value) {}
int32_t value() const { return value_; }
private:
int32_t value_;
};
constexpr int32_t kTestValue = 42;
TEST(AsyncValueRefTest, MakeUnconstructedStatusOrOfAny) {
auto value = MakeUnconstructedAsyncValueRef<absl::StatusOr<std::any>>();
EXPECT_TRUE(value.IsUnavailable());
}
TEST(AsyncValueRefTest, MakeUnconstructedStatusOr) {
auto value = MakeUnconstructedAsyncValueRef<absl::StatusOr<int32_t>>();
EXPECT_TRUE(value.IsUnavailable());
}
TEST(AsyncValueRefTest, MakeConstructedStatusOr) {
auto value = MakeConstructedAsyncValueRef<absl::StatusOr<int32_t>>(42);
EXPECT_TRUE(value.IsUnavailable());
}
TEST(AsyncValueRefTest, MakeAvailableStatusOr) {
auto value = MakeAvailableAsyncValueRef<absl::StatusOr<int32_t>>(42);
EXPECT_TRUE(value.IsAvailable());
EXPECT_EQ(**value, 42);
}
TEST(AsyncValueRefTest, ImplicitStatusConversion) {
auto error = []() -> AsyncValueRef<WrappedInt32> {
return absl::InternalError("Error");
}();
EXPECT_TRUE(error.IsAvailable());
EXPECT_TRUE(error.IsError());
EXPECT_EQ(error.GetError(), absl::InternalError("Error"));
}
TEST(AsyncValueRefTest, ImplicitStatusConversionWithStatusOrPayloadAndStatus) {
auto status = []() -> absl::StatusOr<absl::StatusOr<int32_t>> {
return absl::InternalError("Error");
}();
auto error = []() -> AsyncValueRef<absl::StatusOr<int32_t>> {
return absl::InternalError("Error");
}();
ASSERT_FALSE(status.ok());
ASSERT_EQ(status.status(), absl::InternalError("Error"));
EXPECT_TRUE(error.IsError());
EXPECT_EQ(error.GetError(), absl::InternalError("Error"));
}
TEST(AsyncValueRefTest, ValueCheck) {
auto wrapped_int_value = MakeAvailableAsyncValueRef<WrappedInt32>(kTestValue);
EXPECT_EQ(wrapped_int_value.get().value(), kTestValue);
EXPECT_EQ(wrapped_int_value->value(), kTestValue);
EXPECT_EQ((*wrapped_int_value).value(), kTestValue);
}
TEST(AsyncValueRefTest, ValueCheckFromRCReference) {
auto wrapped_int_value = MakeAvailableAsyncValueRef<WrappedInt32>(kTestValue);
RCReference<AsyncValue> generic_value = std::move(wrapped_int_value);
EXPECT_EQ(generic_value->get<WrappedInt32>().value(), kTestValue);
}
TEST(AsyncValueRefTest, ValueCheckFromAliasedRCReference) {
auto wrapped_int_value = MakeAvailableAsyncValueRef<WrappedInt32>(kTestValue);
RCReference<AsyncValue> generic_value = std::move(wrapped_int_value);
AsyncValueRef<WrappedInt32> aliased_int_value(std::move(generic_value));
EXPECT_EQ(aliased_int_value.get().value(), kTestValue);
EXPECT_EQ(aliased_int_value->value(), kTestValue);
EXPECT_EQ((*aliased_int_value).value(), kTestValue);
}
TEST(AsyncValueRefTest, ConstructedToError) {
auto value = MakeConstructedAsyncValueRef<int32_t>(kTestValue);
EXPECT_FALSE(value.IsConcrete());
EXPECT_FALSE(value.IsAvailable());
value.AndThen([] {});
value.SetError(absl::InternalError("test error"));
EXPECT_TRUE(value.IsAvailable());
EXPECT_FALSE(value.IsConcrete());
EXPECT_TRUE(value.IsError());
}
TEST(AsyncValueRefTest, ConstructedToConcrete) {
auto value = MakeConstructedAsyncValueRef<int32_t>(kTestValue);
EXPECT_FALSE(value.IsConcrete());
EXPECT_FALSE(value.IsAvailable());
value.AndThen([] {});
value.SetStateConcrete();
EXPECT_TRUE(value.IsAvailable());
EXPECT_TRUE(value.IsConcrete());
EXPECT_FALSE(value.IsError());
EXPECT_EQ(kTestValue, value.get());
}
TEST(AsyncValueRefTest, UnconstructedEmplace) {
auto value = MakeUnconstructedAsyncValueRef<int32_t>();
EXPECT_FALSE(value.IsConcrete());
EXPECT_FALSE(value.IsAvailable());
value.AndThen([] {});
value.emplace(kTestValue);
EXPECT_TRUE(value.IsAvailable());
EXPECT_TRUE(value.IsConcrete());
EXPECT_EQ(kTestValue, value.get());
}
TEST(AsyncValueRefTest, CopyRef) {
auto value = MakeAvailableAsyncValueRef<int32_t>(kTestValue);
EXPECT_TRUE(value.IsConcrete());
EXPECT_TRUE(value.IsUnique());
auto copied_value = value.CopyRef();
EXPECT_FALSE(value.IsUnique());
EXPECT_EQ(value.GetAsyncValue(), copied_value.GetAsyncValue());
}
TEST(AsyncValueRefTest, AndThen) {
AsyncValueRef<int32_t> ref = MakeUnconstructedAsyncValueRef<int32_t>();
EXPECT_FALSE(ref.IsConcrete());
EXPECT_FALSE(ref.IsAvailable());
bool executed = false;
ref.AndThen([&]() { executed = true; });
ref.emplace(42);
EXPECT_TRUE(executed);
}
TEST(AsyncValueRefTest, AndThenError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
auto error = absl::InternalError("test error");
ref.SetError(error);
ref.AndThen([&](absl::Status status) { EXPECT_EQ(status, error); });
}
TEST(AsyncValueRefTest, AndThenNoError) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
ref.AndThen([](absl::Status status) { EXPECT_TRUE(status.ok()); });
}
TEST(AsyncValueRefTest, AndThenStatusOrError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
auto error = absl::InternalError("test error");
ref.SetError(error);
ref.AndThen([&](absl::StatusOr<int32_t*> v) {
EXPECT_FALSE(v.ok());
EXPECT_EQ(v.status(), error);
});
}
TEST(AsyncValueRefTest, AndThenStatusOrNoError) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
ref.AndThen([&](absl::StatusOr<int32_t*> v) { EXPECT_EQ(**v, 42); });
}
TEST(AsyncValueRefTest, Nullptr) {
AsyncValueRef<int> av_int = nullptr;
EXPECT_FALSE(av_int);
AsyncValueRef<int> av_int2 = MakeConstructedAsyncValueRef<int>(kTestValue);
EXPECT_TRUE(av_int2);
av_int2 = nullptr;
EXPECT_FALSE(av_int2);
}
TEST(AsyncValueRefTest, MapAvailable) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, MapUnvailable) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
ref.SetStateConcrete();
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, MapToNonMoveable) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<std::atomic<int32_t>> mapped_to_atomic =
ref.Map<std::atomic<int32_t>>([](int32_t value) { return value; });
EXPECT_TRUE(mapped_to_atomic.IsAvailable());
EXPECT_EQ(mapped_to_atomic->load(), 42);
}
TEST(AsyncValueRefTest, MapError) {
AsyncValueRef<int32_t> ref =
MakeErrorAsyncValueRef(absl::InternalError("error"));
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, MapUnvailableError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.Map([](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
ref.SetError(absl::InternalError("error"));
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, MapMultipleTimes) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
auto plus_one = [](int32_t value) { return value + 1; };
AsyncValueRef<int32_t> mapped = ref.Map(plus_one)
.Map(plus_one)
.Map(plus_one)
.Map(plus_one)
.Map(plus_one)
.Map(plus_one);
EXPECT_TRUE(mapped.IsAvailable());
EXPECT_EQ(mapped.get(), 42 + 6);
}
TEST(AsyncValuePtrTest, MapToStatus) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<absl::Status> mapped_to_status =
ref.Map([](int32_t value) -> absl::Status { return absl::OkStatus(); });
EXPECT_TRUE(mapped_to_status.IsAvailable());
EXPECT_EQ(mapped_to_status.get(), absl::OkStatus());
}
TEST(AsyncValueRefTest, MapToStatusOr) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<absl::StatusOr<float>> mapped_to_float =
ref.Map([](int32_t value) -> absl::StatusOr<float> { return value; });
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(*mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMap) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.TryMap([](int32_t value) -> absl::StatusOr<float> { return value; });
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMapError) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> mapped_to_float =
ref.TryMap([](int32_t value) -> absl::StatusOr<float> {
return absl::InternalError("error");
});
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, TryMapConstructible) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
struct X {
explicit X(float value) : value(value) {}
float value;
};
AsyncValueRef<X> mapped_to_x = ref.TryMap<X>(
[](int32_t value) -> absl::StatusOr<float> { return value; });
EXPECT_TRUE(mapped_to_x.IsAvailable());
EXPECT_EQ(mapped_to_x->value, 42.0f);
}
TEST(AsyncValueRefTest, FlatMapAvailable) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, FlatMapUnavailable) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
ref.SetStateConcrete();
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, FlatMapAvailableError) {
AsyncValueRef<int32_t> ref =
MakeErrorAsyncValueRef(absl::InternalError("error"));
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_TRUE(fmapped_to_float.IsError());
EXPECT_EQ(fmapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, FlatMapUnavailableError) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
AsyncValueRef<float> fmapped_to_float = ref.FlatMap([](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
ref.SetError(absl::InternalError("error"));
EXPECT_TRUE(fmapped_to_float.IsError());
EXPECT_EQ(fmapped_to_float.GetError(), absl::InternalError("error"));
}
struct DeferredExecutor : public AsyncValue::Executor {
void Execute(Task task) final { tasks.push_back(std::move(task)); }
size_t Quiesce() {
size_t n = 0;
while (!tasks.empty()) {
Task task = std::move(tasks.back());
tasks.pop_back();
task();
++n;
}
return n;
}
std::vector<Task> tasks;
};
TEST(AsyncValueRefTest, MakeAsyncValueRef) {
DeferredExecutor executor;
{
AsyncValueRef<float> ref =
MakeAsyncValueRef<float>(executor, []() -> float { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref =
MakeAsyncValueRef(executor, []() -> float { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef<float>(
executor, []() -> absl::StatusOr<float> { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef(
executor, []() -> absl::StatusOr<float> { return 42.0f; });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsAvailable());
EXPECT_EQ(ref.get(), 42.0f);
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef<float>(
executor,
[]() -> absl::StatusOr<float> { return absl::InternalError("test"); });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsError());
EXPECT_EQ(ref.GetError(), absl::InternalError("test"));
}
{
AsyncValueRef<float> ref = TryMakeAsyncValueRef(
executor,
[]() -> absl::StatusOr<float> { return absl::InternalError("test"); });
EXPECT_FALSE(ref.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(ref.IsError());
EXPECT_EQ(ref.GetError(), absl::InternalError("test"));
}
}
TEST(AsyncValueRefTest, MapAvailableOnExecutor) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.Map(executor, [](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, MapErrorOnExecutor) {
AsyncValueRef<int32_t> ref =
MakeErrorAsyncValueRef(absl::InternalError("error"));
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.Map(executor, [](int32_t value) -> float { return value; });
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, MapUnavailableOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.Map(executor, [](int32_t value) -> float { return value; });
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMapOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float = ref.TryMap(
executor, [](int32_t value) -> absl::StatusOr<float> { return value; });
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsAvailable());
EXPECT_EQ(mapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, TryMapErrorOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> mapped_to_float =
ref.TryMap(executor, [](int32_t value) -> absl::StatusOr<float> {
return absl::InternalError("error");
});
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(mapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(mapped_to_float.IsError());
EXPECT_EQ(mapped_to_float.GetError(), absl::InternalError("error"));
}
TEST(AsyncValueRefTest, FlatMapAvailableOnExecutor) {
AsyncValueRef<int32_t> ref = MakeConstructedAsyncValueRef<int32_t>(42);
DeferredExecutor executor;
AsyncValueRef<float> fmapped_to_float =
ref.FlatMap(executor, [](int32_t value) {
return MakeAvailableAsyncValueRef<float>(static_cast<float>(value));
});
ref.SetStateConcrete();
ref.release()->DropRef();
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor.Quiesce(), 1);
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
TEST(AsyncValueRefTest, FlatMapDeferredAsyncValueOnExecutor) {
DeferredExecutor executor0;
DeferredExecutor executor1;
{
AsyncValueRef<float> fmapped_to_float =
MakeAsyncValueRef<std::unique_ptr<int32_t>>(executor0, [] {
return std::make_unique<int32_t>(42);
}).FlatMap([&](AsyncValuePtr<std::unique_ptr<int32_t>> ptr) {
return MakeAsyncValueRef<float>(
executor1, [ref = ptr.CopyRef()] { return **ref; });
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor0.Quiesce(), 1);
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor1.Quiesce(), 1);
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
{
AsyncValueRef<float> fmapped_to_float =
MakeAsyncValueRef<std::unique_ptr<int32_t>>(executor0, [] {
return std::make_unique<int32_t>(42);
}).FlatMap(executor1, [&](AsyncValuePtr<std::unique_ptr<int32_t>> ptr) {
return MakeAsyncValueRef<float>(
executor1, [ref = ptr.CopyRef()] { return **ref; });
});
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor0.Quiesce(), 1);
EXPECT_FALSE(fmapped_to_float.IsAvailable());
EXPECT_EQ(executor1.Quiesce(), 2);
EXPECT_TRUE(fmapped_to_float.IsAvailable());
EXPECT_EQ(fmapped_to_float.get(), 42.0f);
}
}
TEST(AsyncValueRefTest, BlockUntilReady) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
BlockUntilReady(ref);
}
TEST(AsyncValueRefTest, RunWhenReady) {
AsyncValueRef<int32_t> ref = MakeAvailableAsyncValueRef<int32_t>(42);
bool executed = false;
RunWhenReady(absl::MakeConstSpan({ref}), [&] { executed = true; });
EXPECT_TRUE(executed);
}
namespace {
struct A {
alignas(16) int32_t a;
};
struct B : public A {
alignas(32) int32_t b;
};
struct C : public B {
alignas(64) int32_t c;
};
struct D : public B {
alignas(64) int32_t d;
};
}
TEST(AsyncValueRefTest, AlignedPayload) {
AsyncValueRef<D> d_ref = MakeAvailableAsyncValueRef<D>();
d_ref->a = 1;
d_ref->b = 2;
d_ref->d = 3;
EXPECT_EQ(d_ref->a, 1);
EXPECT_EQ(d_ref->b, 2);
EXPECT_EQ(d_ref->d, 3);
AsyncValueRef<B> b_ref = d_ref.CopyRef();
EXPECT_EQ(b_ref->a, 1);
EXPECT_EQ(b_ref->b, 2);
AsyncValueRef<A> a_ref = d_ref.CopyRef();
EXPECT_EQ(a_ref->a, 1);
}
TEST(AsyncValueRefTest, Isa) {
AsyncValueRef<A> null_ref;
EXPECT_FALSE(Isa<A>(null_ref));
AsyncValueRef<A> a_ref = MakeAvailableAsyncValueRef<A>();
AsyncValueRef<A> b_ref = MakeAvailableAsyncValueRef<B>();
AsyncValueRef<A> c_ref = MakeAvailableAsyncValueRef<C>();
AsyncValueRef<A> d_ref = MakeAvailableAsyncValueRef<D>();
EXPECT_TRUE(Isa<A>(a_ref));
EXPECT_TRUE(Isa<B>(b_ref));
EXPECT_TRUE(Isa<C>(c_ref));
EXPECT_TRUE(Isa<D>(d_ref));
AsyncValueRef<A> err = MakeErrorAsyncValueRef(absl::InternalError("error"));
EXPECT_TRUE(Isa<A>(err));
EXPECT_TRUE(Isa<B>(err));
EXPECT_TRUE(Isa<C>(err));
EXPECT_TRUE(Isa<D>(err));
AsyncValueRef<A> a_err = MakeConstructedAsyncValueRef<A>();
AsyncValueRef<B> b_err = MakeConstructedAsyncValueRef<B>();
a_err.SetError(absl::InternalError("error"));
b_err.SetError(absl::InternalError("error"));
EXPECT_TRUE(Isa<A>(a_err));
EXPECT_TRUE(Isa<B>(b_err));
auto indirect = MakeIndirectAsyncValue();
AsyncValueRef<A> c_indirect(indirect);
EXPECT_TRUE(Isa<A>(c_indirect));
EXPECT_FALSE(Isa<C>(c_indirect));
indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Isa<A>(c_indirect));
EXPECT_TRUE(Isa<C>(c_indirect));
auto typed_indirect = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect(indirect);
EXPECT_TRUE(Isa<A>(c_typed_indirect));
EXPECT_TRUE(Isa<C>(c_typed_indirect));
typed_indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Isa<A>(c_typed_indirect));
EXPECT_TRUE(Isa<C>(c_typed_indirect));
auto typed_indirect_err = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect_err(typed_indirect_err);
EXPECT_TRUE(Isa<A>(c_typed_indirect.AsPtr()));
EXPECT_TRUE(Isa<C>(c_typed_indirect.AsPtr()));
typed_indirect_err->SetError(absl::InternalError("error"));
EXPECT_TRUE(Isa<A>(c_typed_indirect_err.AsPtr()));
EXPECT_TRUE(Isa<C>(c_typed_indirect_err.AsPtr()));
}
TEST(AsyncValueRefTest, DynCast) {
AsyncValueRef<A> a_ref = MakeAvailableAsyncValueRef<A>();
AsyncValueRef<A> b_ref = MakeAvailableAsyncValueRef<B>();
AsyncValueRef<A> c_ref = MakeAvailableAsyncValueRef<C>();
AsyncValueRef<A> d_ref = MakeAvailableAsyncValueRef<D>();
EXPECT_TRUE(DynCast<A>(a_ref));
EXPECT_TRUE(DynCast<B>(b_ref));
EXPECT_TRUE(DynCast<C>(c_ref));
EXPECT_TRUE(DynCast<D>(d_ref));
EXPECT_TRUE(DynCast<A>(c_ref));
EXPECT_FALSE(DynCast<B>(c_ref));
EXPECT_FALSE(DynCast<C>(d_ref));
AsyncValueRef<A> err = MakeErrorAsyncValueRef(absl::InternalError("error"));
EXPECT_TRUE(DynCast<A>(err));
EXPECT_TRUE(DynCast<B>(err));
EXPECT_TRUE(DynCast<C>(err));
EXPECT_TRUE(DynCast<D>(err));
AsyncValueRef<A> a_err = MakeConstructedAsyncValueRef<A>();
AsyncValueRef<B> b_err = MakeConstructedAsyncValueRef<B>();
a_err.SetError(absl::InternalError("error"));
b_err.SetError(absl::InternalError("error"));
EXPECT_TRUE(DynCast<A>(a_err));
EXPECT_TRUE(DynCast<B>(b_err));
EXPECT_FALSE(DynCast<C>(a_err));
auto indirect = MakeIndirectAsyncValue();
AsyncValueRef<A> c_indirect(indirect);
EXPECT_TRUE(DynCast<A>(c_indirect));
EXPECT_FALSE(DynCast<C>(c_indirect));
indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(DynCast<A>(c_indirect));
EXPECT_TRUE(DynCast<C>(c_indirect));
auto typed_indirect = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect(indirect);
EXPECT_TRUE(DynCast<A>(c_typed_indirect));
EXPECT_TRUE(DynCast<C>(c_typed_indirect));
typed_indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(DynCast<A>(c_typed_indirect));
EXPECT_TRUE(DynCast<C>(c_typed_indirect));
}
TEST(AsyncValueRefTest, Cast) {
AsyncValueRef<A> a_ref = MakeAvailableAsyncValueRef<A>();
AsyncValueRef<A> b_ref = MakeAvailableAsyncValueRef<B>();
AsyncValueRef<A> c_ref = MakeAvailableAsyncValueRef<C>();
AsyncValueRef<A> d_ref = MakeAvailableAsyncValueRef<D>();
EXPECT_TRUE(Cast<A>(a_ref));
EXPECT_TRUE(Cast<B>(b_ref));
EXPECT_TRUE(Cast<C>(c_ref));
EXPECT_TRUE(Cast<D>(d_ref));
EXPECT_TRUE(Cast<A>(c_ref));
AsyncValueRef<A> err = MakeErrorAsyncValueRef(absl::InternalError("error"));
EXPECT_TRUE(Cast<A>(err));
EXPECT_TRUE(Cast<B>(err));
EXPECT_TRUE(Cast<C>(err));
EXPECT_TRUE(Cast<D>(err));
AsyncValueRef<A> a_err = MakeConstructedAsyncValueRef<A>();
AsyncValueRef<B> b_err = MakeConstructedAsyncValueRef<B>();
a_err.SetError(absl::InternalError("error"));
b_err.SetError(absl::InternalError("error"));
EXPECT_TRUE(Cast<A>(a_err));
EXPECT_TRUE(Cast<B>(b_err));
auto indirect = MakeIndirectAsyncValue();
AsyncValueRef<A> c_indirect(indirect);
EXPECT_TRUE(Cast<A>(c_indirect));
indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Cast<A>(c_indirect));
EXPECT_TRUE(Cast<C>(c_indirect));
auto typed_indirect = MakeIndirectAsyncValue<C>();
AsyncValueRef<A> c_typed_indirect(indirect);
EXPECT_TRUE(Cast<A>(c_typed_indirect));
EXPECT_TRUE(Cast<C>(c_typed_indirect));
typed_indirect->ForwardTo(c_ref.CopyRCRef());
EXPECT_TRUE(Cast<A>(c_typed_indirect));
EXPECT_TRUE(Cast<C>(c_typed_indirect));
}
TEST(AsyncValueRefTest, RecursiveOwnership) {
struct State {
explicit State(AsyncValueRef<int32_t> value) : value(std::move(value)) {}
AsyncValueRef<int32_t> value;
};
AsyncValueRef<int32_t> value = MakeConstructedAsyncValueRef<int32_t>(42);
auto state = std::make_unique<State>(std::move(value));
State* state_ptr = state.get();
int64_t counter = 0;
state_ptr->value.AndThen([&, value = 1] { counter += value; });
state_ptr->value.AndThen([&, value = 2] { counter += value; });
state_ptr->value.AndThen([&, value = 3] { counter += value; });
state_ptr->value.AndThen([state = std::move(state)] {});
state_ptr->value.SetStateConcrete();
EXPECT_EQ(counter, 1 + 2 + 3);
}
template <size_t size>
static void BM_MakeConstructed(benchmark::State& state) {
for (auto _ : state) {
auto ref = MakeConstructedAsyncValueRef<std::array<char, size>>();
benchmark::DoNotOptimize(ref);
}
}
BENCHMARK(BM_MakeConstructed<1>);
BENCHMARK(BM_MakeConstructed<4>);
BENCHMARK(BM_MakeConstructed<8>);
BENCHMARK(BM_MakeConstructed<16>);
BENCHMARK(BM_MakeConstructed<32>);
BENCHMARK(BM_MakeConstructed<64>);
BENCHMARK(BM_MakeConstructed<128>);
BENCHMARK(BM_MakeConstructed<256>);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value_ref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/concurrency/async_value_ref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
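A brief sketch of AsyncValueRef's Map and AndThen as exercised in the tests above; the "halved" value and its lambda are illustrative only.

#include <cstdint>
#include <iostream>

#include "xla/tsl/concurrency/async_value_ref.h"

int main() {
  auto ref = tsl::MakeAvailableAsyncValueRef<int32_t>(42);

  // Map builds a new AsyncValueRef whose payload is computed from the input
  // once it becomes available; here the input is already available.
  tsl::AsyncValueRef<float> halved =
      ref.Map([](int32_t value) -> float { return value / 2.0f; });

  // The input was available, so this callback runs synchronously.
  halved.AndThen([&] { std::cout << "halved=" << halved.get() << std::endl; });
  return 0;
}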
be356233-8fba-4b09-a422-fc77a6275b18 | cpp | tensorflow/tensorflow | proto_serialization | third_party/xla/xla/tsl/lib/strings/proto_serialization.cc | tensorflow/core/lib/strings/proto_serialization_test.cc | #include "xla/tsl/lib/strings/proto_serialization.h"
#include <cstring>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/gtl/inlined_vector.h"
#include "tsl/platform/hash.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tsl {
namespace {
// Serializes a message deterministically into a small inline buffer, falling
// back to a heap allocation when the serialized size exceeds 256 bytes.
class DeterministicSerializer {
public:
explicit DeterministicSerializer(const protobuf::MessageLite& msg)
: DeterministicSerializer(msg, msg.ByteSizeLong()) {}
DeterministicSerializer(const protobuf::MessageLite& msg, size_t size)
: size_(size) {
char* ptr = space_;
if (size_ > sizeof(space_)) {
ptr = new char[size_];
alloc_.reset(ptr);
}
bool ok = SerializeToBufferDeterministic(msg, ptr, size_);
DCHECK(ok);
}
size_t size() const { return size_; }
const char* data() const { return alloc_ == nullptr ? space_ : alloc_.get(); }
private:
static constexpr int kInlinedBufferSize = 256;
const size_t size_;
std::unique_ptr<char[]> alloc_;
char space_[kInlinedBufferSize];
};
}
bool SerializeToStringDeterministic(const protobuf::MessageLite& msg,
string* result) {
const size_t size = msg.ByteSizeLong();
DCHECK_LE(size, static_cast<size_t>(INT_MAX));
*result = string(size, '\0');
return SerializeToBufferDeterministic(msg, const_cast<char*>(result->data()),
result->size());
}
bool SerializeToBufferDeterministic(const protobuf::MessageLite& msg,
char* buffer, size_t size) {
DCHECK(msg.ByteSizeLong() == size && size <= static_cast<size_t>(INT_MAX));
protobuf::io::ArrayOutputStream array_stream(buffer, size);
protobuf::io::CodedOutputStream output_stream(&array_stream);
output_stream.SetSerializationDeterministic(true);
msg.SerializeWithCachedSizes(&output_stream);
return !output_stream.HadError() &&
size == static_cast<size_t>(output_stream.ByteCount());
}
bool AreSerializedProtosEqual(const protobuf::MessageLite& x,
const protobuf::MessageLite& y) {
const size_t size = x.ByteSizeLong();
if (size != y.ByteSizeLong()) return false;
if (size == 0) return true;
DeterministicSerializer x_serialized(x, size);
DeterministicSerializer y_serialized(y, size);
return memcmp(x_serialized.data(), y_serialized.data(), size) == 0;
}
uint64 DeterministicProtoHash64(const protobuf::MessageLite& proto,
uint64 seed) {
DeterministicSerializer serialized(proto);
return Hash64(serialized.data(), serialized.size(), seed);
}
uint64 DeterministicProtoHash64(const protobuf::MessageLite& proto) {
DeterministicSerializer serialized(proto);
return Hash64(serialized.data(), serialized.size());
}
} | #include "tensorflow/core/lib/strings/proto_serialization.h"
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
GraphDef MakeGraphDef(int num_nodes) {
GraphDef graph_def;
for (int i = 0; i < num_nodes; ++i) {
NodeDef* node = graph_def.add_node();
node->set_name(strings::StrCat("node", i));
node->set_op(strings::StrCat("op", i % 10));
(*node->mutable_attr())["foo"].set_f(3.14f);
(*node->mutable_attr())["bar"].set_s("baz");
}
return graph_def;
}
}
static void BM_ProtoSerializationToString(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def = MakeGraphDef(num_nodes);
for (auto i : state) {
string serialized;
testing::DoNotOptimize(
SerializeToStringDeterministic(graph_def, &serialized));
}
}
BENCHMARK(BM_ProtoSerializationToString)->Range(1, 10000);
static void BM_ProtoSerializationToBuffer(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def = MakeGraphDef(num_nodes);
const size_t size = graph_def.ByteSizeLong();
for (auto i : state) {
gtl::InlinedVector<char, 1024> buf(size);
testing::DoNotOptimize(
SerializeToBufferDeterministic(graph_def, buf.data(), size));
}
}
BENCHMARK(BM_ProtoSerializationToBuffer)->Range(1, 10000);
static void BM_DeterministicProtoHash64(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def = MakeGraphDef(num_nodes);
for (auto i : state) {
testing::DoNotOptimize(DeterministicProtoHash64(graph_def));
}
}
BENCHMARK(BM_DeterministicProtoHash64)->Range(1, 10000);
static void BM_AreSerializedProtosEqual(::testing::benchmark::State& state) {
int num_nodes = state.range(0);
GraphDef graph_def_a = MakeGraphDef(num_nodes);
GraphDef graph_def_b = MakeGraphDef(num_nodes);
  // Perturb the first node's name so that graph_def_b differs from graph_def_a.
  graph_def_b.mutable_node(0)->mutable_name()[0] = 'l';
for (auto i : state) {
testing::DoNotOptimize(AreSerializedProtosEqual(graph_def_a, graph_def_a));
}
}
BENCHMARK(BM_AreSerializedProtosEqual)->Range(1, 10000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/strings/proto_serialization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/strings/proto_serialization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
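A hedged sketch of the deterministic-serialization helpers above. It assumes tensorflow::GraphDef (as in the benchmarks) and that the header path mirrors the proto_serialization.cc path in the record.

#include <cstdint>
#include <iostream>
#include <string>

#include "tensorflow/core/framework/graph.pb.h"  // GraphDef, as in the benchmarks
#include "xla/tsl/lib/strings/proto_serialization.h"

int main() {
  tensorflow::GraphDef a;
  a.add_node()->set_name("node0");
  tensorflow::GraphDef b = a;

  std::string bytes;
  bool ok = tsl::SerializeToStringDeterministic(a, &bytes);
  std::cout << "serialized=" << ok << " size=" << bytes.size() << std::endl;

  // Deterministic byte output makes byte-wise comparison and hashing stable.
  std::cout << "equal=" << tsl::AreSerializedProtosEqual(a, b) << std::endl;
  std::cout << "hash=" << tsl::DeterministicProtoHash64(a) << std::endl;
  return 0;
}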
57928c37-5683-4eb1-9be1-ec95f566df77 | cpp | tensorflow/tensorflow | bitmap | third_party/xla/xla/tsl/lib/core/bitmap.cc | third_party/xla/xla/tsl/lib/core/bitmap_test.cc | #include "xla/tsl/lib/core/bitmap.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include "absl/numeric/bits.h"
namespace tsl {
namespace core {
void Bitmap::Reset(size_t n) {
const size_t num_words = NumWords(n);
if (num_words != NumWords(nbits_)) {
Word* w = new Word[num_words];
delete[] word_;
word_ = w;
}
memset(word_, 0, sizeof(word_[0]) * num_words);
nbits_ = n;
}
// Returns the 1-based position of the least-significant set bit of `w`, or 0
// when `w` is zero.
static size_t FindFirstSet(uint32_t w) {
return w == 0 ? 0 : absl::countr_zero(w) + 1;
}
size_t Bitmap::FirstUnset(size_t start) const {
if (start >= nbits_) {
return nbits_;
}
size_t mask = (1ull << (start % kBits)) - 1;
const size_t nwords = NumWords(nbits_);
for (size_t i = start / kBits; i < nwords; i++) {
Word word = word_[i] | mask;
mask = 0;
size_t r = FindFirstSet(~word);
if (r) {
size_t result = i * kBits + (r - 1);
if (result > nbits_) result = nbits_;
return result;
}
}
return nbits_;
}
std::string Bitmap::ToString() const {
std::string result;
result.resize(bits());
for (size_t i = 0; i < nbits_; i++) {
result[i] = get(i) ? '1' : '0';
}
return result;
}
}
} | #include "xla/tsl/lib/core/bitmap.h"
#include "xla/tsl/lib/random/simple_philox.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace core {
namespace {
size_t NextSize(size_t n) { return n + ((n < 75) ? 1 : 25); }
static void MakeRandomBitmap(random::SimplePhilox* rnd, Bitmap* bitmap) {
size_t n = rnd->Uniform(200);
bitmap->Reset(n);
for (size_t i = 0; i < n; i++) {
if (rnd->OneIn(2)) bitmap->set(i);
}
}
TEST(BitmapTest, Basic) {
for (size_t n = 0; n < 200; n = NextSize(n)) {
Bitmap bits(n);
for (size_t i = 0; i < n; i++) {
EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
bits.set(i);
EXPECT_TRUE(bits.get(i)) << n << " " << i << " " << bits.ToString();
bits.clear(i);
EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
}
}
}
TEST(BitmapTest, ToString) {
Bitmap bits(10);
bits.set(1);
bits.set(3);
EXPECT_EQ(bits.ToString(), "0101000000");
}
TEST(BitmapTest, FirstUnset) {
for (size_t n = 0; n < 200; n = NextSize(n)) {
for (size_t p = 0; p <= 100; p++) {
for (size_t q = 0; q <= 100; q++) {
Bitmap bitmap(n);
int one_count = 0;
size_t i = 0;
while (i < p && i < n) {
one_count++;
bitmap.set(i);
i++;
}
while (i < n) {
i++;
for (size_t j = 0; j < q && i < n; j++, i++) {
one_count++;
bitmap.set(i);
}
}
int seen = 0;
size_t pos = 0;
while (true) {
pos = bitmap.FirstUnset(pos);
if (pos == n) break;
ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
seen++;
pos++;
}
EXPECT_EQ(seen, n - one_count) << " " << bitmap.ToString();
}
}
}
}
TEST(BitmapTest, FirstUnsetRandom) {
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
for (int iter = 0; iter < 10000; iter++) {
Bitmap bitmap;
MakeRandomBitmap(&rnd, &bitmap);
size_t zero_bits = 0;
for (size_t i = 0; i < bitmap.bits(); i++) {
if (!bitmap.get(i)) zero_bits++;
}
int seen = 0;
size_t pos = 0;
while (true) {
pos = bitmap.FirstUnset(pos);
if (pos == bitmap.bits()) break;
ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
seen++;
pos++;
}
EXPECT_EQ(seen, zero_bits) << " " << bitmap.ToString();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/core/bitmap.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/core/bitmap_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
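A short sketch of the Bitmap API shown above (sizing constructor, set/clear/get, ToString, FirstUnset); the expected outputs follow the ToString and FirstUnset tests.

#include <iostream>

#include "xla/tsl/lib/core/bitmap.h"

int main() {
  tsl::core::Bitmap bits(10);
  bits.set(1);
  bits.set(3);
  std::cout << bits.ToString() << std::endl;     // "0101000000"
  std::cout << bits.FirstUnset(1) << std::endl;  // 2: first clear bit at/after 1
  bits.clear(3);
  std::cout << bits.get(3) << std::endl;         // 0
  return 0;
}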
4f642d61-6d96-4e9c-995b-78264081631f | cpp | tensorflow/tensorflow | crc32c | third_party/xla/xla/tsl/lib/hash/crc32c.cc | third_party/xla/xla/tsl/lib/hash/crc32c_test.cc | #include "xla/tsl/lib/hash/crc32c.h"
#include <stdint.h>
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace crc32c {
#if defined(TF_CORD_SUPPORT)
uint32 Extend(uint32 crc, const absl::Cord &cord) {
for (absl::string_view fragment : cord.Chunks()) {
crc = Extend(crc, fragment.data(), fragment.size());
}
return crc;
}
#endif
}
} | #include "xla/tsl/lib/hash/crc32c.h"
#include <string>
#include "absl/strings/cord.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace crc32c {
TEST(CRC, StandardResults) {
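// Known-answer vectors for CRC-32C (Castagnoli polynomial).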
char buf[32];
memset(buf, 0, sizeof(buf));
ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf)));
memset(buf, 0xff, sizeof(buf));
ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = i;
}
ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = 31 - i;
}
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
unsigned char data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
ASSERT_EQ(0xdd1b19be, Value(reinterpret_cast<char*>(data), sizeof(data) - 7));
ASSERT_EQ(0x4930c4b1,
Value(reinterpret_cast<char*>(data) + 1, sizeof(data) - 4));
}
TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
uint32 crc = Value("foo", 3);
ASSERT_NE(crc, Mask(crc));
ASSERT_NE(crc, Mask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc)))));
}
#if defined(PLATFORM_GOOGLE)
TEST(CRC, ValuesWithCord) {
ASSERT_NE(Value(absl::Cord("a")), Value(absl::Cord("foo")));
}
TEST(CRC, ExtendWithCord) {
ASSERT_EQ(Value(absl::Cord("hello world")),
Extend(Value(absl::Cord("hello ")), absl::Cord("world")));
}
#endif
static void BM_CRC(::testing::benchmark::State& state) {
int len = state.range(0);
std::string input(len, 'x');
uint32 h = 0;
for (auto s : state) {
h = Extend(h, input.data() + 1, len - 1);
}
state.SetBytesProcessed(state.iterations() * len);
VLOG(1) << h;
}
BENCHMARK(BM_CRC)->Range(1, 256 * 1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/hash/crc32c.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/hash/crc32c_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2137774c-d1b0-416b-93a9-b2e5d467fb5e | cpp | tensorflow/tensorflow | table | tensorflow/lite/kernels/table.cc | tensorflow/lite/kernels/table_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace table {
constexpr int kInputTensor = 0;
constexpr int kTable = 1;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* table;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kTable, &table));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE(context,
input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
TF_LITE_ENSURE_TYPES_EQ(context, output->type, table->type);
if (input->type == kTfLiteInt16) {
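// The int16 path assumes symmetric quantization, so both zero points must be 0.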
TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
}
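// The lookup table must be a 1-D tensor with exactly LUTSize<T>() entries for the input type (one per representable int8 value, or the larger int16 table).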
TF_LITE_ENSURE_EQ(context, NumDimensions(table), 1);
if (input->type == kTfLiteInt8) {
TF_LITE_ENSURE_EQ(context, NumElements(table), LUTSize<int8_t>());
} else {
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);
TF_LITE_ENSURE_EQ(context, NumElements(table), LUTSize<int16_t>());
}
return context->ResizeTensor(context, output,
TfLiteIntArrayCopy(input->dims));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* table;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kTable, &table));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (input->type) {
case kTfLiteInt8:
reference_integer_ops::LookupTable(
GetTensorData<int8_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
GetTensorData<int8_t>(table), GetTensorData<int8_t>(output));
return kTfLiteOk;
case kTfLiteInt16:
reference_integer_ops::LookupTable(
GetTensorData<int16_t>(input),
MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
GetTensorData<int16_t>(table), GetTensorData<int16_t>(output));
return kTfLiteOk;
default:
TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Table");
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_TABLE() {
static TfLiteRegistration r = {nullptr, nullptr, table::Prepare, table::Eval};
return &r;
}
}
}
} | #include <cmath>
#include <limits>
#include <type_traits>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_TABLE();
namespace {
using ::testing::ElementsAreArray;
class TableOpModel : public SingleOpModel {
public:
TableOpModel(const TensorData& input, const TensorData& table,
const TensorData& output) {
input_ = AddInput(input);
table_ = AddInput(table);
output_ = AddOutput(output);
SetCustomOp("Table", {}, Register_TABLE);
BuildInterpreter({GetShape(input_), GetShape(table_)});
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
template <typename integer_dtype>
std::vector<float> GetDequantizedOutput() {
return Dequantize<integer_dtype>(ExtractVector<integer_dtype>(output_),
GetScale(output_), GetZeroPoint(output_));
}
int input() { return input_; }
int table() { return table_; }
int output() { return output_; }
protected:
int input_;
int table_;
int output_;
};
template <typename T>
inline float GetLUTTolerance(float input_min, float input_max, float output_min,
float output_max) {
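// Heuristic error bound proportional to the combined input and output quantization ranges.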
static_assert(
std::is_same<T, int8_t>::value || std::is_same<T, int16_t>::value,
"T must be an int8_t or int16_t.");
const float range_sum = (input_max - input_min) + (output_max - output_min);
if (std::is_same<T, int8_t>::value) {
return range_sum / 256.0f;
} else {
return range_sum / 512.0f;
}
}
template <typename T>
void TableWithExpLUTTest() {
float input_min = -0.5f;
float input_max = 0.8f;
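// For int16, use a symmetric range (max shrunk by one quantization step) so the zero point is exactly 0, as the kernel requires.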
if (std::is_same<T, int16_t>::value) {
input_min = -0.8f;
input_max = 0.8f * std::numeric_limits<T>::max() /
static_cast<float>(std::numeric_limits<T>::max() + 1);
}
float output_min = 0.0f;
float output_max = 2.4f;
if (std::is_same<T, int16_t>::value) {
output_min = -2.4f;
output_max = 2.4f * std::numeric_limits<T>::max() /
static_cast<float>(std::numeric_limits<T>::max() + 1);
}
const float kQuantizedTolerance =
GetLUTTolerance<T>(input_min, input_max, output_min, output_max);
TableOpModel m({GetTensorType<T>(), {1, 2, 3, 1}, input_min, input_max},
{GetTensorType<T>(), {LUTSize<T>()}},
{GetTensorType<T>(), {}, output_min, output_max});
T table[LUTSize<T>()];
LUTPopulate<T>(
m.GetScale(m.input()), m.GetZeroPoint(m.input()), m.GetScale(m.output()),
m.GetZeroPoint(m.output()), [](float v) { return std::exp(v); }, table);
m.QuantizeAndPopulate<T>(m.input(), {-0.5f, -0.2f, 0.0f, 0.1f, 0.3f, 0.8f});
m.PopulateTensor<T>(m.table(), 0, table, table + LUTSize<T>());
m.Invoke();
EXPECT_THAT(m.GetDequantizedOutput<T>(),
ElementsAreArray(ArrayFloatNear(
{std::exp(-0.5f), std::exp(-0.2f), std::exp(0.0f),
std::exp(0.1f), std::exp(0.3f), std::exp(0.8f)},
kQuantizedTolerance)));
}
TEST(TableOpTest, Int8ExpLUT) { TableWithExpLUTTest<int8_t>(); }
TEST(TableOpTest, Int16ExpLUT) { TableWithExpLUTTest<int16_t>(); }
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/table.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/table_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4a4e2c3c-500f-4632-a6de-198ec052515a | cpp | tensorflow/tensorflow | inputbuffer | third_party/xla/xla/tsl/lib/io/inputbuffer.cc | third_party/xla/xla/tsl/lib/io/inputbuffer_test.cc | #include "xla/tsl/lib/io/inputbuffer.h"
#include <algorithm>
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace io {
InputBuffer::InputBuffer(RandomAccessFile* file, size_t buffer_bytes)
: file_(file),
file_pos_(0),
size_(buffer_bytes),
buf_(new char[size_]),
pos_(buf_),
limit_(buf_) {}
InputBuffer::~InputBuffer() { delete[] buf_; }
absl::Status InputBuffer::FillBuffer() {
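// Refill the buffer starting at file_pos_. The file may return data in its own storage, so copy it into buf_ when it is not already there.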
absl::string_view data;
absl::Status s = file_->Read(file_pos_, size_, &data, buf_);
if (data.data() != buf_) {
memmove(buf_, data.data(), data.size());
}
pos_ = buf_;
limit_ = pos_ + data.size();
file_pos_ += data.size();
return s;
}
template <typename T>
absl::Status InputBuffer::ReadLine(T* result) {
result->clear();
absl::Status s;
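// Append buffered bytes up to the next '\n', refilling as needed; a trailing '\r' is stripped so CRLF line endings are handled too.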
do {
size_t buf_remain = limit_ - pos_;
char* newline = static_cast<char*>(memchr(pos_, '\n', buf_remain));
if (newline != nullptr) {
size_t result_len = newline - pos_;
result->append(pos_, result_len);
pos_ = newline + 1;
if (!result->empty() && result->back() == '\r') {
result->resize(result->size() - 1);
}
return absl::OkStatus();
}
if (buf_remain > 0) result->append(pos_, buf_remain);
s = FillBuffer();
DCHECK_EQ(pos_, buf_);
} while (limit_ != buf_);
if (!result->empty() && result->back() == '\r') {
result->resize(result->size() - 1);
}
if (errors::IsOutOfRange(s) && !result->empty()) {
return absl::OkStatus();
}
return s;
}
template absl::Status InputBuffer::ReadLine<std::string>(std::string* result);
template absl::Status InputBuffer::ReadLine<tstring>(tstring* result);
absl::Status InputBuffer::ReadNBytes(int64_t bytes_to_read,
std::string* result) {
result->clear();
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
result->resize(bytes_to_read);
size_t bytes_read = 0;
absl::Status status = ReadNBytes(bytes_to_read, &(*result)[0], &bytes_read);
if (bytes_read < bytes_to_read) result->resize(bytes_read);
return status;
}
absl::Status InputBuffer::ReadNBytes(int64_t bytes_to_read, char* result,
size_t* bytes_read) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
absl::Status status;
*bytes_read = 0;
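// Copy out of the buffer in chunks, refilling from the file whenever it runs dry.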
while (*bytes_read < static_cast<size_t>(bytes_to_read)) {
if (pos_ == limit_) {
status = FillBuffer();
if (limit_ == buf_) {
break;
}
}
const int64_t bytes_to_copy =
std::min<int64_t>(limit_ - pos_, bytes_to_read - *bytes_read);
memcpy(result + *bytes_read, pos_, bytes_to_copy);
pos_ += bytes_to_copy;
*bytes_read += bytes_to_copy;
}
if (errors::IsOutOfRange(status) &&
(*bytes_read == static_cast<size_t>(bytes_to_read))) {
return absl::OkStatus();
}
return status;
}
absl::Status InputBuffer::ReadVarint32Fallback(uint32* result) {
absl::Status s = ReadVarintFallback(result, core::kMaxVarint32Bytes);
if (errors::IsDataLoss(s)) {
return errors::DataLoss("Stored data is too large to be a varint32.");
}
return s;
}
absl::Status InputBuffer::ReadVarint64Fallback(uint64* result) {
absl::Status s = ReadVarintFallback(result, core::kMaxVarint64Bytes);
if (errors::IsDataLoss(s)) {
return errors::DataLoss("Stored data is too large to be a varint64.");
}
return s;
}
template <typename T>
absl::Status InputBuffer::ReadVarintFallback(T* result, int max_bytes) {
uint8 scratch = 0;
auto* p = reinterpret_cast<char*>(&scratch);
size_t unused_bytes_read = 0;
*result = 0;
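// Base-128 varint: each byte carries 7 payload bits, least-significant group first; the high bit marks that another byte follows.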
for (int index = 0; index < max_bytes; index++) {
int shift = 7 * index;
TF_RETURN_IF_ERROR(ReadNBytes(1, p, &unused_bytes_read));
*result |= (static_cast<T>(scratch) & 127) << shift;
if (!(scratch & 128)) return absl::OkStatus();
}
return errors::DataLoss("Stored data longer than ", max_bytes, " bytes.");
}
absl::Status InputBuffer::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can only skip forward, not ",
bytes_to_skip);
}
int64_t bytes_skipped = 0;
absl::Status s;
while (bytes_skipped < bytes_to_skip) {
if (pos_ == limit_) {
s = FillBuffer();
if (limit_ == buf_) {
break;
}
}
const int64_t bytes_to_advance =
std::min<int64_t>(limit_ - pos_, bytes_to_skip - bytes_skipped);
bytes_skipped += bytes_to_advance;
pos_ += bytes_to_advance;
}
if (errors::IsOutOfRange(s) && bytes_skipped == bytes_to_skip) {
return absl::OkStatus();
}
return s;
}
absl::Status InputBuffer::Seek(int64_t position) {
if (position < 0) {
return errors::InvalidArgument("Seeking to a negative position: ",
position);
}
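// bufpos is the file offset of the first buffered byte; a target inside [bufpos, file_pos_) can be served from the buffer without touching the file.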
const int64_t bufpos = file_pos_ - static_cast<int64_t>(limit_ - buf_);
if (position >= bufpos && position < file_pos_) {
pos_ = buf_ + (position - bufpos);
DCHECK(pos_ >= buf_ && pos_ < limit_);
} else {
pos_ = limit_ = buf_;
file_pos_ = position;
}
return absl::OkStatus();
}
absl::Status InputBuffer::Hint(int64_t bytes_to_read) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
if (bytes_to_read > size_) {
return absl::OkStatus();
}
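// Compact any unread bytes to the front of the buffer, then read ahead enough to make bytes_to_read bytes available.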
const int64_t bytes_remain_in_buf = static_cast<int64_t>(limit_ - pos_);
if (bytes_to_read <= bytes_remain_in_buf) {
return absl::OkStatus();
}
memmove(buf_, pos_, bytes_remain_in_buf);
pos_ = buf_;
limit_ = buf_ + bytes_remain_in_buf;
bytes_to_read -= bytes_remain_in_buf;
absl::string_view data;
absl::Status s = file_->Read(file_pos_, bytes_to_read, &data, limit_);
if (data.data() != limit_) {
memmove(limit_, data.data(), data.size());
}
limit_ += data.size();
file_pos_ += data.size();
if (errors::IsOutOfRange(s) && data.size() == bytes_to_read) {
return absl::OkStatus();
} else {
return s;
}
}
}
} | #include "xla/tsl/lib/io/inputbuffer.h"
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/coding.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
static std::vector<int> BufferSizes() {
return {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 65536};
}
TEST(InputBuffer, ReadLine_Empty) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine1) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_CHECK_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine_NoTrailingNewLine) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine_EmptyLines) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_CHECK_OK(
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadLine_CRLF) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname,
"line one\r\n\r\n\r\nline two\r\nline three"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string line;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_CHECK_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(InputBuffer, ReadNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_CHECK_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_CHECK_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
size_t bytes_read;
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
char read[5];
io::InputBuffer in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 3), "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 3), "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 4), "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 4), "3456");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, read, &bytes_read)));
EXPECT_EQ(absl::string_view(read, 3), "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, read, &bytes_read)));
EXPECT_EQ(absl::string_view(read, 3), "789");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, read, &bytes_read));
EXPECT_EQ(absl::string_view(read, 3), "789");
EXPECT_EQ(10, in.Tell());
}
}
TEST(InputBuffer, SkipNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_CHECK_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.SkipNBytes(0));
EXPECT_EQ(3, in.Tell());
TF_CHECK_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "34");
EXPECT_EQ(5, in.Tell());
TF_CHECK_OK(in.SkipNBytes(0));
EXPECT_EQ(5, in.Tell());
TF_CHECK_OK(in.SkipNBytes(2));
EXPECT_EQ(7, in.Tell());
TF_CHECK_OK(in.ReadNBytes(1, &read));
EXPECT_EQ(read, "7");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(InputBuffer, Seek) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "345");
TF_CHECK_OK(in.Seek(0));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.Seek(3));
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
TF_CHECK_OK(in.Seek(4));
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "4567");
TF_CHECK_OK(in.Seek(1 << 25));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(1, &read)));
EXPECT_TRUE(absl::StrContains(in.Seek(-1).ToString(), "negative position"));
}
}
TEST(InputBuffer, ReadVarint32) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
std::vector<uint32> data;
uint32 i = 0;
for (; i < (1U << 10); i += 1) data.push_back(i);
for (; i < (1U << 15); i += 5) data.push_back(i);
for (; i < (1U << 31); i += 132817) data.push_back(i);
data.push_back(std::numeric_limits<uint32>::max());
{
std::unique_ptr<WritableFile> file;
TF_CHECK_OK(env->NewWritableFile(fname, &file));
string varint;
for (uint32 number : data) {
varint.clear();
core::PutVarint32(&varint, number);
TF_CHECK_OK(file->Append(absl::string_view(varint)));
}
}
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
io::InputBuffer in(file.get(), buf_size);
uint32 result = 0;
for (uint32 expected : data) {
TF_ASSERT_OK(in.ReadVarint32(&result));
EXPECT_EQ(expected, result);
}
EXPECT_TRUE(errors::IsOutOfRange(in.ReadVarint32(&result)));
}
}
TEST(InputBuffer, ReadVarint64) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
std::vector<uint64> data;
uint64 i = 0;
for (; i < (1U << 10); i += 1) data.push_back(i);
for (; i < (1U << 15); i += 5) data.push_back(i);
for (; i < (1U << 31); i += 164817) data.push_back(i);
for (; i < (1ULL << 63); i += 16481797854795663UL) data.push_back(i);
data.push_back(std::numeric_limits<uint64>::max());
{
std::unique_ptr<WritableFile> file;
TF_CHECK_OK(env->NewWritableFile(fname, &file));
string varint;
for (uint64 number : data) {
varint.clear();
core::PutVarint64(&varint, number);
TF_CHECK_OK(file->Append(absl::string_view(varint)));
}
}
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
io::InputBuffer in(file.get(), buf_size);
uint64 result = 0;
for (uint64 expected : data) {
TF_ASSERT_OK(in.ReadVarint64(&result));
EXPECT_EQ(expected, result);
}
EXPECT_TRUE(errors::IsOutOfRange(in.ReadVarint64(&result)));
}
}
TEST(InputBuffer, Hint) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(env->NewRandomAccessFile(fname, &file));
string read;
io::InputBuffer in(file.get(), buf_size);
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.Hint(4));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "345");
TF_CHECK_OK(in.Hint(1));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "678");
TF_CHECK_OK(in.Seek(0));
TF_CHECK_OK(in.Hint(7));
TF_CHECK_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
TF_CHECK_OK(in.Hint(2));
TF_CHECK_OK(in.Seek(4));
TF_CHECK_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "4567");
TF_CHECK_OK(in.Seek(0));
TF_CHECK_OK(in.Hint(1 << 25));
TF_CHECK_OK(in.Seek(1 << 25));
EXPECT_TRUE(errors::IsOutOfRange(in.Hint(1)));
EXPECT_TRUE(errors::IsInvalidArgument(in.Hint(-1)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputbuffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputbuffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |