ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
4f18d1c7-9e2a-451b-b785-37047394c51e | cpp | tensorflow/tensorflow | inputstream_interface | third_party/xla/xla/tsl/lib/io/inputstream_interface.cc | third_party/xla/xla/tsl/lib/io/inputstream_interface_test.cc | #include "xla/tsl/lib/io/inputstream_interface.h"
#include "tsl/platform/errors.h"
namespace tsl {
namespace io {
static constexpr int64_t kMaxSkipSize = 8 * 1024 * 1024;
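// Default skip implementation: read and discard data in chunks of at most
// kMaxSkipSize (8 MiB) so the scratch string stays bounded.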
absl::Status InputStreamInterface::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can't skip a negative number of bytes");
}
tstring unused;
while (bytes_to_skip > 0) {
int64_t bytes_to_read = std::min<int64_t>(kMaxSkipSize, bytes_to_skip);
TF_RETURN_IF_ERROR(ReadNBytes(bytes_to_read, &unused));
bytes_to_skip -= bytes_to_read;
}
return absl::OkStatus();
}
}
} | #include "xla/tsl/lib/io/inputstream_interface.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
namespace {
class TestStringStream : public InputStreamInterface {
public:
explicit TestStringStream(const string& content) : content_(content) {}
absl::Status ReadNBytes(int64_t bytes_to_read, tstring* result) override {
result->clear();
if (pos_ + bytes_to_read > content_.size()) {
return errors::OutOfRange("limit reached");
}
*result = content_.substr(pos_, bytes_to_read);
pos_ += bytes_to_read;
return absl::OkStatus();
}
int64_t Tell() const override { return pos_; }
absl::Status Reset() override {
pos_ = 0;
return absl::OkStatus();
}
private:
string content_;
int64_t pos_ = 0;
};
TEST(InputStreamInterface, Basic) {
TestStringStream ss("This is a test string");
tstring res;
TF_ASSERT_OK(ss.ReadNBytes(4, &res));
EXPECT_EQ("This", res);
TF_ASSERT_OK(ss.SkipNBytes(6));
TF_ASSERT_OK(ss.ReadNBytes(11, &res));
EXPECT_EQ("test string", res);
EXPECT_TRUE(errors::IsOutOfRange(ss.SkipNBytes(1)));
TF_ASSERT_OK(ss.Reset());
TF_ASSERT_OK(ss.ReadNBytes(4, &res));
EXPECT_EQ("This", res);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputstream_interface.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputstream_interface_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a8e264be-afcc-4991-9b90-65765527a036 | cpp | tensorflow/tensorflow | random_inputstream | third_party/xla/xla/tsl/lib/io/random_inputstream.cc | third_party/xla/xla/tsl/lib/io/random_inputstream_test.cc | #include "xla/tsl/lib/io/random_inputstream.h"
#include <memory>
namespace tsl {
namespace io {
RandomAccessInputStream::RandomAccessInputStream(RandomAccessFile* file,
bool owns_file)
: file_(file), owns_file_(owns_file) {}
RandomAccessInputStream::~RandomAccessInputStream() {
if (owns_file_) {
delete file_;
}
}
absl::Status RandomAccessInputStream::ReadNBytes(int64_t bytes_to_read,
tstring* result) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Cannot read negative number of bytes");
}
result->clear();
result->resize_uninitialized(bytes_to_read);
char* result_buffer = &(*result)[0];
absl::string_view data;
absl::Status s = file_->Read(pos_, bytes_to_read, &data, result_buffer);
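// Some RandomAccessFile implementations return a view that does not alias
// the scratch buffer (e.g. mmapped files); copy the bytes in that case.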
if (data.data() != result_buffer) {
memmove(result_buffer, data.data(), data.size());
}
result->resize(data.size());
if (s.ok() || errors::IsOutOfRange(s)) {
pos_ += data.size();
}
return s;
}
#if defined(TF_CORD_SUPPORT)
absl::Status RandomAccessInputStream::ReadNBytes(int64_t bytes_to_read,
absl::Cord* result) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Cannot read negative number of bytes");
}
int64_t current_size = result->size();
absl::Status s = file_->Read(pos_, bytes_to_read, result);
if (s.ok() || errors::IsOutOfRange(s)) {
pos_ += result->size() - current_size;
}
return s;
}
#endif
static constexpr int64_t kMaxSkipSize = 8 * 1024 * 1024;
absl::Status RandomAccessInputStream::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can't skip a negative number of bytes");
}
std::unique_ptr<char[]> scratch(new char[kMaxSkipSize]);
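// Fast path: probe the last byte of the skip range. If it is readable, the
// whole range lies within the file and the position can be advanced directly.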
if (bytes_to_skip > 0) {
absl::string_view data;
absl::Status s =
file_->Read(pos_ + bytes_to_skip - 1, 1, &data, scratch.get());
if ((s.ok() || errors::IsOutOfRange(s)) && data.size() == 1) {
pos_ += bytes_to_skip;
return absl::OkStatus();
}
}
while (bytes_to_skip > 0) {
int64_t bytes_to_read = std::min<int64_t>(kMaxSkipSize, bytes_to_skip);
absl::string_view data;
absl::Status s = file_->Read(pos_, bytes_to_read, &data, scratch.get());
if (s.ok() || errors::IsOutOfRange(s)) {
pos_ += data.size();
} else {
return s;
}
if (data.size() < static_cast<size_t>(bytes_to_read)) {
return errors::OutOfRange("reached end of file");
}
bytes_to_skip -= bytes_to_read;
}
return absl::OkStatus();
}
int64_t RandomAccessInputStream::Tell() const { return pos_; }
}
} | #include "xla/tsl/lib/io/random_inputstream.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
namespace {
TEST(RandomInputStream, ReadNBytes) {
Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_test";
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
tstring read;
RandomAccessInputStream in(file.get());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(5, &read));
EXPECT_EQ(read, "34567");
EXPECT_EQ(8, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(20, &read)));
EXPECT_EQ(read, "89");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
#if defined(TF_CORD_SUPPORT)
TEST(RandomInputStream, ReadNBytesWithCords) {
Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_test";
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
absl::Cord read;
RandomAccessInputStream in(file.get());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(5, &read));
EXPECT_EQ(read, "01234567");
EXPECT_EQ(8, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "01234567");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(20, &read)));
EXPECT_EQ(read, "0123456789");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "0123456789");
EXPECT_EQ(10, in.Tell());
}
#endif
TEST(RandomInputStream, SkipNBytes) {
Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_test";
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
tstring read;
RandomAccessInputStream in(file.get());
TF_ASSERT_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "78");
EXPECT_EQ(9, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(20)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
TEST(RandomInputStream, Seek) {
Env* env = Env::Default();
string fname = testing::TmpDir() + "/random_inputbuffer_seek_test";
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
tstring read;
RandomAccessInputStream in(file.get());
TF_ASSERT_OK(in.Seek(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.Seek(1));
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "1234");
EXPECT_EQ(5, in.Tell());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/random_inputstream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/random_inputstream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
728c532d-716d-43c9-8ea2-fe54b64e4765 | cpp | tensorflow/tensorflow | buffered_inputstream | third_party/xla/xla/tsl/lib/io/buffered_inputstream.cc | third_party/xla/xla/tsl/lib/io/buffered_inputstream_test.cc | #include "xla/tsl/lib/io/buffered_inputstream.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/io/random_inputstream.h"
namespace tsl {
namespace io {
BufferedInputStream::BufferedInputStream(InputStreamInterface* input_stream,
size_t buffer_bytes,
bool owns_input_stream)
: input_stream_(input_stream),
size_(buffer_bytes),
owns_input_stream_(owns_input_stream) {
buf_.reserve(size_);
}
BufferedInputStream::BufferedInputStream(RandomAccessFile* file,
size_t buffer_bytes)
: BufferedInputStream(new RandomAccessInputStream(file), buffer_bytes,
true) {}
BufferedInputStream::~BufferedInputStream() {
if (owns_input_stream_) {
delete input_stream_;
}
}
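// Refills buf_ from the underlying stream. A non-OK status (typically
// OutOfRange at EOF) is cached in file_status_ so later reads fail fast.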
absl::Status BufferedInputStream::FillBuffer() {
if (!file_status_.ok()) {
pos_ = 0;
limit_ = 0;
return file_status_;
}
absl::Status s = input_stream_->ReadNBytes(size_, &buf_);
pos_ = 0;
limit_ = buf_.size();
if (!s.ok()) {
file_status_ = s;
}
return s;
}
template <typename StringType>
absl::Status BufferedInputStream::ReadLineHelper(StringType* result,
bool include_eol) {
result->clear();
absl::Status s;
size_t start_pos = pos_;
while (true) {
if (pos_ == limit_) {
result->append(buf_.data() + start_pos, pos_ - start_pos);
s = FillBuffer();
if (limit_ == 0) {
break;
}
start_pos = pos_;
}
char c = buf_[pos_];
if (c == '\n') {
result->append(buf_.data() + start_pos, pos_ - start_pos);
if (include_eol) {
result->append(1, c);
}
pos_++;
return absl::OkStatus();
}
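// Drop a '\r' so that both "\n" and "\r\n" line terminators are handled.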
if (c == '\r') {
result->append(buf_.data() + start_pos, pos_ - start_pos);
start_pos = pos_ + 1;
}
pos_++;
}
if (absl::IsOutOfRange(s) && !result->empty()) {
return absl::OkStatus();
}
return s;
}
absl::Status BufferedInputStream::ReadNBytes(int64_t bytes_to_read,
tstring* result) {
if (bytes_to_read < 0) {
return errors::InvalidArgument("Can't read a negative number of bytes: ",
bytes_to_read);
}
result->clear();
if (pos_ == limit_ && !file_status_.ok() && bytes_to_read > 0) {
return file_status_;
}
result->reserve(bytes_to_read);
absl::Status s;
while (result->size() < static_cast<size_t>(bytes_to_read)) {
if (pos_ == limit_) {
s = FillBuffer();
if (limit_ == 0) {
DCHECK(!s.ok());
file_status_ = s;
break;
}
}
const int64_t bytes_to_copy =
std::min<int64_t>(limit_ - pos_, bytes_to_read - result->size());
result->insert(result->size(), buf_, pos_, bytes_to_copy);
pos_ += bytes_to_copy;
}
if (absl::IsOutOfRange(s) &&
(result->size() == static_cast<size_t>(bytes_to_read))) {
return absl::OkStatus();
}
return s;
}
absl::Status BufferedInputStream::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can only skip forward, not ",
bytes_to_skip);
}
if (pos_ + bytes_to_skip < limit_) {
pos_ += bytes_to_skip;
} else {
absl::Status s = input_stream_->SkipNBytes(bytes_to_skip - (limit_ - pos_));
pos_ = 0;
limit_ = 0;
if (absl::IsOutOfRange(s)) {
file_status_ = s;
}
return s;
}
return absl::OkStatus();
}
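// The logical position is the underlying stream's position minus the bytes
// still buffered but not yet consumed.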
int64_t BufferedInputStream::Tell() const {
return input_stream_->Tell() - (limit_ - pos_);
}
absl::Status BufferedInputStream::Seek(int64_t position) {
if (position < 0) {
return errors::InvalidArgument("Seeking to a negative position: ",
position);
}
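// buf_lower_limit is the stream position of the first byte currently held
// in the buffer; seeking before it requires resetting the underlying stream.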
const int64_t buf_lower_limit = input_stream_->Tell() - limit_;
if (position < buf_lower_limit) {
TF_RETURN_IF_ERROR(Reset());
return SkipNBytes(position);
}
if (position < Tell()) {
pos_ -= Tell() - position;
return absl::OkStatus();
}
return SkipNBytes(position - Tell());
}
template <typename T>
absl::Status BufferedInputStream::ReadAll(T* result) {
result->clear();
absl::Status status;
while (status.ok()) {
status = FillBuffer();
if (limit_ == 0) {
break;
}
result->append(buf_);
pos_ = limit_;
}
if (absl::IsOutOfRange(status)) {
file_status_ = status;
return absl::OkStatus();
}
return status;
}
template Status BufferedInputStream::ReadAll<std::string>(std::string* result);
template Status BufferedInputStream::ReadAll<tstring>(tstring* result);
absl::Status BufferedInputStream::Reset() {
TF_RETURN_IF_ERROR(input_stream_->Reset());
pos_ = 0;
limit_ = 0;
file_status_ = absl::OkStatus();
return absl::OkStatus();
}
absl::Status BufferedInputStream::ReadLine(std::string* result) {
return ReadLineHelper(result, false);
}
absl::Status BufferedInputStream::ReadLine(tstring* result) {
return ReadLineHelper(result, false);
}
std::string BufferedInputStream::ReadLineAsString() {
std::string result;
ReadLineHelper(&result, true).IgnoreError();
return result;
}
absl::Status BufferedInputStream::SkipLine() {
absl::Status s;
bool skipped = false;
while (true) {
if (pos_ == limit_) {
s = FillBuffer();
if (limit_ == 0) {
break;
}
}
char c = buf_[pos_++];
skipped = true;
if (c == '\n') {
return absl::OkStatus();
}
}
if (absl::IsOutOfRange(s) && skipped) {
return absl::OkStatus();
}
return s;
}
}
} | #include "xla/tsl/lib/io/buffered_inputstream.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/random_inputstream.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace io {
namespace {
static std::vector<int> BufferSizes() {
return {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 65536};
}
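// An input stream that yields "0123456789" exactly once and then errors,
// used to verify that BufferedInputStream caches an OutOfRange status.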
class ReadOnceInputStream : public InputStreamInterface {
public:
ReadOnceInputStream() : start_(true) {}
virtual absl::Status ReadNBytes(int64_t bytes_to_read, tstring* result) {
if (bytes_to_read < 11) {
return errors::InvalidArgument("Not reading all bytes: ", bytes_to_read);
}
if (start_) {
*result = "0123456789";
start_ = false;
return errors::OutOfRange("Out of range.");
}
return errors::InvalidArgument(
"Redudant call to ReadNBytes after an OutOfRange error.");
}
int64_t Tell() const override { return start_ ? 0 : 10; }
absl::Status Reset() override {
start_ = true;
return absl::OkStatus();
}
private:
bool start_;
};
TEST(BufferedInputStream, ReadLine_Empty) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, ""));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine1) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine_NoTrailingNewLine) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine_EmptyLines) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\n\n\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, ReadLine_CRLF) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname,
"line one\r\n\r\n\r\nline two\r\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line one");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line three");
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
EXPECT_TRUE(errors::IsOutOfRange(in.ReadLine(&line)));
}
}
TEST(BufferedInputStream, SkipLine1) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(
WriteStringToFile(env, fname, "line one\nline two\nline three\n"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.SkipLine());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
}
}
TEST(BufferedInputStream, SkipLine_NoTrailingNewLine) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\nline two\nline three"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
TF_ASSERT_OK(in.SkipLine());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
EXPECT_TRUE(errors::IsOutOfRange(in.SkipLine()));
}
}
TEST(BufferedInputStream, SkipLine_EmptyLines) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "line one\n\n\nline two"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
BufferedInputStream in(input_stream.get(), buf_size);
string line;
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "");
TF_ASSERT_OK(in.SkipLine());
TF_ASSERT_OK(in.ReadLine(&line));
EXPECT_EQ(line, "line two");
}
}
TEST(BufferedInputStream, ReadNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, OutOfRangeCache) {
for (auto buf_size : BufferSizes()) {
if (buf_size < 11) {
continue;
}
ReadOnceInputStream input_stream;
tstring read;
BufferedInputStream in(&input_stream, buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK((in.ReadNBytes(7, &read)));
EXPECT_EQ(read, "3456789");
EXPECT_EQ(10, in.Tell());
absl::Status s = in.ReadNBytes(5, &read);
EXPECT_EQ(error::OUT_OF_RANGE, s.code()) << s;
EXPECT_EQ(read, "");
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
}
}
TEST(BufferedInputStream, SkipNBytes) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "34");
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(2));
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(1, &read));
EXPECT_EQ(read, "7");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, ReadNBytesRandomAccessFile) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
tstring read;
BufferedInputStream in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(3, &read));
EXPECT_EQ(read, "012");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(7, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "789");
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(0, &read));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, SkipNBytesRandomAccessFile) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
tstring read;
BufferedInputStream in(file.get(), buf_size);
EXPECT_EQ(0, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(2, &read));
EXPECT_EQ(read, "34");
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(0));
EXPECT_EQ(5, in.Tell());
TF_ASSERT_OK(in.SkipNBytes(2));
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(1, &read));
EXPECT_EQ(read, "7");
EXPECT_EQ(8, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(5)));
EXPECT_EQ(10, in.Tell());
EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
EXPECT_EQ(read, "");
EXPECT_EQ(10, in.Tell());
}
}
TEST(BufferedInputStream, Seek) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), buf_size);
TF_ASSERT_OK(in.Seek(3));
EXPECT_EQ(3, in.Tell());
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "3456");
EXPECT_EQ(7, in.Tell());
TF_ASSERT_OK(in.Seek(1));
TF_ASSERT_OK(in.ReadNBytes(4, &read));
EXPECT_EQ(read, "1234");
EXPECT_EQ(5, in.Tell());
}
}
TEST(BufferedInputStream, Seek_NotReset) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
std::unique_ptr<RandomAccessInputStream> input_stream(
new RandomAccessInputStream(file.get()));
tstring read;
BufferedInputStream in(input_stream.get(), 3);
TF_ASSERT_OK(in.ReadNBytes(4, &read));
int before_tell = input_stream.get()->Tell();
EXPECT_EQ(before_tell, 6);
TF_ASSERT_OK(in.Seek(3));
int after_tell = input_stream.get()->Tell();
EXPECT_EQ(before_tell, after_tell);
}
TEST(BufferedInputStream, ReadAll_Empty) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
const string expected = "";
TF_ASSERT_OK(WriteStringToFile(env, fname, expected));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
RandomAccessInputStream input_stream(file.get());
BufferedInputStream in(&input_stream, buf_size);
string contents;
TF_ASSERT_OK(in.ReadAll(&contents));
EXPECT_EQ(expected, contents);
}
}
TEST(BufferedInputStream, ReadAll_Text) {
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
const string expected = "line one\nline two\nline three";
TF_ASSERT_OK(WriteStringToFile(env, fname, expected));
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
for (auto buf_size : BufferSizes()) {
RandomAccessInputStream input_stream(file.get());
BufferedInputStream in(&input_stream, buf_size);
string contents;
TF_ASSERT_OK(in.ReadAll(&contents));
EXPECT_EQ(expected, contents);
}
}
void BM_BufferedReaderSmallReads(::testing::benchmark::State& state) {
const int buff_size = state.range(0);
const int file_size = state.range(1);
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
const string file_elem = "0123456789";
std::unique_ptr<WritableFile> write_file;
TF_ASSERT_OK(env->NewWritableFile(fname, &write_file));
for (int i = 0; i < file_size; ++i) {
TF_ASSERT_OK(write_file->Append(file_elem));
}
TF_ASSERT_OK(write_file->Close());
std::unique_ptr<RandomAccessFile> file;
TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
tstring result;
int itr = 0;
for (auto s : state) {
BufferedInputStream in(file.get(), buff_size);
for (int64_t i = 0; i < 10 * file_size; ++i) {
TF_ASSERT_OK(in.ReadNBytes(1, &result))
<< "i: " << i << " itr: " << itr << " buff_size: " << buff_size
<< " file size: " << file_size;
}
++itr;
}
}
BENCHMARK(BM_BufferedReaderSmallReads)
->ArgPair(1, 5)
->ArgPair(1, 1024)
->ArgPair(10, 5)
->ArgPair(10, 1024)
->ArgPair(1024, 1024)
->ArgPair(1024 * 1024, 1024)
->ArgPair(1024 * 1024, 1024 * 1024)
->ArgPair(256 * 1024 * 1024, 1024);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_inputstream.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/buffered_inputstream_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
974b7765-e143-439c-bdc2-2ea618027569 | cpp | tensorflow/tensorflow | cache | third_party/xla/xla/tsl/lib/io/cache.cc | third_party/xla/xla/tsl/lib/io/cache_test.cc | #include "xla/tsl/lib/io/cache.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "tsl/platform/mutex.h"
#include "tsl/platform/raw_coding.h"
namespace tsl {
namespace table {
Cache::~Cache() {}
namespace {
struct LRUHandle {
void* value;
void (*deleter)(const Slice&, void* value);
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
size_t charge;
size_t key_length;
bool in_cache;
uint32_t refs;
uint32_t hash;
char key_data[1];
Slice key() const {
assert(next != this);
return Slice(key_data, key_length);
}
};
class HandleTable {
public:
HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
return *FindPointer(key, hash);
}
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr;
h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
if (old == nullptr) {
++elems_;
if (elems_ > length_) {
Resize();
}
}
return old;
}
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
if (result != nullptr) {
*ptr = result->next_hash;
--elems_;
}
return result;
}
private:
uint32_t length_;
uint32_t elems_;
LRUHandle** list_;
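// Returns a pointer to the slot that points at the entry matching key/hash,
// or to the trailing null slot of its chain if the entry is absent.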
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
ptr = &(*ptr)->next_hash;
}
return ptr;
}
void Resize() {
uint32_t new_length = 4;
while (new_length < elems_) {
new_length *= 2;
}
LRUHandle** new_list = new LRUHandle*[new_length];
memset(new_list, 0, sizeof(new_list[0]) * new_length);
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
while (h != nullptr) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
h->next_hash = *ptr;
*ptr = h;
h = next;
count++;
}
}
assert(elems_ == count);
delete[] list_;
list_ = new_list;
length_ = new_length;
}
};
class LRUCache {
public:
LRUCache();
~LRUCache();
void SetCapacity(size_t capacity) { capacity_ = capacity; }
Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
void Erase(const Slice& key, uint32_t hash);
void Prune();
size_t TotalCharge() const {
mutex_lock l(mutex_);
return usage_;
}
private:
void LRU_Remove(LRUHandle* e);
void LRU_Append(LRUHandle* list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
bool FinishErase(LRUHandle* e) TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
size_t capacity_;
mutable mutex mutex_;
size_t usage_ TF_GUARDED_BY(mutex_);
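// Dummy heads of circular doubly-linked lists. Entries with refs == 1 and
// in_cache == true sit on lru_ (eligible for eviction); entries also held
// by clients sit on in_use_.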
LRUHandle lru_ TF_GUARDED_BY(mutex_);
LRUHandle in_use_ TF_GUARDED_BY(mutex_);
HandleTable table_ TF_GUARDED_BY(mutex_);
};
LRUCache::LRUCache() : capacity_(0), usage_(0) {
lru_.next = &lru_;
lru_.prev = &lru_;
in_use_.next = &in_use_;
in_use_.prev = &in_use_;
}
LRUCache::~LRUCache() {
assert(in_use_.next == &in_use_);
for (LRUHandle* e = lru_.next; e != &lru_;) {
LRUHandle* next = e->next;
assert(e->in_cache);
e->in_cache = false;
assert(e->refs == 1);
Unref(e);
e = next;
}
}
void LRUCache::Ref(LRUHandle* e) {
if (e->refs == 1 && e->in_cache) {
LRU_Remove(e);
LRU_Append(&in_use_, e);
}
e->refs++;
}
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
if (e->refs == 0) {
assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
} else if (e->in_cache && e->refs == 1) {
LRU_Remove(e);
LRU_Append(&lru_, e);
}
}
void LRUCache::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev;
e->prev->next = e->next;
}
void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
e->next = list;
e->prev = list->prev;
e->prev->next = e;
e->next->prev = e;
}
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
mutex_lock l(mutex_);
LRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
Ref(e);
}
return reinterpret_cast<Cache::Handle*>(e);
}
void LRUCache::Release(Cache::Handle* handle) {
mutex_lock l(mutex_);
Unref(reinterpret_cast<LRUHandle*>(handle));
}
Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key,
void* value)) {
mutex_lock l(mutex_);
LRUHandle* e =
reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
e->in_cache = false;
e->refs = 1;
memcpy(e->key_data, key.data(), key.size());
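// With capacity_ == 0 caching is turned off: the handle is still returned
// to the caller but never linked into the table or the LRU lists.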
if (capacity_ > 0) {
e->refs++;
e->in_cache = true;
LRU_Append(&in_use_, e);
usage_ += charge;
FinishErase(table_.Insert(e));
} else {
e->next = nullptr;
}
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->refs == 1);
bool erased = FinishErase(table_.Remove(old->key(), old->hash));
if (!erased) {
assert(erased);
}
}
return reinterpret_cast<Cache::Handle*>(e);
}
bool LRUCache::FinishErase(LRUHandle* e) {
if (e != nullptr) {
assert(e->in_cache);
LRU_Remove(e);
e->in_cache = false;
usage_ -= e->charge;
Unref(e);
}
return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
mutex_lock l(mutex_);
FinishErase(table_.Remove(key, hash));
}
void LRUCache::Prune() {
mutex_lock l(mutex_);
while (lru_.next != &lru_) {
LRUHandle* e = lru_.next;
assert(e->refs == 1);
bool erased = FinishErase(table_.Remove(e->key(), e->hash));
if (!erased) {
assert(erased);
}
}
}
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;
class ShardedLRUCache : public Cache {
private:
LRUCache shard_[kNumShards];
mutex id_mutex_;
uint64_t last_id_;
static inline uint32_t HashSlice(const Slice& s) {
return Hash(s.data(), s.size(), 0);
}
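// Use the top kNumShardBits bits of the hash to pick a shard.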
static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
public:
explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
~ShardedLRUCache() override {}
Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
}
Handle* Lookup(const Slice& key) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Lookup(key, hash);
}
void Release(Handle* handle) override {
LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
shard_[Shard(h->hash)].Release(handle);
}
void Erase(const Slice& key) override {
const uint32_t hash = HashSlice(key);
shard_[Shard(hash)].Erase(key, hash);
}
void* Value(Handle* handle) override {
return reinterpret_cast<LRUHandle*>(handle)->value;
}
uint64_t NewId() override {
mutex_lock l(id_mutex_);
return ++(last_id_);
}
void Prune() override {
for (int s = 0; s < kNumShards; s++) {
shard_[s].Prune();
}
}
size_t TotalCharge() const override {
size_t total = 0;
for (int s = 0; s < kNumShards; s++) {
total += shard_[s].TotalCharge();
}
return total;
}
private:
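// Murmur-inspired hash taken from LevelDB: mixes four bytes at a time and
// then folds in the remaining tail bytes.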
static uint32_t Hash(const char* data, size_t n, uint32_t seed) {
const uint32_t m = 0xc6a4a793;
const uint32_t r = 24;
const char* limit = data + n;
uint32_t h = seed ^ (n * m);
while (data + 4 <= limit) {
uint32_t w = core::DecodeFixed32(data);
data += 4;
h += w;
h *= m;
h ^= (h >> 16);
}
switch (limit - data) {
case 3:
h += static_cast<uint8_t>(data[2]) << 16;
ABSL_FALLTHROUGH_INTENDED;
case 2:
h += static_cast<uint8_t>(data[1]) << 8;
ABSL_FALLTHROUGH_INTENDED;
case 1:
h += static_cast<uint8_t>(data[0]);
h *= m;
h ^= (h >> r);
break;
}
return h;
}
};
}
Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
}
} | #include "xla/tsl/lib/io/cache.h"
#include <string>
#include <vector>
#include "tsl/platform/coding.h"
#include "tsl/platform/raw_coding.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace table {
static std::string EncodeKey(int k) {
std::string result;
core::PutFixed32(&result, k);
return result;
}
static int DecodeKey(const Slice& k) {
assert(k.size() == 4);
return core::DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
class CacheTest : public ::testing::Test {
public:
static void Deleter(const Slice& key, void* v) {
current_->deleted_keys_.push_back(DecodeKey(key));
current_->deleted_values_.push_back(DecodeValue(v));
}
static constexpr int kCacheSize = 1000;
std::vector<int> deleted_keys_;
std::vector<int> deleted_values_;
Cache* cache_;
CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
~CacheTest() { delete cache_; }
int Lookup(int key) {
Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
if (handle != nullptr) {
cache_->Release(handle);
}
return r;
}
void Insert(int key, int value, int charge = 1) {
cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
&CacheTest::Deleter));
}
Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {
return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
&CacheTest::Deleter);
}
void Erase(int key) { cache_->Erase(EncodeKey(key)); }
static CacheTest* current_;
};
CacheTest* CacheTest::current_;
TEST_F(CacheTest, HitAndMiss) {
ASSERT_EQ(-1, Lookup(100));
Insert(100, 101);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
Insert(200, 201);
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
Insert(100, 102);
ASSERT_EQ(102, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(-1, Lookup(300));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
}
TEST_F(CacheTest, Erase) {
Erase(200);
ASSERT_EQ(0, deleted_keys_.size());
Insert(100, 101);
Insert(200, 201);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(201, Lookup(200));
ASSERT_EQ(1, deleted_keys_.size());
}
TEST_F(CacheTest, EntriesArePinned) {
Insert(100, 101);
Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
Insert(100, 102);
Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
ASSERT_EQ(0, deleted_keys_.size());
cache_->Release(h1);
ASSERT_EQ(1, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[0]);
ASSERT_EQ(101, deleted_values_[0]);
Erase(100);
ASSERT_EQ(-1, Lookup(100));
ASSERT_EQ(1, deleted_keys_.size());
cache_->Release(h2);
ASSERT_EQ(2, deleted_keys_.size());
ASSERT_EQ(100, deleted_keys_[1]);
ASSERT_EQ(102, deleted_values_[1]);
}
TEST_F(CacheTest, EvictionPolicy) {
Insert(100, 101);
Insert(200, 201);
Insert(300, 301);
Cache::Handle* h = cache_->Lookup(EncodeKey(300));
for (int i = 0; i < kCacheSize + 100; i++) {
Insert(1000 + i, 2000 + i);
ASSERT_EQ(2000 + i, Lookup(1000 + i));
ASSERT_EQ(101, Lookup(100));
}
ASSERT_EQ(101, Lookup(100));
ASSERT_EQ(-1, Lookup(200));
ASSERT_EQ(301, Lookup(300));
cache_->Release(h);
}
TEST_F(CacheTest, UseExceedsCacheSize) {
std::vector<Cache::Handle*> h;
for (int i = 0; i < kCacheSize + 100; i++) {
h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
}
for (int i = 0; i < h.size(); i++) {
ASSERT_EQ(2000 + i, Lookup(1000 + i));
}
for (int i = 0; i < h.size(); i++) {
cache_->Release(h[i]);
}
}
TEST_F(CacheTest, HeavyEntries) {
const int kLight = 1;
const int kHeavy = 10;
int added = 0;
int index = 0;
while (added < 2 * kCacheSize) {
const int weight = (index & 1) ? kLight : kHeavy;
Insert(index, 1000 + index, weight);
added += weight;
index++;
}
int cached_weight = 0;
for (int i = 0; i < index; i++) {
const int weight = (i & 1 ? kLight : kHeavy);
int r = Lookup(i);
if (r >= 0) {
cached_weight += weight;
ASSERT_EQ(1000 + i, r);
}
}
ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
TEST_F(CacheTest, NewId) {
uint64_t a = cache_->NewId();
uint64_t b = cache_->NewId();
ASSERT_NE(a, b);
}
TEST_F(CacheTest, Prune) {
Insert(1, 100);
Insert(2, 200);
Cache::Handle* handle = cache_->Lookup(EncodeKey(1));
ASSERT_TRUE(handle);
cache_->Prune();
cache_->Release(handle);
ASSERT_EQ(100, Lookup(1));
ASSERT_EQ(-1, Lookup(2));
}
TEST_F(CacheTest, ZeroSizeCache) {
delete cache_;
cache_ = NewLRUCache(0);
Insert(1, 100);
ASSERT_EQ(-1, Lookup(1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
272d3b74-3a55-4eba-a682-59190d83978f | cpp | tensorflow/tensorflow | distribution_sampler | third_party/xla/xla/tsl/lib/random/distribution_sampler.cc | third_party/xla/xla/tsl/lib/random/distribution_sampler_test.cc | #include "xla/tsl/lib/random/distribution_sampler.h"
#include <memory>
#include <vector>
#include "absl/types/span.h"
namespace tsl {
namespace random {
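// Builds the tables for Walker's alias method: each slot keeps a scaled
// probability and an alternate index, giving O(1) sampling after O(n) setup.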
DistributionSampler::DistributionSampler(
const absl::Span<const float> weights) {
DCHECK(!weights.empty());
int n = weights.size();
num_ = n;
data_.reset(new std::pair<float, int>[n]);
std::unique_ptr<double[]> pr(new double[n]);
double sum = 0.0;
for (int i = 0; i < n; i++) {
sum += weights[i];
set_alt(i, -1);
}
std::vector<int> high;
high.reserve(n);
std::vector<int> low;
low.reserve(n);
for (int i = 0; i < n; i++) {
double p = (weights[i] * n) / sum;
pr[i] = p;
if (p < 1.0) {
low.push_back(i);
} else {
high.push_back(i);
}
}
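// Pair an under-full slot (p < 1) with an over-full one: the under-full
// slot's leftover mass is covered by aliasing to the over-full slot.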
while (!high.empty() && !low.empty()) {
int l = low.back();
low.pop_back();
int h = high.back();
high.pop_back();
set_alt(l, h);
DCHECK_GE(pr[h], 1.0);
double remaining = pr[h] - (1.0 - pr[l]);
pr[h] = remaining;
if (remaining < 1.0) {
low.push_back(h);
} else {
high.push_back(h);
}
}
for (int i = 0; i < n; i++) {
set_prob(i, pr[i]);
}
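// Floating-point round-off can leave slots in high/low; give them
// probability 1 and make them their own alias.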
for (size_t i = 0; i < high.size(); i++) {
int idx = high[i];
set_prob(idx, 1.0);
set_alt(idx, idx);
}
for (size_t i = 0; i < low.size(); i++) {
int idx = low[i];
set_prob(idx, 1.0);
set_alt(idx, idx);
}
}
}
} | #include "xla/tsl/lib/random/distribution_sampler.h"
#include <string.h>
#include <memory>
#include <vector>
#include "xla/tsl/lib/random/simple_philox.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
class DistributionSamplerTest : public ::testing::Test {
protected:
float TestWeights(const std::vector<float>& weights, int trials_per_bin) {
int iters = weights.size() * trials_per_bin;
std::unique_ptr<float[]> counts(new float[weights.size()]);
memset(counts.get(), 0, sizeof(float) * weights.size());
DistributionSampler sampler(weights);
PhiloxRandom philox(testing::RandomSeed(), 17);
SimplePhilox random(&philox);
for (int i = 0; i < iters; i++) {
int r = sampler.Sample(&random);
EXPECT_LT(r, weights.size());
EXPECT_GE(r, 0);
counts[r] += 1.0;
}
float chi2 = 0.0;
for (size_t i = 0; i < weights.size(); i++) {
counts[i] /= iters;
float err = (counts[i] - weights[i]);
chi2 += (err * err) / weights[i];
}
return chi2;
}
void TestDistribution(float* arr, int n) {
std::vector<float> w;
w.reserve(n);
for (int i = 0; i < n; i++) {
w.push_back(arr[i]);
}
float var = TestWeights(w, 1000);
if (var < 0.001) return;
var = TestWeights(w, 100000);
if (var < 0.001) return;
EXPECT_TRUE(false) << "Chi2 is " << var << " in " << n * 100000
<< "iterations";
}
};
TEST_F(DistributionSamplerTest, KnownDistribution) {
float kEven2[] = {0.5, 0.5};
float kEven3[] = {0.33333333, 0.33333333, 0.33333333};
float kEven4[] = {0.25, 0.25, 0.25, 0.25};
float kDist1[] = {0.8, 0.15, 0.05};
TestDistribution(kEven2, TF_ARRAYSIZE(kEven2));
TestDistribution(kEven3, TF_ARRAYSIZE(kEven3));
TestDistribution(kEven4, TF_ARRAYSIZE(kEven4));
TestDistribution(kDist1, TF_ARRAYSIZE(kDist1));
}
static void BM_DistributionSampler(::testing::benchmark::State& state) {
const int n = state.range(0);
PhiloxRandom philox(173, 371);
SimplePhilox rand(&philox);
std::vector<float> weights(n, 0);
for (int i = 0; i < n; i++) {
weights[i] = rand.Uniform(100);
}
DistributionSampler picker(weights);
int r = 0;
for (auto s : state) {
r |= picker.Sample(&rand);
}
CHECK_NE(r, kint32max);
}
BENCHMARK(BM_DistributionSampler)->Arg(10)->Arg(100)->Arg(1000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/distribution_sampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/distribution_sampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3e8d7744-e760-4b63-87c0-9027b944dc8f | cpp | tensorflow/tensorflow | weighted_picker | third_party/xla/xla/tsl/lib/random/weighted_picker.cc | third_party/xla/xla/tsl/lib/random/weighted_picker_test.cc | #include "xla/tsl/lib/random/weighted_picker.h"
#include <string.h>
#include <algorithm>
#include "xla/tsl/lib/random/simple_philox.h"
namespace tsl {
namespace random {
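// Weights live in a complete binary tree: the deepest level holds the
// per-element weights and every internal node holds the sum of its two
// children, so picking walks root-to-leaf in O(log N).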
WeightedPicker::WeightedPicker(int N) {
CHECK_GE(N, 0);
N_ = N;
num_levels_ = 1;
while (LevelSize(num_levels_ - 1) < N) {
num_levels_++;
}
level_ = new int32*[num_levels_];
for (int l = 0; l < num_levels_; l++) {
level_[l] = new int32[LevelSize(l)];
}
SetAllWeights(1);
}
WeightedPicker::~WeightedPicker() {
for (int l = 0; l < num_levels_; l++) {
delete[] level_[l];
}
delete[] level_;
}
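// Returns a uniform value in [0, n) without modulo bias: n == 0 yields 0,
// powers of two are masked directly, and other n reject the lowest `rem`
// draws so the accepted range is an exact multiple of n.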
static int32 UnbiasedUniform(SimplePhilox* r, int32_t n) {
CHECK_LE(0, n);
const uint32 range = ~static_cast<uint32>(0);
if (n == 0) {
return r->Rand32() * n;
} else if (0 == (n & (n - 1))) {
return r->Rand32() & (n - 1);
} else {
uint32 rem = (range % n) + 1;
uint32 rnd;
do {
rnd = r->Rand32();
} while (rnd < rem);
return rnd % n;
}
}
int WeightedPicker::Pick(SimplePhilox* rnd) const {
if (total_weight() == 0) return -1;
return PickAt(UnbiasedUniform(rnd, total_weight()));
}
int WeightedPicker::PickAt(int32_t weight_index) const {
if (weight_index < 0 || weight_index >= total_weight()) return -1;
int32_t position = weight_index;
int index = 0;
for (int l = 1; l < num_levels_; l++) {
const int32_t left_weight = level_[l][2 * index];
if (position < left_weight) {
index = 2 * index;
} else {
index = 2 * index + 1;
position -= left_weight;
}
}
CHECK_GE(index, 0);
CHECK_LT(index, N_);
CHECK_LE(position, level_[num_levels_ - 1][index]);
return index;
}
void WeightedPicker::set_weight(int index, int32_t weight) {
assert(index >= 0);
assert(index < N_);
const int32_t delta = weight - get_weight(index);
for (int l = num_levels_ - 1; l >= 0; l--) {
level_[l][index] += delta;
index >>= 1;
}
}
void WeightedPicker::SetAllWeights(int32_t weight) {
int32* leaves = level_[num_levels_ - 1];
for (int i = 0; i < N_; i++) leaves[i] = weight;
for (int i = N_; i < LevelSize(num_levels_ - 1); i++) leaves[i] = 0;
RebuildTreeWeights();
}
void WeightedPicker::SetWeightsFromArray(int N, const int32* weights) {
Resize(N);
int32* leaves = level_[num_levels_ - 1];
for (int i = 0; i < N_; i++) leaves[i] = weights[i];
for (int i = N_; i < LevelSize(num_levels_ - 1); i++) leaves[i] = 0;
RebuildTreeWeights();
}
void WeightedPicker::RebuildTreeWeights() {
for (int l = num_levels_ - 2; l >= 0; l--) {
int32* level = level_[l];
int32* children = level_[l + 1];
for (int i = 0; i < LevelSize(l); i++) {
level[i] = children[2 * i] + children[2 * i + 1];
}
}
}
void WeightedPicker::Append(int32_t weight) {
Resize(num_elements() + 1);
set_weight(num_elements() - 1, weight);
}
void WeightedPicker::Resize(int new_size) {
CHECK_GE(new_size, 0);
if (new_size <= LevelSize(num_levels_ - 1)) {
for (int i = new_size; i < N_; i++) {
set_weight(i, 0);
}
N_ = new_size;
return;
}
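// Growing: build a fresh picker of the requested size, copy the existing
// leaf weights, rebuild the internal sums, then swap representations.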
assert(new_size > N_);
WeightedPicker new_picker(new_size);
int32* dst = new_picker.level_[new_picker.num_levels_ - 1];
int32* src = this->level_[this->num_levels_ - 1];
memcpy(dst, src, sizeof(dst[0]) * N_);
memset(dst + N_, 0, sizeof(dst[0]) * (new_size - N_));
new_picker.RebuildTreeWeights();
std::swap(new_picker.N_, this->N_);
std::swap(new_picker.num_levels_, this->num_levels_);
std::swap(new_picker.level_, this->level_);
assert(this->N_ == new_size);
}
}
} | #include "xla/tsl/lib/random/weighted_picker.h"
#include <string.h>
#include <vector>
#include "xla/tsl/lib/random/simple_philox.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
static void TestPicker(SimplePhilox* rnd, int size);
static void CheckUniform(SimplePhilox* rnd, WeightedPicker* picker, int trials);
static void CheckSkewed(SimplePhilox* rnd, WeightedPicker* picker, int trials);
static void TestPickAt(int items, const int32* weights);
TEST(WeightedPicker, Simple) {
PhiloxRandom philox(testing::RandomSeed(), 17);
SimplePhilox rnd(&philox);
{
VLOG(0) << "======= Zero-length picker";
WeightedPicker picker(0);
EXPECT_EQ(picker.Pick(&rnd), -1);
}
{
VLOG(0) << "======= Singleton picker";
WeightedPicker picker(1);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
}
{
VLOG(0) << "======= Grown picker";
WeightedPicker picker(0);
for (int i = 0; i < 10; i++) {
picker.Append(1);
}
CheckUniform(&rnd, &picker, 100000);
}
{
VLOG(0) << "======= Grown picker with zero weights";
WeightedPicker picker(1);
picker.Resize(10);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
}
{
VLOG(0) << "======= Shrink picker and check weights";
WeightedPicker picker(1);
picker.Resize(10);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
for (int i = 0; i < 10; i++) {
picker.set_weight(i, i);
}
EXPECT_EQ(picker.total_weight(), 45);
picker.Resize(5);
EXPECT_EQ(picker.total_weight(), 10);
picker.Resize(2);
EXPECT_EQ(picker.total_weight(), 1);
picker.Resize(1);
EXPECT_EQ(picker.total_weight(), 0);
}
}
TEST(WeightedPicker, BigWeights) {
PhiloxRandom philox(testing::RandomSeed() + 1, 17);
SimplePhilox rnd(&philox);
VLOG(0) << "======= Check uniform with big weights";
WeightedPicker picker(2);
picker.SetAllWeights(2147483646L / 3);
CheckUniform(&rnd, &picker, 100000);
}
TEST(WeightedPicker, Deterministic) {
VLOG(0) << "======= Testing deterministic pick";
static const int32 weights[] = {1, 0, 200, 5, 42};
TestPickAt(TF_ARRAYSIZE(weights), weights);
}
TEST(WeightedPicker, Randomized) {
PhiloxRandom philox(testing::RandomSeed() + 10, 17);
SimplePhilox rnd(&philox);
TestPicker(&rnd, 1);
TestPicker(&rnd, 2);
TestPicker(&rnd, 3);
TestPicker(&rnd, 4);
TestPicker(&rnd, 7);
TestPicker(&rnd, 8);
TestPicker(&rnd, 9);
TestPicker(&rnd, 10);
TestPicker(&rnd, 100);
}
static void TestPicker(SimplePhilox* rnd, int size) {
VLOG(0) << "======= Testing size " << size;
{
WeightedPicker picker(size);
picker.SetAllWeights(0);
for (int i = 0; i < 100; i++) EXPECT_EQ(picker.Pick(rnd), -1);
}
std::vector<int32> weights(size);
for (int elem = 0; elem < size; elem++) {
weights[elem] = 0;
}
for (int elem = 0; elem < size; elem++) {
WeightedPicker picker(size);
picker.SetAllWeights(0);
picker.set_weight(elem, elem + 1);
for (int i = 0; i < 100; i++) EXPECT_EQ(picker.Pick(rnd), elem);
weights[elem] = 10;
picker.SetWeightsFromArray(size, &weights[0]);
for (int i = 0; i < 100; i++) EXPECT_EQ(picker.Pick(rnd), elem);
weights[elem] = 0;
}
{
WeightedPicker picker(size);
CheckUniform(rnd, &picker, 100000);
}
if (size / 3 > 0) {
WeightedPicker picker(size / 3);
while (picker.num_elements() != size) {
picker.Append(1);
}
CheckUniform(rnd, &picker, 100000);
}
if (size <= 10) {
WeightedPicker picker(size);
int32_t weight = 1;
for (int elem = 0; elem < size; elem++) {
picker.set_weight(elem, weight);
weights[elem] = weight;
weight *= 2;
}
CheckSkewed(rnd, &picker, 1000000);
WeightedPicker array_picker(0);
array_picker.SetWeightsFromArray(size, &weights[0]);
CheckSkewed(rnd, &array_picker, 1000000);
}
}
static void CheckUniform(SimplePhilox* rnd, WeightedPicker* picker,
int trials) {
const int size = picker->num_elements();
int* count = new int[size];
memset(count, 0, sizeof(count[0]) * size);
for (int i = 0; i < size * trials; i++) {
const int elem = picker->Pick(rnd);
EXPECT_GE(elem, 0);
EXPECT_LT(elem, size);
count[elem]++;
}
const int expected_min = int(0.9 * trials);
const int expected_max = int(1.1 * trials);
for (int i = 0; i < size; i++) {
EXPECT_GE(count[i], expected_min);
EXPECT_LE(count[i], expected_max);
}
delete[] count;
}
static void CheckSkewed(SimplePhilox* rnd, WeightedPicker* picker, int trials) {
const int size = picker->num_elements();
int* count = new int[size];
memset(count, 0, sizeof(count[0]) * size);
for (int i = 0; i < size * trials; i++) {
const int elem = picker->Pick(rnd);
EXPECT_GE(elem, 0);
EXPECT_LT(elem, size);
count[elem]++;
}
for (int i = 0; i < size - 1; i++) {
LOG(INFO) << i << ": " << count[i];
const float ratio = float(count[i + 1]) / float(count[i]);
EXPECT_GE(ratio, 1.6f);
EXPECT_LE(ratio, 2.4f);
}
delete[] count;
}
static void TestPickAt(int items, const int32* weights) {
WeightedPicker picker(items);
picker.SetWeightsFromArray(items, weights);
int weight_index = 0;
for (int i = 0; i < items; ++i) {
for (int j = 0; j < weights[i]; ++j) {
int pick = picker.PickAt(weight_index);
EXPECT_EQ(pick, i);
++weight_index;
}
}
EXPECT_EQ(weight_index, picker.total_weight());
}
static void BM_Create(::testing::benchmark::State& state) {
int arg = state.range(0);
for (auto s : state) {
WeightedPicker p(arg);
}
}
BENCHMARK(BM_Create)->Range(1, 1024);
static void BM_CreateAndSetWeights(::testing::benchmark::State& state) {
int arg = state.range(0);
std::vector<int32> weights(arg);
for (int i = 0; i < arg; i++) {
weights[i] = i * 10;
}
for (auto s : state) {
WeightedPicker p(arg);
p.SetWeightsFromArray(arg, &weights[0]);
}
}
BENCHMARK(BM_CreateAndSetWeights)->Range(1, 1024);
static void BM_Pick(::testing::benchmark::State& state) {
int arg = state.range(0);
PhiloxRandom philox(301, 17);
SimplePhilox rnd(&philox);
WeightedPicker p(arg);
int result = 0;
for (auto s : state) {
result += p.Pick(&rnd);
}
VLOG(4) << result;
}
BENCHMARK(BM_Pick)->Range(1, 1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/weighted_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/weighted_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4b58cfbf-7b21-47ea-86db-812d0b5e7490 | cpp | tensorflow/tensorflow | simple_philox | third_party/xla/xla/tsl/lib/random/simple_philox.cc | third_party/xla/xla/tsl/lib/random/simple_philox_test.cc | #include "xla/tsl/lib/random/simple_philox.h"
#include "xla/tsl/lib/random/exact_uniform_int.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace random {
uint32 SimplePhilox::Uniform(uint32 n) {
return ExactUniformInt<uint32>(n, [this]() { return Rand32(); });
}
uint64 SimplePhilox::Uniform64(uint64 n) {
return ExactUniformInt<uint64>(n, [this]() { return Rand64(); });
}
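// Added descriptive comment (not in the original file): Skewed(max_log)
// first draws a bit width `shift` uniformly from [0, max_log] and then
// returns that many random low-order bits, so results with few significant
// bits are much more likely than results with many.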
uint32 SimplePhilox::Skewed(int max_log) {
CHECK(0 <= max_log && max_log <= 32);
const int shift = Rand32() % (max_log + 1);
const uint32 mask = shift == 32 ? ~static_cast<uint32>(0) : (1 << shift) - 1;
return Rand32() & mask;
}
}
} | #include "xla/tsl/lib/random/simple_philox.h"
#include <set>
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
TEST(SimplePhiloxTest, FloatTest) {
PhiloxRandom philox(7, 7);
SimplePhilox gen(&philox);
static const int kIters = 1000000;
for (int i = 0; i < kIters; ++i) {
float f = gen.RandFloat();
EXPECT_LE(0.0f, f);
EXPECT_GT(1.0f, f);
}
for (int i = 0; i < kIters; ++i) {
double d = gen.RandDouble();
EXPECT_LE(0.0, d);
EXPECT_GT(1.0, d);
}
}
static void DifferenceTest(const char *names, SimplePhilox *gen1,
SimplePhilox *gen2) {
static const int kIters = 100;
bool different = false;
for (int i = 0; i < kIters; ++i) {
if (gen1->Rand32() != gen2->Rand32()) {
different = true;
break;
}
}
CHECK(different) << names << ": different seeds but same output!";
}
TEST(SimplePhiloxTest, DifferenceTest) {
PhiloxRandom philox1(1, 1), philox2(17, 17);
SimplePhilox gen1(&philox1), gen2(&philox2);
DifferenceTest("SimplePhilox: different seeds", &gen1, &gen2);
}
TEST(SimplePhiloxTest, DifferenceTestCloseSeeds) {
PhiloxRandom philox1(1, 1), philox2(2, 1);
SimplePhilox gen1(&philox1), gen2(&philox2);
DifferenceTest("SimplePhilox: close seeds", &gen1, &gen2);
}
TEST(SimplePhiloxTest, Regression_CloseSeedsAreDifferent) {
const int kCount = 1000;
PhiloxRandom philox1(0, 1), philox2(1, 1);
SimplePhilox gen1(&philox1), gen2(&philox2);
std::set<uint32> first;
std::set<uint32> all;
for (int i = 0; i < kCount; ++i) {
uint32 v = gen1.Rand32();
first.insert(v);
all.insert(v);
all.insert(gen2.Rand32());
}
EXPECT_EQ(kCount, first.size());
EXPECT_EQ(2 * kCount, all.size());
}
TEST(SimplePhiloxTest, TestUniform) {
PhiloxRandom philox(17, 17);
SimplePhilox gen(&philox);
uint32 range = 3 * (1L << 29);
uint32 threshold = 1L << 30;
size_t count = 0;
static const int kTrials = 100000;
for (int i = 0; i < kTrials; ++i) {
uint32 rnd = gen.Uniform(range);
if (rnd < threshold) {
++count;
}
}
EXPECT_LT(fabs((threshold + 0.0) / range - (count + 0.0) / kTrials), 0.005);
}
TEST(SimplePhiloxTest, TestUniform64) {
PhiloxRandom philox(17, 17);
SimplePhilox gen(&philox);
uint64 range = 3 * (1LL << 59);
uint64 threshold = 1LL << 60;
size_t count = 0;
static const int kTrials = 100000;
for (int i = 0; i < kTrials; ++i) {
uint64 rnd = gen.Uniform64(range);
if (rnd < threshold) {
++count;
}
}
EXPECT_LT(fabs((threshold + 0.0) / range - (count + 0.0) / kTrials), 0.005);
}
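// A hedged additional check, not in the original file: Uniform(n) is
// expected to draw from the half-open range [0, n), so Uniform(1) should
// always return 0.
TEST(SimplePhiloxTest, TestUniformOfOne) {
PhiloxRandom philox(17, 17);
SimplePhilox gen(&philox);
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(0u, gen.Uniform(1));
}
}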
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/simple_philox.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/simple_philox_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2246b064-c684-4a2b-b593-8c40c777b2fc | cpp | tensorflow/tensorflow | random_distributions | third_party/xla/xla/tsl/lib/random/random_distributions.cc | third_party/xla/xla/tsl/lib/random/random_distributions_test.cc | #include "xla/tsl/lib/random/distribution_sampler.h"
#include "xla/tsl/lib/random/philox_random.h"
namespace tsl {
namespace random {
template <>
void SingleSampleAdapter<PhiloxRandom>::SkipFromGenerator(uint64 num_skips) {
generator_->Skip(num_skips);
}
}
} | #include "xla/tsl/lib/random/random_distributions.h"
#include <algorithm>
#include <cmath>
#include <functional>
#include <numeric>
#include <unordered_map>
#include <vector>
#include "xla/tsl/lib/math/math_util.h"
#include "xla/tsl/lib/random/philox_random.h"
#include "xla/tsl/lib/random/philox_random_test_utils.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace random {
namespace {
static constexpr float kZLimit = 6.0;
static constexpr float kZLimitBfloat16 = 20.0;
template <class Distribution>
void FillRandomsWithSingles(PhiloxRandom gen,
typename Distribution::ResultElementType* p,
int64_t size) {
int granularity = Distribution::kResultElementCount;
CHECK(size % granularity == 0)
<< " size: " << size << " granularity: " << granularity;
SingleSampleAdapter<PhiloxRandom> single_samples(&gen);
Distribution dist;
for (int i = 0; i < size; i += granularity) {
auto sample = dist(&single_samples);
std::copy(&sample[0], &sample[0] + granularity, &p[i]);
}
}
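// Added descriptive comment (not in the original file): CheckSamplesMoments
// estimates the first `max_moments` sample moments (or, for stride > 0,
// moments of products of samples `stride` apart) and runs a z-test of each
// estimate against its theoretical value; it returns false if any z-score
// exceeds z_limit.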
template <typename T>
bool CheckSamplesMoments(const std::vector<T>& samples,
const std::function<double(int)>& theoretical_moments,
int max_moments, int stride, T z_limit) {
const T* const samples_data = &samples[0];
const int samples_size = samples.size();
std::vector<double> moments(max_moments + 1);
double* const moments_data = &moments[0];
std::vector<int> moments_sample_count(max_moments + 1);
int* const moments_sample_count_data = &moments_sample_count[0];
for (int k = 0; k < samples_size; ++k) {
double moment = 1.;
for (int i = 0; i <= max_moments; ++i) {
int index = k + i * stride;
if (index >= samples_size) {
break;
}
moments_data[i] += moment;
++moments_sample_count_data[i];
moment *= static_cast<double>(samples_data[index]);
}
}
for (int i = 0; i <= max_moments; ++i) {
moments[i] /= moments_sample_count[i];
}
bool status = true;
for (int i = 1; i <= max_moments; ++i) {
const double moments_i_mean =
(stride == 0) ? theoretical_moments(i)
: MathUtil::IPow(theoretical_moments(1), i);
const double moments_i_squared =
(stride == 0) ? theoretical_moments(2 * i)
: MathUtil::IPow(theoretical_moments(2), i);
const double moments_i_var =
moments_i_squared - moments_i_mean * moments_i_mean;
static const double kNumericalError = 1e-6;
const double error_per_moment = i * kNumericalError;
const double total_variance =
moments_i_var / moments_sample_count[i] + error_per_moment;
const double z_test =
fabs((moments[i] - moments_i_mean) / sqrt(total_variance));
if (z_test > static_cast<double>(z_limit)) {
LOG(ERROR) << "failing z_test:"
<< " moment: " << i << " stride: " << stride
<< " z_test: " << z_test << " z_limit: " << z_limit
<< " measured moments: " << moments[i]
<< " theoretical mean of the moments: " << moments_i_mean
<< " theoretical var of the moments: " << moments_i_var
<< " sample count: " << moments_sample_count[i];
status = false;
}
}
return status;
}
template <typename T>
void UniformMomentsTest(int count, int max_moments,
const std::vector<int>& strides, T z_limit) {
auto uniform_moments = [](int n) -> double { return 1. / (n + 1); };
std::vector<T> v1(count);
uint64 seed = GetTestSeed();
PhiloxRandom gen(seed);
FillRandoms<UniformDistribution<PhiloxRandom, T> >(gen, &v1[0], v1.size());
for (int stride : strides) {
bool status =
CheckSamplesMoments(v1, uniform_moments, max_moments, stride, z_limit);
ASSERT_TRUE(status) << " UniformMomentsTest failing. seed: " << seed;
}
}
template <typename T>
void NormalMomentsTest(int count, int max_moments,
const std::vector<int>& strides, T z_limit) {
auto normal_moments = [](int n) -> double {
if (n % 2 == 1) {
return 0.;
} else {
double v = 1.;
for (int i = n - 1; i >= 1; i -= 2) {
v *= i;
}
return v;
}
};
std::vector<T> v1(count);
uint64 seed = GetTestSeed();
PhiloxRandom gen(seed);
FillRandoms<NormalDistribution<PhiloxRandom, T> >(gen, &v1[0], v1.size());
for (int stride : strides) {
bool status =
CheckSamplesMoments(v1, normal_moments, max_moments, stride, z_limit);
ASSERT_TRUE(status) << " NormalMomentsTest failing. seed: " << seed;
}
}
class TruncatedNormalMoments {
public:
double operator()(int n) {
if (n == 0) {
return 1;
}
if (n % 2 == 1) {
return 0.;
}
auto iter = cached_results_.find(n);
if (iter != cached_results_.end()) {
return iter->second;
}
double bias = 2.0 * MathUtil::IPow(kV, n - 1) * kFV / (2.0 * kPhiV - 1.0);
double moment_n_minus_2 = (*this)(n - 2);
double moment_n = (n - 1) * moment_n_minus_2 - bias;
cached_results_[n] = moment_n;
return moment_n;
}
private:
const double kV = 2.0;
const double kFV = 1.0 / sqrt(2.0 * M_PI) * exp(-kV * kV / 2.0);
const double kPhiV = 0.977249868051821;
std::unordered_map<int, double> cached_results_;
};
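// Added descriptive comment (not in the original file): the class above
// evaluates the moment recurrence for a standard normal truncated to
// [-v, v] with v = 2:
//   E[X^n] = (n - 1) * E[X^(n-2)] - 2 * v^(n-1) * phi(v) / (2 * Phi(v) - 1),
// where phi(v) is the normal pdf at v (kFV) and Phi(v) the cdf (kPhiV).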
template <typename T>
void RandomParametersMomentsTest(int count, int max_moments,
const std::vector<int>& strides, T z_limit) {
std::vector<T> v1(count);
uint64 seed = GetTestSeed();
PhiloxRandom gen(seed);
FillRandomsWithSingles<
TruncatedNormalDistribution<SingleSampleAdapter<PhiloxRandom>, T> >(
gen, &v1[0], v1.size());
for (int stride : strides) {
bool status = CheckSamplesMoments(v1, TruncatedNormalMoments(), max_moments,
stride, z_limit);
ASSERT_TRUE(status) << " RandomParametersMomentsTest failing. seed: " << seed;
}
}
TEST(PhiloxRandomTest, UniformBfloat16MomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
UniformMomentsTest<bfloat16>(1 << 20, 40, strides, bfloat16(kZLimitBfloat16));
}
TEST(PhiloxRandomTest, NormalBfloat16MomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
NormalMomentsTest<bfloat16>(8 << 20, 25, strides, bfloat16(kZLimitBfloat16));
}
TEST(PhiloxRandomTest, RandomParametersBfloat16MomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
RandomParametersMomentsTest<bfloat16>(1 << 20, 40, strides,
bfloat16(kZLimitBfloat16));
}
TEST(PhiloxRandomTest, UniformFloatMomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
UniformMomentsTest<float>(1 << 20, 40, strides, kZLimit);
}
TEST(PhiloxRandomTest, NormalFloatMomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
NormalMomentsTest<float>(8 << 20, 25, strides, kZLimit);
}
TEST(PhiloxRandomTest, RandomParametersFloatMomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
RandomParametersMomentsTest<float>(1 << 20, 40, strides, kZLimit);
}
TEST(PhiloxRandomTest, UniformDoubleMomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
UniformMomentsTest<double>(1 << 20, 40, strides, kZLimit);
}
TEST(PhiloxRandomTest, NormalDoubleMomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
NormalMomentsTest<double>(8 << 20, 25, strides, kZLimit);
}
TEST(PhiloxRandomTest, RandomParametersDoubleMomentsTest) {
const std::vector<int> strides = {0, 1, 4, 17};
RandomParametersMomentsTest<double>(1 << 20, 40, strides, kZLimit);
}
class MockGenerator {
public:
explicit MockGenerator(uint64 seed) : counter_(seed) {}
using ResultType = std::vector<uint32>;
using ResultElementType = uint32;
static constexpr int kResultElementCount = 1;
ResultType operator()() {
ResultType result;
result.push_back(counter_++);
return result;
}
private:
uint32 counter_;
};
template <typename T>
void SingleSampleAdapterSkipTest() {
std::vector<uint64> skips(10);
std::vector<uint64> skip_afters(10);
std::iota(skips.begin(), skips.end(), 0);
std::iota(skip_afters.begin(), skip_afters.end(), 0);
uint64 total_samples = 100;
uint64 seed = GetTestSeed();
for (uint64 skip : skips) {
for (uint64 skip_after : skip_afters) {
T parent_gen(seed);
SingleSampleAdapter<T> gen(&parent_gen);
T parent_gen_to_skip(seed);
SingleSampleAdapter<T> gen_to_skip(&parent_gen_to_skip);
int cur = 0;
for (; cur < skip_after; cur++) {
gen();
gen_to_skip();
}
for (; cur < skip_after + skip; cur++) {
gen();
}
gen_to_skip.Skip(skip);
for (; cur < total_samples; cur++) {
ASSERT_EQ(gen(), gen_to_skip());
}
}
}
}
TEST(SingleSampleAdapterTest, PhiloxRandomSkip) {
SingleSampleAdapterSkipTest<PhiloxRandom>();
}
TEST(SingleSampleAdapterTest, MockGeneratorSkip) {
SingleSampleAdapterSkipTest<MockGenerator>();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/random_distributions.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/random_distributions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c0e9b00c-577d-4692-b0de-b284f166c3d1 | cpp | tensorflow/tensorflow | sampler | third_party/xla/xla/tsl/lib/monitoring/sampler.cc | tensorflow/core/lib/monitoring/sampler_test.cc | #include "xla/tsl/lib/monitoring/sampler.h"
#include <cfloat>
#include <memory>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#ifdef IS_MOBILE_PLATFORM
#else
namespace tsl {
namespace monitoring {
namespace {
class ExplicitBuckets : public Buckets {
public:
~ExplicitBuckets() override = default;
explicit ExplicitBuckets(std::vector<double> bucket_limits)
: bucket_limits_(std::move(bucket_limits)) {
CHECK_GT(bucket_limits_.size(), 0);
for (size_t i = 1; i < bucket_limits_.size(); i++) {
CHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]);
}
if (bucket_limits_.back() != DBL_MAX) {
bucket_limits_.push_back(DBL_MAX);
}
}
const std::vector<double>& explicit_bounds() const override {
return bucket_limits_;
}
private:
std::vector<double> bucket_limits_;
ExplicitBuckets(const ExplicitBuckets&) = delete;
void operator=(const ExplicitBuckets&) = delete;
};
class ExponentialBuckets : public Buckets {
public:
~ExponentialBuckets() override = default;
ExponentialBuckets(double scale, double growth_factor, int bucket_count)
: explicit_buckets_(
ComputeBucketLimits(scale, growth_factor, bucket_count)) {}
const std::vector<double>& explicit_bounds() const override {
return explicit_buckets_.explicit_bounds();
}
private:
static std::vector<double> ComputeBucketLimits(double scale,
double growth_factor,
int bucket_count) {
CHECK_GT(bucket_count, 0);
std::vector<double> bucket_limits;
double bound = scale;
for (int i = 0; i < bucket_count; i++) {
bucket_limits.push_back(bound);
bound *= growth_factor;
}
return bucket_limits;
}
ExplicitBuckets explicit_buckets_;
ExponentialBuckets(const ExponentialBuckets&) = delete;
void operator=(const ExponentialBuckets&) = delete;
};
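// Added descriptive comment (not in the original file): exponential buckets
// have limits scale * growth_factor^i for i in [0, bucket_count); the
// ExplicitBuckets wrapper then appends DBL_MAX as an overflow bucket, so
// e.g. Exponential(1, 2, 3) yields {1, 2, 4, DBL_MAX}.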
}
std::unique_ptr<Buckets> Buckets::Explicit(std::vector<double> bucket_limits) {
return std::unique_ptr<Buckets>(
new ExplicitBuckets(std::move(bucket_limits)));
}
std::unique_ptr<Buckets> Buckets::Explicit(
std::initializer_list<double> bucket_limits) {
return std::unique_ptr<Buckets>(new ExplicitBuckets(bucket_limits));
}
std::unique_ptr<Buckets> Buckets::Exponential(double scale,
double growth_factor,
int bucket_count) {
return std::unique_ptr<Buckets>(
new ExponentialBuckets(scale, growth_factor, bucket_count));
}
}
}
#endif | #include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
using histogram::Histogram;
void EqHistograms(const Histogram& expected,
const HistogramProto& actual_proto) {
Histogram actual;
ASSERT_TRUE(actual.DecodeFromProto(actual_proto));
EXPECT_EQ(expected.ToString(), actual.ToString());
}
auto* sampler_with_labels =
Sampler<1>::New({"/tensorflow/test/sampler_with_labels",
"Sampler with one label.", "MyLabel"},
Buckets::Explicit({10.0, 20.0}));
TEST(LabeledSamplerTest, InitializedEmpty) {
Histogram empty;
EqHistograms(empty, sampler_with_labels->GetCell("Empty")->value());
}
TEST(LabeledSamplerTest, ExplicitBucketBoundaries) {
Histogram expected({10.0, 20.0, DBL_MAX});
auto* cell = sampler_with_labels->GetCell("BucketBoundaries");
sampler_with_labels->GetCell("AddedToCheckPreviousCellValidity");
cell->Add(-1.0);
expected.Add(-1.0);
cell->Add(10.0);
expected.Add(10.0);
cell->Add(20.0);
expected.Add(20.0);
cell->Add(31.0);
expected.Add(31.0);
EqHistograms(expected, cell->value());
}
auto* init_sampler_without_labels =
Sampler<0>::New({"/tensorflow/test/init_sampler_without_labels",
"Sampler without labels initialized as empty."},
Buckets::Explicit(std::vector<double>{1.5, 2.8}));
TEST(UnlabeledSamplerTest, InitializedEmpty) {
Histogram empty;
EqHistograms(empty, init_sampler_without_labels->GetCell()->value());
}
auto* sampler_without_labels =
Sampler<0>::New({"/tensorflow/test/sampler_without_labels",
"Sampler without labels initialized as empty."},
Buckets::Explicit({1.5, 2.8}));
TEST(UnlabeledSamplerTest, ExplicitBucketBoundaries) {
Histogram expected({1.5, 2.8, DBL_MAX});
auto* cell = sampler_without_labels->GetCell();
cell->Add(-1.0);
expected.Add(-1.0);
cell->Add(2.0);
expected.Add(2.0);
cell->Add(31.0);
expected.Add(31.0);
EqHistograms(expected, cell->value());
}
auto* sampler_with_exponential =
Sampler<1>::New({"/tensorflow/test/sampler_with_exponential",
"Sampler with exponential buckets.", "MyLabel"},
Buckets::Exponential(1, 2, 3));
TEST(ExponentialSamplerTest, ExponentialBucketBoundaries) {
Histogram expected({1.0, 2.0, 4.0, DBL_MAX});
auto* cell = sampler_with_exponential->GetCell("BucketBoundaries");
sampler_with_exponential->GetCell("AddedToCheckPreviousCellValidity");
cell->Add(-1.0);
expected.Add(-1.0);
cell->Add(0.5);
expected.Add(0.5);
cell->Add(1.001);
expected.Add(1.001);
cell->Add(3.999);
expected.Add(3.999);
cell->Add(6.0);
expected.Add(6.0);
EqHistograms(expected, cell->value());
}
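// A hedged additional check, not in the original file: Buckets::Exponential
// should produce geometric bucket limits plus a DBL_MAX overflow bucket.
TEST(ExponentialSamplerTest, ExponentialBucketBounds) {
auto buckets = Buckets::Exponential(1, 2, 3);
const std::vector<double> expected = {1.0, 2.0, 4.0, DBL_MAX};
EXPECT_EQ(expected, buckets->explicit_bounds());
}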
TEST(ExplicitSamplerTest, SameName) {
auto* same_sampler = Sampler<1>::New({"/tensorflow/test/sampler_with_labels",
"Sampler with one label.", "MyLabel"},
Buckets::Explicit({10.0, 20.0}));
EXPECT_TRUE(sampler_with_labels->GetStatus().ok());
EXPECT_TRUE(same_sampler->GetStatus().ok());
delete same_sampler;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/monitoring/sampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/sampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7bb6a813-dd1b-4d06-9ea9-8d2c113624c8 | cpp | tensorflow/tensorflow | collection_registry | third_party/xla/xla/tsl/lib/monitoring/collection_registry.cc | tensorflow/core/lib/monitoring/collection_registry_test.cc | #include "xla/tsl/lib/monitoring/collection_registry.h"
#include <memory>
#include <utility>
#include "xla/tsl/lib/monitoring/collected_metrics.h"
#include "xla/tsl/lib/monitoring/metric_def.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
#ifndef IS_MOBILE_PLATFORM
#include "tsl/platform/logging.h"
namespace tsl {
namespace monitoring {
namespace internal {
void Collector::CollectMetricValues(
const CollectionRegistry::CollectionInfo& info) {
info.collection_function(MetricCollectorGetter(
this, info.metric_def, info.registration_time_millis));
}
std::unique_ptr<CollectedMetrics> Collector::ConsumeCollectedMetrics() {
mutex_lock l(mu_);
return std::move(collected_metrics_);
}
void Collector::CollectMetricDescriptor(
const AbstractMetricDef* const metric_def) {
auto* const metric_descriptor = [&]() {
mutex_lock l(mu_);
return collected_metrics_->metric_descriptor_map
.insert(std::make_pair(
string(metric_def->name()),
std::unique_ptr<MetricDescriptor>(new MetricDescriptor())))
.first->second.get();
}();
metric_descriptor->name = string(metric_def->name());
metric_descriptor->description = string(metric_def->description());
for (const absl::string_view label_name : metric_def->label_descriptions()) {
metric_descriptor->label_names.emplace_back(label_name);
}
metric_descriptor->metric_kind = metric_def->kind();
metric_descriptor->value_type = metric_def->value_type();
}
}
CollectionRegistry* CollectionRegistry::Default() {
static CollectionRegistry* default_registry =
new CollectionRegistry(Env::Default());
return default_registry;
}
CollectionRegistry::CollectionRegistry(Env* const env) : env_(env) {}
std::unique_ptr<CollectionRegistry::RegistrationHandle>
CollectionRegistry::Register(const AbstractMetricDef* const metric_def,
const CollectionFunction& collection_function) {
CHECK(collection_function)
<< "Requires collection_function to contain an implementation.";
mutex_lock l(mu_);
const auto found_it = registry_.find(metric_def->name());
if (found_it != registry_.end()) {
LOG(WARNING)
<< "Trying to register 2 metrics with the same name: "
<< metric_def->name()
<< ". The old value will be erased in order to register a new one. "
"Please check if you link the metric more than once, or "
"if the name is already used by other metrics.";
registry_.erase(found_it);
}
registry_.insert(
{metric_def->name(),
{metric_def, collection_function, env_->NowMicros() / 1000}});
return std::unique_ptr<RegistrationHandle>(
new RegistrationHandle(this, metric_def));
}
void CollectionRegistry::Unregister(const AbstractMetricDef* const metric_def) {
mutex_lock l(mu_);
registry_.erase(metric_def->name());
}
std::unique_ptr<CollectedMetrics> CollectionRegistry::CollectMetrics(
const CollectMetricsOptions& options) const {
internal::Collector collector(env_->NowMicros() / 1000);
mutex_lock l(mu_);
for (const auto& registration : registry_) {
if (options.collect_metric_descriptors) {
collector.CollectMetricDescriptor(registration.second.metric_def);
}
collector.CollectMetricValues(registration.second);
}
return collector.ConsumeCollectedMetrics();
}
}
}
#endif | #include "tensorflow/core/lib/monitoring/collection_registry.h"
#include <memory>
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/percentile_sampler.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
using histogram::Histogram;
namespace test_util {
class CollectionRegistryTestAccess {
public:
static std::unique_ptr<CollectionRegistry> CreateRegistry(Env* const env) {
return std::unique_ptr<CollectionRegistry>(new CollectionRegistry(env));
}
};
}
namespace {
void EmptyCollectionFunction(MetricCollectorGetter getter) {}
TEST(CollectionRegistryTest, RegistrationUnregistration) {
auto* collection_registry = CollectionRegistry::Default();
const MetricDef<MetricKind::kCumulative, int64_t, 0> metric_def0(
"/tensorflow/metric0", "An example metric with no labels.");
const MetricDef<MetricKind::kGauge, HistogramProto, 1> metric_def1(
"/tensorflow/metric1", "An example metric with one label.", "LabelName");
{
std::unique_ptr<CollectionRegistry::RegistrationHandle> handle0 =
collection_registry->Register(&metric_def0, EmptyCollectionFunction);
std::unique_ptr<CollectionRegistry::RegistrationHandle> handle1 =
collection_registry->Register(&metric_def1, EmptyCollectionFunction);
handle0.reset();
handle0 =
collection_registry->Register(&metric_def0, EmptyCollectionFunction);
}
}
TEST(CollectionRegistryDeathTest, DuplicateRegistration) {
auto* collection_registry = CollectionRegistry::Default();
const MetricDef<MetricKind::kCumulative, int64_t, 0> metric_def(
"/tensorflow/metric", "An example metric with no labels.");
auto handle =
collection_registry->Register(&metric_def, EmptyCollectionFunction);
auto duplicate_handle =
collection_registry->Register(&metric_def, EmptyCollectionFunction);
EXPECT_NE(duplicate_handle, nullptr);
}
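// Added descriptive comment (not in the original file): despite the
// "DeathTest" suite name, registering a duplicate metric no longer aborts;
// the registry logs a warning, replaces the old entry, and hands back a
// valid handle, which is what the EXPECT_NE above verifies.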
TEST(CollectMetricsTest, Counter) {
auto counter_with_labels = std::unique_ptr<Counter<2>>(
Counter<2>::New("/tensorflow/test/counter_with_labels",
"Counter with labels.", "MyLabel0", "MyLabel1"));
auto counter_without_labels = std::unique_ptr<Counter<0>>(Counter<0>::New(
"/tensorflow/test/counter_without_labels", "Counter without labels."));
counter_with_labels->GetCell("Label00", "Label10")->IncrementBy(42);
counter_with_labels->GetCell("Label01", "Label11")->IncrementBy(58);
counter_without_labels->GetCell()->IncrementBy(7);
for (const bool collect_metric_descriptors : {true, false}) {
SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
collect_metric_descriptors));
auto* collection_registry = CollectionRegistry::Default();
CollectionRegistry::CollectMetricsOptions options;
options.collect_metric_descriptors = collect_metric_descriptors;
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics(options);
if (collect_metric_descriptors) {
ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/counter_with_labels");
EXPECT_EQ("/tensorflow/test/counter_with_labels", ld.name);
EXPECT_EQ("Counter with labels.", ld.description);
ASSERT_EQ(2, ld.label_names.size());
EXPECT_EQ("MyLabel0", ld.label_names[0]);
EXPECT_EQ("MyLabel1", ld.label_names[1]);
EXPECT_EQ(MetricKind::kCumulative, ld.metric_kind);
EXPECT_EQ(ValueType::kInt64, ld.value_type);
const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/counter_without_labels");
EXPECT_EQ("/tensorflow/test/counter_without_labels", ud.name);
EXPECT_EQ("Counter without labels.", ud.description);
ASSERT_EQ(0, ud.label_names.size());
EXPECT_EQ(MetricKind::kCumulative, ud.metric_kind);
EXPECT_EQ(ValueType::kInt64, ud.value_type);
} else {
EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
}
ASSERT_GE(collected_metrics->point_set_map.size(), 2);
const PointSet& lps = *collected_metrics->point_set_map.at(
"/tensorflow/test/counter_with_labels");
EXPECT_EQ("/tensorflow/test/counter_with_labels", lps.metric_name);
ASSERT_EQ(2, lps.points.size());
ASSERT_EQ(2, lps.points[0]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
EXPECT_EQ(ValueType::kInt64, lps.points[0]->value_type);
EXPECT_EQ(42, lps.points[0]->int64_value);
EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
EXPECT_GE(lps.points[0]->end_timestamp_millis,
lps.points[0]->start_timestamp_millis);
ASSERT_EQ(2, lps.points[1]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
EXPECT_EQ(ValueType::kInt64, lps.points[1]->value_type);
EXPECT_EQ(58, lps.points[1]->int64_value);
EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
EXPECT_GE(lps.points[1]->end_timestamp_millis,
lps.points[1]->start_timestamp_millis);
const PointSet& ups = *collected_metrics->point_set_map.at(
"/tensorflow/test/counter_without_labels");
EXPECT_EQ("/tensorflow/test/counter_without_labels", ups.metric_name);
ASSERT_EQ(1, ups.points.size());
EXPECT_EQ(0, ups.points[0]->labels.size());
EXPECT_EQ(ValueType::kInt64, ups.points[0]->value_type);
EXPECT_EQ(7, ups.points[0]->int64_value);
EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
EXPECT_GE(ups.points[0]->end_timestamp_millis,
ups.points[0]->start_timestamp_millis);
}
}
TEST(CollectMetricsTest, Gauge) {
auto string_gauge_with_labels =
std::unique_ptr<Gauge<string, 2>>(Gauge<string, 2>::New(
"/tensorflow/test/string_gauge_with_labels",
"String gauge with labels.", "MyLabel0", "MyLabel1"));
auto integer_gauge_without_labels = std::unique_ptr<Gauge<int64_t, 0>>(
Gauge<int64_t, 0>::New("/tensorflow/test/integer_gauge_without_labels",
"Integer gauge without labels."));
string_gauge_with_labels->GetCell("Label00", "Label10")->Set("test1");
string_gauge_with_labels->GetCell("Label01", "Label11")->Set("test2");
integer_gauge_without_labels->GetCell()->Set(7);
for (const bool collect_metric_descriptors : {true, false}) {
SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
collect_metric_descriptors));
auto* collection_registry = CollectionRegistry::Default();
CollectionRegistry::CollectMetricsOptions options;
options.collect_metric_descriptors = collect_metric_descriptors;
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics(options);
if (collect_metric_descriptors) {
ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/string_gauge_with_labels");
EXPECT_EQ("/tensorflow/test/string_gauge_with_labels", ld.name);
EXPECT_EQ("String gauge with labels.", ld.description);
ASSERT_EQ(2, ld.label_names.size());
EXPECT_EQ("MyLabel0", ld.label_names[0]);
EXPECT_EQ("MyLabel1", ld.label_names[1]);
EXPECT_EQ(MetricKind::kGauge, ld.metric_kind);
EXPECT_EQ(ValueType::kString, ld.value_type);
const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/integer_gauge_without_labels");
EXPECT_EQ("/tensorflow/test/integer_gauge_without_labels", ud.name);
EXPECT_EQ("Integer gauge without labels.", ud.description);
ASSERT_EQ(0, ud.label_names.size());
EXPECT_EQ(MetricKind::kGauge, ud.metric_kind);
EXPECT_EQ(ValueType::kInt64, ud.value_type);
} else {
EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
}
ASSERT_GE(collected_metrics->point_set_map.size(), 2);
const PointSet& lps = *collected_metrics->point_set_map.at(
"/tensorflow/test/string_gauge_with_labels");
EXPECT_EQ("/tensorflow/test/string_gauge_with_labels", lps.metric_name);
ASSERT_EQ(2, lps.points.size());
ASSERT_EQ(2, lps.points[0]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
EXPECT_EQ(ValueType::kString, lps.points[0]->value_type);
EXPECT_EQ("test1", lps.points[0]->string_value);
EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
EXPECT_GE(lps.points[0]->end_timestamp_millis,
lps.points[0]->start_timestamp_millis);
ASSERT_EQ(2, lps.points[1]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
EXPECT_EQ(ValueType::kString, lps.points[1]->value_type);
EXPECT_EQ("test2", lps.points[1]->string_value);
EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
EXPECT_GE(lps.points[1]->end_timestamp_millis,
lps.points[1]->start_timestamp_millis);
const PointSet& ups = *collected_metrics->point_set_map.at(
"/tensorflow/test/integer_gauge_without_labels");
EXPECT_EQ("/tensorflow/test/integer_gauge_without_labels", ups.metric_name);
ASSERT_EQ(1, ups.points.size());
EXPECT_EQ(0, ups.points[0]->labels.size());
EXPECT_EQ(ValueType::kInt64, ups.points[0]->value_type);
EXPECT_EQ(7, ups.points[0]->int64_value);
EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
EXPECT_GE(ups.points[0]->end_timestamp_millis,
ups.points[0]->start_timestamp_millis);
}
}
void EqHistograms(const Histogram& expected,
const HistogramProto& actual_proto) {
Histogram actual;
ASSERT_TRUE(actual.DecodeFromProto(actual_proto));
EXPECT_EQ(expected.ToString(), actual.ToString());
}
TEST(CollectMetricsTest, Sampler) {
auto sampler_with_labels = std::unique_ptr<Sampler<2>>(
Sampler<2>::New({"/tensorflow/test/sampler_with_labels",
"Sampler with labels.", "MyLabel0", "MyLabel1"},
Buckets::Explicit({1.0, 2.0})));
auto sampler_without_labels = std::unique_ptr<Sampler<0>>(Sampler<0>::New(
{"/tensorflow/test/sampler_without_labels", "Sampler without labels."},
Buckets::Explicit({0.0})));
Histogram with_labels0({1.0, 2.0, DBL_MAX});
sampler_with_labels->GetCell("Label00", "Label10")->Add(0.7);
with_labels0.Add(0.7);
Histogram with_labels1({1.0, 2.0, DBL_MAX});
sampler_with_labels->GetCell("Label01", "Label11")->Add(1.5);
with_labels1.Add(1.5);
Histogram without_labels({0.0, DBL_MAX});
sampler_without_labels->GetCell()->Add(0.5);
without_labels.Add(0.5);
for (const bool collect_metric_descriptors : {true, false}) {
SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
collect_metric_descriptors));
auto* collection_registry = CollectionRegistry::Default();
CollectionRegistry::CollectMetricsOptions options;
options.collect_metric_descriptors = collect_metric_descriptors;
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics(options);
if (collect_metric_descriptors) {
ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/sampler_with_labels");
EXPECT_EQ("/tensorflow/test/sampler_with_labels", ld.name);
EXPECT_EQ("Sampler with labels.", ld.description);
ASSERT_EQ(2, ld.label_names.size());
EXPECT_EQ("MyLabel0", ld.label_names[0]);
EXPECT_EQ("MyLabel1", ld.label_names[1]);
EXPECT_EQ(MetricKind::kCumulative, ld.metric_kind);
EXPECT_EQ(ValueType::kHistogram, ld.value_type);
const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/sampler_without_labels");
EXPECT_EQ("/tensorflow/test/sampler_without_labels", ud.name);
EXPECT_EQ("Sampler without labels.", ud.description);
ASSERT_EQ(0, ud.label_names.size());
EXPECT_EQ(MetricKind::kCumulative, ud.metric_kind);
EXPECT_EQ(ValueType::kHistogram, ud.value_type);
} else {
EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
}
ASSERT_GE(collected_metrics->point_set_map.size(), 2);
const PointSet& lps = *collected_metrics->point_set_map.at(
"/tensorflow/test/sampler_with_labels");
EXPECT_EQ("/tensorflow/test/sampler_with_labels", lps.metric_name);
ASSERT_EQ(2, lps.points.size());
ASSERT_EQ(2, lps.points[0]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
EXPECT_EQ(ValueType::kHistogram, lps.points[0]->value_type);
EqHistograms(with_labels0, lps.points[0]->histogram_value);
EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
EXPECT_GE(lps.points[0]->end_timestamp_millis,
lps.points[0]->start_timestamp_millis);
ASSERT_EQ(2, lps.points[1]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
EXPECT_EQ(ValueType::kHistogram, lps.points[1]->value_type);
EqHistograms(with_labels1, lps.points[1]->histogram_value);
EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
EXPECT_GE(lps.points[1]->end_timestamp_millis,
lps.points[1]->start_timestamp_millis);
const PointSet& ups = *collected_metrics->point_set_map.at(
"/tensorflow/test/sampler_without_labels");
EXPECT_EQ("/tensorflow/test/sampler_without_labels", ups.metric_name);
ASSERT_EQ(1, ups.points.size());
EXPECT_EQ(0, ups.points[0]->labels.size());
EXPECT_EQ(ValueType::kHistogram, ups.points[0]->value_type);
EqHistograms(without_labels, ups.points[0]->histogram_value);
EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
EXPECT_GE(ups.points[0]->end_timestamp_millis,
ups.points[0]->start_timestamp_millis);
}
}
TEST(CollectMetricsTest, PercentileSampler) {
auto sampler_with_labels =
std::unique_ptr<PercentileSampler<2>>(PercentileSampler<2>::New(
{"/tensorflow/test/pctsampler_with_labels",
"Percentile sampler with labels.", "MyLabel0", "MyLabel1"},
{25.0, 50.0, 75.0}, 1024, UnitOfMeasure::kNumber));
auto sampler_without_labels =
std::unique_ptr<PercentileSampler<0>>(PercentileSampler<0>::New(
{"/tensorflow/test/pctsampler_without_labels",
"Percentile sampler without labels."},
{25.0, 50.0, 75.0}, 1024, UnitOfMeasure::kNumber));
sampler_with_labels->GetCell("Label00", "Label10")->Add(0.7);
sampler_with_labels->GetCell("Label01", "Label11")->Add(1.5);
sampler_without_labels->GetCell()->Add(0.5);
for (const bool collect_metric_descriptors : {true, false}) {
SCOPED_TRACE(strings::StrCat("collect_metric_descriptors: ",
collect_metric_descriptors));
auto* collection_registry = CollectionRegistry::Default();
CollectionRegistry::CollectMetricsOptions options;
options.collect_metric_descriptors = collect_metric_descriptors;
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics(options);
if (collect_metric_descriptors) {
ASSERT_GE(collected_metrics->metric_descriptor_map.size(), 2);
const MetricDescriptor& ld = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/pctsampler_with_labels");
EXPECT_EQ("/tensorflow/test/pctsampler_with_labels", ld.name);
EXPECT_EQ("Percentile sampler with labels.", ld.description);
ASSERT_EQ(2, ld.label_names.size());
EXPECT_EQ("MyLabel0", ld.label_names[0]);
EXPECT_EQ("MyLabel1", ld.label_names[1]);
EXPECT_EQ(MetricKind::kCumulative, ld.metric_kind);
EXPECT_EQ(ValueType::kPercentiles, ld.value_type);
const MetricDescriptor& ud = *collected_metrics->metric_descriptor_map.at(
"/tensorflow/test/pctsampler_without_labels");
EXPECT_EQ("/tensorflow/test/pctsampler_without_labels", ud.name);
EXPECT_EQ("Percentile sampler without labels.", ud.description);
ASSERT_EQ(0, ud.label_names.size());
EXPECT_EQ(MetricKind::kCumulative, ud.metric_kind);
EXPECT_EQ(ValueType::kPercentiles, ud.value_type);
} else {
EXPECT_EQ(0, collected_metrics->metric_descriptor_map.size());
}
ASSERT_GE(collected_metrics->point_set_map.size(), 2);
const PointSet& lps = *collected_metrics->point_set_map.at(
"/tensorflow/test/pctsampler_with_labels");
EXPECT_EQ("/tensorflow/test/pctsampler_with_labels", lps.metric_name);
ASSERT_EQ(2, lps.points.size());
ASSERT_EQ(2, lps.points[0]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[0]->labels[0].name);
EXPECT_EQ("Label00", lps.points[0]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[0]->labels[1].name);
EXPECT_EQ("Label10", lps.points[0]->labels[1].value);
EXPECT_EQ(ValueType::kPercentiles, lps.points[0]->value_type);
EXPECT_LT(0, lps.points[0]->start_timestamp_millis);
EXPECT_LT(0, lps.points[0]->end_timestamp_millis);
EXPECT_GE(lps.points[0]->end_timestamp_millis,
lps.points[0]->start_timestamp_millis);
ASSERT_EQ(2, lps.points[1]->labels.size());
EXPECT_EQ("MyLabel0", lps.points[1]->labels[0].name);
EXPECT_EQ("Label01", lps.points[1]->labels[0].value);
EXPECT_EQ("MyLabel1", lps.points[1]->labels[1].name);
EXPECT_EQ("Label11", lps.points[1]->labels[1].value);
EXPECT_EQ(ValueType::kPercentiles, lps.points[1]->value_type);
EXPECT_LT(0, lps.points[1]->start_timestamp_millis);
EXPECT_LT(0, lps.points[1]->end_timestamp_millis);
EXPECT_GE(lps.points[1]->end_timestamp_millis,
lps.points[1]->start_timestamp_millis);
const PointSet& ups = *collected_metrics->point_set_map.at(
"/tensorflow/test/pctsampler_without_labels");
EXPECT_EQ("/tensorflow/test/pctsampler_without_labels", ups.metric_name);
ASSERT_EQ(1, ups.points.size());
EXPECT_EQ(0, ups.points[0]->labels.size());
EXPECT_EQ(ValueType::kPercentiles, ups.points[0]->value_type);
EXPECT_LT(0, ups.points[0]->start_timestamp_millis);
EXPECT_LT(0, ups.points[0]->end_timestamp_millis);
EXPECT_GE(ups.points[0]->end_timestamp_millis,
ups.points[0]->start_timestamp_millis);
}
}
class FakeClockEnv : public EnvWrapper {
public:
FakeClockEnv() : EnvWrapper(Env::Default()), current_millis_(0) {}
void AdvanceByMillis(const uint64 millis) { current_millis_ += millis; }
uint64 NowMicros() const override { return current_millis_ * 1000; }
private:
uint64 current_millis_;
};
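// Added descriptive comment (not in the original file): the test below uses
// FakeClockEnv to pin timestamps. Cumulative metrics report their
// registration time as start_timestamp and the collection time as
// end_timestamp, while gauge metrics report the collection time for both.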
TEST(CollectionRegistryTest, WriteTimestamps) {
FakeClockEnv fake_clock_env;
auto collection_registry =
test_util::CollectionRegistryTestAccess::CreateRegistry(&fake_clock_env);
fake_clock_env.AdvanceByMillis(25);
{
const MetricDef<MetricKind::kCumulative, int64_t, 0> cumulative_metric(
"/tensorflow/cumulative/metric", "An example metric with no labels.");
auto handle = collection_registry->Register(
&cumulative_metric, [&](MetricCollectorGetter getter) {
auto metric_collector = getter.Get(&cumulative_metric);
metric_collector.CollectValue({}, 42);
});
fake_clock_env.AdvanceByMillis(75);
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics({});
const PointSet& point_set =
*collected_metrics->point_set_map.at("/tensorflow/cumulative/metric");
ASSERT_EQ(1, point_set.points.size());
EXPECT_EQ(25, point_set.points[0]->start_timestamp_millis);
EXPECT_EQ(100, point_set.points[0]->end_timestamp_millis);
}
{
const MetricDef<MetricKind::kGauge, int64_t, 0> gauge_metric(
"/tensorflow/gauge/metric", "An example metric with no labels.");
auto handle = collection_registry->Register(
&gauge_metric, [&](MetricCollectorGetter getter) {
auto metric_collector = getter.Get(&gauge_metric);
metric_collector.CollectValue({}, 42);
});
fake_clock_env.AdvanceByMillis(75);
const std::unique_ptr<CollectedMetrics> collected_metrics =
collection_registry->CollectMetrics({});
const PointSet& point_set =
*collected_metrics->point_set_map.at("/tensorflow/gauge/metric");
ASSERT_EQ(1, point_set.points.size());
EXPECT_EQ(175, point_set.points[0]->start_timestamp_millis);
EXPECT_EQ(175, point_set.points[0]->end_timestamp_millis);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/monitoring/collection_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/collection_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ec888cf-ea02-4ebf-826b-99b1cfef72df | cpp | tensorflow/tensorflow | percentile_sampler | third_party/xla/xla/tsl/lib/monitoring/percentile_sampler.cc | tensorflow/core/lib/monitoring/percentile_sampler_test.cc | #include "xla/tsl/lib/monitoring/percentile_sampler.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include "xla/tsl/lib/monitoring/types.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
#ifdef IS_MOBILE_PLATFORM
#else
namespace tsl {
namespace monitoring {
void PercentileSamplerCell::Add(double sample) {
uint64 nstime = EnvTime::NowNanos();
mutex_lock l(mu_);
samples_[next_position_] = {nstime, sample};
++next_position_;
if (TF_PREDICT_FALSE(next_position_ >= samples_.size())) {
next_position_ = 0;
}
if (TF_PREDICT_FALSE(num_samples_ < samples_.size())) {
++num_samples_;
}
++total_samples_;
accumulator_ += sample;
}
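// Added descriptive comment (not in the original file): samples_ is a
// fixed-capacity ring buffer. Add() overwrites the slot at next_position_
// and wraps; num_samples_ counts the retained samples, while total_samples_
// and accumulator_ track every sample ever added.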
Percentiles PercentileSamplerCell::value() const {
Percentiles pct_samples;
pct_samples.unit_of_measure = unit_of_measure_;
size_t total_samples;
long double accumulator;
std::vector<Sample> samples = GetSamples(&total_samples, &accumulator);
if (!samples.empty()) {
pct_samples.num_samples = samples.size();
pct_samples.total_samples = total_samples;
pct_samples.accumulator = accumulator;
pct_samples.start_nstime = samples.front().nstime;
pct_samples.end_nstime = samples.back().nstime;
long double total = 0.0;
for (auto& sample : samples) {
total += sample.value;
}
pct_samples.mean = total / pct_samples.num_samples;
long double total_sigma = 0.0;
for (auto& sample : samples) {
double delta = sample.value - pct_samples.mean;
total_sigma += delta * delta;
}
pct_samples.stddev = std::sqrt(total_sigma / pct_samples.num_samples);
std::sort(samples.begin(), samples.end());
pct_samples.min_value = samples.front().value;
pct_samples.max_value = samples.back().value;
for (auto percentile : percentiles_) {
size_t index = std::min<size_t>(
static_cast<size_t>(percentile * pct_samples.num_samples / 100.0),
pct_samples.num_samples - 1);
PercentilePoint pct = {percentile, samples[index].value};
pct_samples.points.push_back(pct);
}
}
return pct_samples;
}
std::vector<PercentileSamplerCell::Sample> PercentileSamplerCell::GetSamples(
size_t* total_samples, long double* accumulator) const {
mutex_lock l(mu_);
std::vector<Sample> samples;
if (num_samples_ == samples_.size()) {
samples.insert(samples.end(), samples_.begin() + next_position_,
samples_.end());
}
samples.insert(samples.end(), samples_.begin(),
samples_.begin() + next_position_);
*total_samples = total_samples_;
*accumulator = accumulator_;
return samples;
}
}
}
#endif | #include "tensorflow/core/lib/monitoring/percentile_sampler.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace monitoring {
namespace {
auto* pctsampler_with_labels = PercentileSampler<1>::New(
{"/tensorflow/test/percentile_sampler_with_labels",
"Percentile sampler with one label.", "MyLabel"},
{25.0, 50.0, 90.0, 99.0}, 1024, UnitOfMeasure::kNumber);
auto* pctsampler_without_labels = PercentileSampler<0>::New(
{"/tensorflow/test/percentile_sampler_without_labels",
"Percentile sampler without labels initialized as empty."},
{25.0, 50.0, 90.0, 99.0}, 1024, UnitOfMeasure::kNumber);
TEST(LabeledPercentileSamplerTest, FixedPercentilesValues) {
auto* cell = pctsampler_with_labels->GetCell("MyLabel");
cell->Add(10.0);
cell->Add(4.0);
cell->Add(1.0);
cell->Add(0.6);
auto value = cell->value();
EXPECT_EQ(value.min_value, 0.6);
EXPECT_EQ(value.max_value, 10.0);
EXPECT_EQ(value.num_samples, 4);
EXPECT_EQ(value.points[0].value, 1.0);
EXPECT_EQ(value.points[1].value, 4.0);
EXPECT_EQ(value.points[2].value, 10.0);
EXPECT_EQ(value.points[3].value, 10.0);
}
TEST(UnlabeledPercentileSamplerTest, FixedPercentilesValues) {
auto* cell = pctsampler_without_labels->GetCell();
cell->Add(10.0);
cell->Add(4.0);
cell->Add(1.0);
cell->Add(0.6);
auto value = cell->value();
EXPECT_EQ(value.min_value, 0.6);
EXPECT_EQ(value.max_value, 10.0);
EXPECT_EQ(value.num_samples, 4);
EXPECT_EQ(value.points[0].value, 1.0);
EXPECT_EQ(value.points[1].value, 4.0);
EXPECT_EQ(value.points[2].value, 10.0);
EXPECT_EQ(value.points[3].value, 10.0);
}
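// A hedged additional check, not in the original file: the summary
// statistics of a fresh cell should match its two samples exactly.
TEST(LabeledPercentileSamplerTest, MeanAndAccumulator) {
auto* cell = pctsampler_with_labels->GetCell("MeanLabel");
cell->Add(2.0);
cell->Add(4.0);
auto value = cell->value();
EXPECT_EQ(value.num_samples, 2);
EXPECT_EQ(value.total_samples, 2);
EXPECT_NEAR(value.mean, 3.0, 1e-9);
EXPECT_NEAR(value.accumulator, 6.0, 1e-9);
}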
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/monitoring/percentile_sampler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/monitoring/percentile_sampler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6aadb0b1-e600-47f6-ac26-d99cf7ea00cd | cpp | tensorflow/tensorflow | histogram | third_party/xla/xla/tsl/lib/histogram/histogram.cc | third_party/xla/xla/tsl/lib/histogram/histogram_test.cc | #include "xla/tsl/lib/histogram/histogram.h"
#include <float.h>
#include <math.h>
#include <algorithm>
#include <vector>
#include "xla/tsl/protobuf/histogram.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace histogram {
static std::vector<double>* InitDefaultBucketsInner() {
std::vector<double> buckets;
std::vector<double> neg_buckets;
double v = 1.0e-12;
while (v < 1.0e20) {
buckets.push_back(v);
neg_buckets.push_back(-v);
v *= 1.1;
}
buckets.push_back(DBL_MAX);
neg_buckets.push_back(-DBL_MAX);
std::reverse(neg_buckets.begin(), neg_buckets.end());
std::vector<double>* result = new std::vector<double>;
result->insert(result->end(), neg_buckets.begin(), neg_buckets.end());
result->push_back(0.0);
result->insert(result->end(), buckets.begin(), buckets.end());
return result;
}
static absl::Span<const double> InitDefaultBuckets() {
static std::vector<double>* default_bucket_limits = InitDefaultBucketsInner();
return *default_bucket_limits;
}
Histogram::Histogram() : bucket_limits_(InitDefaultBuckets()) { Clear(); }
Histogram::Histogram(absl::Span<const double> custom_bucket_limits)
: custom_bucket_limits_(custom_bucket_limits.begin(),
custom_bucket_limits.end()),
bucket_limits_(custom_bucket_limits_) {
#ifndef NDEBUG
DCHECK_GT(bucket_limits_.size(), size_t{0});
for (size_t i = 1; i < bucket_limits_.size(); i++) {
DCHECK_GT(bucket_limits_[i], bucket_limits_[i - 1]);
}
#endif
Clear();
}
bool Histogram::DecodeFromProto(const HistogramProto& proto) {
if ((proto.bucket_size() != proto.bucket_limit_size()) ||
(proto.bucket_size() == 0)) {
return false;
}
min_ = proto.min();
max_ = proto.max();
num_ = proto.num();
sum_ = proto.sum();
sum_squares_ = proto.sum_squares();
custom_bucket_limits_.clear();
custom_bucket_limits_.insert(custom_bucket_limits_.end(),
proto.bucket_limit().begin(),
proto.bucket_limit().end());
bucket_limits_ = custom_bucket_limits_;
buckets_.clear();
buckets_.insert(buckets_.end(), proto.bucket().begin(), proto.bucket().end());
return true;
}
void Histogram::Clear() {
min_ = bucket_limits_[bucket_limits_.size() - 1];
max_ = -DBL_MAX;
num_ = 0;
sum_ = 0;
sum_squares_ = 0;
buckets_.resize(bucket_limits_.size());
for (size_t i = 0; i < bucket_limits_.size(); i++) {
buckets_[i] = 0;
}
}
void Histogram::Add(double value) {
int b =
std::upper_bound(bucket_limits_.begin(), bucket_limits_.end(), value) -
bucket_limits_.begin();
buckets_[b] += 1.0;
if (min_ > value) min_ = value;
if (max_ < value) max_ = value;
num_++;
sum_ += value;
sum_squares_ += (value * value);
}
double Histogram::Median() const { return Percentile(50.0); }
double Histogram::Remap(double x, double x0, double x1, double y0,
double y1) const {
return y0 + (x - x0) / (x1 - x0) * (y1 - y0);
}
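// Added descriptive comment (not in the original file): Percentile(p) walks
// the cumulative bucket counts until it reaches p% of num_ and then linearly
// interpolates (via Remap) between the containing bucket's limits, clamped
// to the observed [min_, max_] range.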
double Histogram::Percentile(double p) const {
if (num_ == 0.0) return 0.0;
double threshold = num_ * (p / 100.0);
double cumsum_prev = 0;
for (size_t i = 0; i < buckets_.size(); i++) {
double cumsum = cumsum_prev + buckets_[i];
if (cumsum >= threshold) {
if (cumsum == cumsum_prev) {
continue;
}
double lhs = (i == 0 || cumsum_prev == 0) ? min_ : bucket_limits_[i - 1];
lhs = std::max(lhs, min_);
double rhs = bucket_limits_[i];
rhs = std::min(rhs, max_);
double weight = Remap(threshold, cumsum_prev, cumsum, lhs, rhs);
return weight;
}
cumsum_prev = cumsum;
}
return max_;
}
double Histogram::Average() const {
if (num_ == 0.0) return 0;
return sum_ / num_;
}
double Histogram::StandardDeviation() const {
if (num_ == 0.0) return 0;
double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_);
return sqrt(variance);
}
std::string Histogram::ToString() const {
std::string r;
char buf[200];
snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f\n", num_,
Average(), StandardDeviation());
r.append(buf);
snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f\n",
(num_ == 0.0 ? 0.0 : min_), Median(), max_);
r.append(buf);
r.append("------------------------------------------------------\n");
const double mult = num_ > 0 ? 100.0 / num_ : 0.0;
double sum = 0;
for (size_t b = 0; b < buckets_.size(); b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
snprintf(buf, sizeof(buf), "[ %10.2g, %10.2g ) %7.0f %7.3f%% %7.3f%% ",
((b == 0) ? -DBL_MAX : bucket_limits_[b - 1]),
bucket_limits_[b],
buckets_[b],
mult * buckets_[b],
mult * sum);
r.append(buf);
int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
r.append(marks, '#');
r.push_back('\n');
}
return r;
}
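// Added descriptive comment (not in the original file): when
// preserve_zero_buckets is false, a run of consecutive empty buckets is
// collapsed into a single proto entry whose limit is the last empty
// bucket's upper bound, keeping the encoded histogram compact.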
void Histogram::EncodeToProto(HistogramProto* proto,
bool preserve_zero_buckets) const {
proto->Clear();
proto->set_min(min_);
proto->set_max(max_);
proto->set_num(num_);
proto->set_sum(sum_);
proto->set_sum_squares(sum_squares_);
for (size_t i = 0; i < buckets_.size();) {
double end = bucket_limits_[i];
double count = buckets_[i];
i++;
if (!preserve_zero_buckets && count <= 0.0) {
while (i < buckets_.size() && buckets_[i] <= 0.0) {
end = bucket_limits_[i];
count = buckets_[i];
i++;
}
}
proto->add_bucket_limit(end);
proto->add_bucket(count);
}
if (proto->bucket_size() == 0) {
proto->add_bucket_limit(DBL_MAX);
proto->add_bucket(0.0);
}
}
bool ThreadSafeHistogram::DecodeFromProto(const HistogramProto& proto) {
mutex_lock l(mu_);
return histogram_.DecodeFromProto(proto);
}
void ThreadSafeHistogram::Clear() {
mutex_lock l(mu_);
histogram_.Clear();
}
void ThreadSafeHistogram::Add(double value) {
mutex_lock l(mu_);
histogram_.Add(value);
}
void ThreadSafeHistogram::EncodeToProto(HistogramProto* proto,
bool preserve_zero_buckets) const {
mutex_lock l(mu_);
histogram_.EncodeToProto(proto, preserve_zero_buckets);
}
double ThreadSafeHistogram::Median() const {
mutex_lock l(mu_);
return histogram_.Median();
}
double ThreadSafeHistogram::Percentile(double p) const {
mutex_lock l(mu_);
return histogram_.Percentile(p);
}
double ThreadSafeHistogram::Average() const {
mutex_lock l(mu_);
return histogram_.Average();
}
double ThreadSafeHistogram::StandardDeviation() const {
mutex_lock l(mu_);
return histogram_.StandardDeviation();
}
std::string ThreadSafeHistogram::ToString() const {
mutex_lock l(mu_);
return histogram_.ToString();
}
}
} | #include "xla/tsl/lib/histogram/histogram.h"
#include <float.h>
#include "xla/tsl/protobuf/histogram.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace histogram {
static void Validate(const Histogram& h) {
string s1 = h.ToString();
LOG(ERROR) << s1;
HistogramProto proto_with_zeroes;
h.EncodeToProto(&proto_with_zeroes, true);
Histogram h2;
EXPECT_TRUE(h2.DecodeFromProto(proto_with_zeroes));
string s2 = h2.ToString();
LOG(ERROR) << s2;
EXPECT_EQ(s1, s2);
HistogramProto proto_no_zeroes;
h.EncodeToProto(&proto_no_zeroes, false);
LOG(ERROR) << proto_no_zeroes.DebugString();
Histogram h3;
EXPECT_TRUE(h3.DecodeFromProto(proto_no_zeroes));
string s3 = h3.ToString();
LOG(ERROR) << s3;
EXPECT_EQ(s1, s3);
}
TEST(Histogram, Empty) {
Histogram h;
Validate(h);
}
TEST(Histogram, SingleValue) {
Histogram h;
h.Add(-3.0);
Validate(h);
}
TEST(Histogram, CustomBuckets) {
Histogram h({-10, -5, 0, 5, 10, 100, 1000, 10000, DBL_MAX});
h.Add(-3.0);
h.Add(4.99);
h.Add(5.0);
h.Add(1000.0);
Validate(h);
}
TEST(Histogram, Median) {
Histogram h({0, 10, 100, DBL_MAX});
h.Add(-2);
h.Add(-2);
h.Add(0);
double median = h.Median();
EXPECT_EQ(median, -0.5);
}
TEST(Histogram, Percentile) {
Histogram h({1, 2, 3, 4});
h.Add(-1.0);
h.Add(1.5);
h.Add(1.5);
h.Add(1.5);
h.Add(2.5);
h.Add(2.5);
h.Add(2.5);
h.Add(2.5);
h.Add(3.5);
h.Add(3.9);
EXPECT_EQ(h.Percentile(0), -1.0);
EXPECT_EQ(h.Percentile(25), 1.5);
EXPECT_EQ(h.Percentile(50), 2.25);
EXPECT_EQ(h.Percentile(75), 2.875);
EXPECT_EQ(h.Percentile(90), 3.45);
EXPECT_EQ(h.Percentile(100), 3.9);
}
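// A hedged additional check, not in the original file: Average() and
// StandardDeviation() are computed from the running sums, independent of
// the bucket boundaries.
TEST(Histogram, AverageAndStdDev) {
Histogram h;
h.Add(2.0);
h.Add(4.0);
EXPECT_DOUBLE_EQ(h.Average(), 3.0);
EXPECT_DOUBLE_EQ(h.StandardDeviation(), 1.0);
}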
TEST(Histogram, Basic) {
Histogram h;
for (int i = 0; i < 100; i++) {
h.Add(i);
}
for (int i = 1000; i < 100000; i += 1000) {
h.Add(i);
}
Validate(h);
}
TEST(ThreadSafeHistogram, Basic) {
Histogram h;
for (int i = 0; i < 100; i++) {
h.Add(i);
}
ThreadSafeHistogram tsh;
for (int i = 0; i < 100; i++) {
tsh.Add(i);
}
for (int i = 0; i < 2; ++i) {
bool preserve_zero_buckets = (i == 0);
HistogramProto h_proto;
h.EncodeToProto(&h_proto, preserve_zero_buckets);
HistogramProto tsh_proto;
tsh.EncodeToProto(&tsh_proto, preserve_zero_buckets);
Histogram h2;
EXPECT_TRUE(h2.DecodeFromProto(tsh_proto));
ThreadSafeHistogram tsh2;
EXPECT_TRUE(tsh2.DecodeFromProto(h_proto));
EXPECT_EQ(h2.ToString(), tsh2.ToString());
}
EXPECT_EQ(h.Median(), tsh.Median());
EXPECT_EQ(h.Percentile(40.0), tsh.Percentile(40.0));
EXPECT_EQ(h.Average(), tsh.Average());
EXPECT_EQ(h.StandardDeviation(), tsh.StandardDeviation());
EXPECT_EQ(h.ToString(), tsh.ToString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/histogram/histogram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/histogram/histogram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce81a04e-fca4-4ddf-96e3-daf1ae6b533d | cpp | tensorflow/tensorflow | traceme_recorder | third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder.cc | third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder_test.cc | #include "xla/tsl/profiler/backends/cpu/traceme_recorder.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/profiler/utils/lock_free_queue.h"
#include "xla/tsl/profiler/utils/per_thread.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
namespace internal {
#ifdef _WIN32
#define DECL_DLL_EXPORT __declspec(dllexport)
#else
#define DECL_DLL_EXPORT
#endif
DECL_DLL_EXPORT std::atomic<int> g_trace_level(
TraceMeRecorder::kTracingDisabled);
static_assert(ATOMIC_INT_LOCK_FREE == 2, "Assumed atomic<int> was lock free");
}
namespace {
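// Pairs up the two halves of a TraceMe that was split across threads: each
// start half (carrying the name and start time) is indexed by activity id,
// and each end half is merged with its matching start when one is found.
// Unmatched ends are retried in HandleCrossThreadEvents() once all threads
// have been drained.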
class SplitEventTracker {
public:
void AddStart(TraceMeRecorder::Event&& event) {
DCHECK(event.IsStart());
start_events_.emplace(event.ActivityId(), std::move(event));
}
void AddEnd(TraceMeRecorder::Event* event) {
DCHECK(event->IsEnd());
if (!FindStartAndMerge(event)) {
end_events_.push_back(event);
}
}
void HandleCrossThreadEvents() {
for (auto* event : end_events_) {
FindStartAndMerge(event);
}
}
private:
bool FindStartAndMerge(TraceMeRecorder::Event* event) {
auto iter = start_events_.find(event->ActivityId());
if (iter == start_events_.end()) return false;
auto& start_event = iter->second;
event->name = std::move(start_event.name);
event->start_time = start_event.start_time;
start_events_.erase(iter);
return true;
}
absl::flat_hash_map<int64_t, TraceMeRecorder::Event> start_events_;
std::vector<TraceMeRecorder::Event*> end_events_;
};
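// Per-thread event sink. Events are pushed into a lock-free queue by the
// owning thread and drained by the profiler in Consume(), which routes
// split start/end events through the SplitEventTracker.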
class ThreadLocalRecorder {
public:
ThreadLocalRecorder() {
auto* env = Env::Default();
info_.tid = env->GetCurrentThreadId();
env->GetCurrentThreadName(&info_.name);
}
const TraceMeRecorder::ThreadInfo& Info() const { return info_; }
void Record(TraceMeRecorder::Event&& event) { queue_.Push(std::move(event)); }
void Clear() { queue_.Clear(); }
TF_MUST_USE_RESULT std::deque<TraceMeRecorder::Event> Consume(
SplitEventTracker* split_event_tracker) {
std::deque<TraceMeRecorder::Event> events;
std::optional<TraceMeRecorder::Event> event;
while ((event = queue_.Pop())) {
if (event->IsStart()) {
split_event_tracker->AddStart(*std::move(event));
continue;
}
events.push_back(*std::move(event));
if (events.back().IsEnd()) {
split_event_tracker->AddEnd(&events.back());
}
}
return events;
}
private:
TraceMeRecorder::ThreadInfo info_;
LockFreeQueue<TraceMeRecorder::Event> queue_;
};
}
void TraceMeRecorder::Clear() {
auto recorders = PerThread<ThreadLocalRecorder>::StartRecording();
for (auto& recorder : recorders) {
recorder->Clear();
  }
}
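// Stops recording on all threads, drains each per-thread queue, and stitches
// start/end halves that landed on different threads back together before
// returning the per-thread event lists.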
TraceMeRecorder::Events TraceMeRecorder::Consume() {
TraceMeRecorder::Events result;
SplitEventTracker split_event_tracker;
auto recorders = PerThread<ThreadLocalRecorder>::StopRecording();
for (auto& recorder : recorders) {
auto events = recorder->Consume(&split_event_tracker);
if (!events.empty()) {
result.push_back({recorder->Info(), std::move(events)});
}
  }
split_event_tracker.HandleCrossThreadEvents();
return result;
}
bool TraceMeRecorder::Start(int level) {
level = std::max(0, level);
int expected = kTracingDisabled;
bool started = internal::g_trace_level.compare_exchange_strong(
expected, level, std::memory_order_acq_rel);
if (started) {
Clear();
}
return started;
}
void TraceMeRecorder::Record(Event&& event) {
PerThread<ThreadLocalRecorder>::Get().Record(std::move(event));
}
TraceMeRecorder::Events TraceMeRecorder::Stop() {
TraceMeRecorder::Events events;
if (internal::g_trace_level.exchange(
kTracingDisabled, std::memory_order_acq_rel) != kTracingDisabled) {
events = Consume();
}
return events;
}
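// Activity ids pack a process-wide thread counter into the upper 32 bits and
// a per-thread sequence number into the lower 32 bits, so ids are unique
// across threads without any synchronization on the fast path.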
int64_t TraceMeRecorder::NewActivityId() {
static std::atomic<int32> thread_counter(1);
const thread_local static int32_t thread_id =
thread_counter.fetch_add(1, std::memory_order_relaxed);
thread_local static uint32 per_thread_activity_id = 0;
return static_cast<int64_t>(thread_id) << 32 | per_thread_activity_id++;
}
}
} | #include "xla/tsl/profiler/backends/cpu/traceme_recorder.h"
#include <atomic>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
namespace {
using ::testing::ElementsAre;
MATCHER_P(Named, name, "") { return arg.name == name; }
TEST(RecorderTest, SingleThreaded) {
int64_t start_time = GetCurrentTimeNanos();
int64_t end_time = start_time + UniToNano(1);
TraceMeRecorder::Record({"before", start_time, end_time});
TraceMeRecorder::Start(1);
TraceMeRecorder::Record({"during1", start_time, end_time});
TraceMeRecorder::Record({"during2", start_time, end_time});
auto results = TraceMeRecorder::Stop();
TraceMeRecorder::Record({"after", start_time, end_time});
ASSERT_EQ(results.size(), 1);
EXPECT_THAT(results[0].events,
ElementsAre(Named("during1"), Named("during2")));
}
TEST(RecorderTest, Multithreaded) {
constexpr static int kNumThreads = 4;
tsl::Notification start;
tsl::Notification stop;
thread::ThreadPool pool(tsl::Env::Default(), "testpool", kNumThreads);
std::atomic<int> thread_count = {0};
for (int i = 0; i < kNumThreads; i++) {
pool.Schedule([&start, &stop, &thread_count] {
uint64 j = 0;
bool was_active = false;
auto record_event = [&j]() {
int64_t start_time = GetCurrentTimeNanos();
int64_t end_time = start_time + UniToNano(1);
TraceMeRecorder::Record(
{absl::StrCat(j++), start_time, end_time});
};
thread_count.fetch_add(1, std::memory_order_relaxed);
start.WaitForNotification();
while (!stop.HasBeenNotified()) {
if (TraceMeRecorder::Active()) {
record_event();
was_active = true;
}
if (was_active && !TraceMeRecorder::Active()) {
record_event();
record_event();
was_active = false;
}
SpinForNanos(10);
}
});
}
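  // Each worker names its events with a strictly increasing counter, so a
  // gap between consecutive ids inside one session implies a split session,
  // and an id seen in two different sessions implies overlapping sessions.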
struct ThreadState {
bool split_session = false;
bool overlapping_sessions = false;
std::set<uint64> events;
};
  absl::flat_hash_map<uint32, ThreadState> thread_state;
auto done = [&thread_state] {
for (const auto& id_and_thread : thread_state) {
auto& t = id_and_thread.second;
if (t.events.size() < 2) return false;
}
return true;
};
while (thread_count.load(std::memory_order_relaxed) < kNumThreads) {
LOG(INFO) << "Waiting for all threads to spin up...";
SleepForMillis(1);
}
start.Notify();
constexpr static int kMaxIters = 100;
for (int iters = 0; iters < kMaxIters && !done(); ++iters) {
LOG(INFO) << "Looping until convergence, iteration: " << iters;
TraceMeRecorder::Start(1);
SleepForMillis(100);
auto results = TraceMeRecorder::Stop();
for (const auto& thread : results) {
if (thread.events.empty()) continue;
auto& state = thread_state[thread.thread.tid];
std::set<uint64> session_events;
uint64 current = 0;
for (const auto& event : thread.events) {
uint64 activity_id;
ASSERT_TRUE(absl::SimpleAtoi(event.name, &activity_id));
session_events.emplace(activity_id);
if (current != 0 && activity_id != current + 1) {
state.split_session = true;
}
current = activity_id;
}
for (const auto& event : session_events) {
auto result = state.events.emplace(event);
if (!result.second) {
state.overlapping_sessions = true;
}
}
}
SleepForMillis(1);
}
stop.Notify();
for (const auto& id_and_thread : thread_state) {
auto& thread = id_and_thread.second;
EXPECT_FALSE(thread.split_session)
<< "Expected contiguous events in a session";
EXPECT_FALSE(thread.overlapping_sessions) << "Expected disjoint sessions";
EXPECT_GT(thread.events.size(), 1)
<< "Expected gaps in thread events between sessions";
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/backends/cpu/traceme_recorder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40a77d2d-ac7a-47c8-aed8-486396bd535b | cpp | tensorflow/tensorflow | device_utils | tensorflow/core/common_runtime/device/device_utils.cc | third_party/xla/xla/tsl/profiler/utils/device_utils_test.cc | #include "tensorflow/core/common_runtime/device/device_utils.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace device_utils {
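// Device types must be upper-case ASCII: a leading letter optionally
// followed by letters or underscores (e.g. "CPU", "MY_DEVICE").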
Status ValidateDeviceType(StringPiece type) {
static const LazyRE2 kTfDeviceTypeRegEx = {"[A-Z][A-Z_]*"};
bool matches = RE2::FullMatch(type, *kTfDeviceTypeRegEx);
if (!matches) {
return Status(absl::StatusCode::kFailedPrecondition,
strings::StrCat("Device name/type '", type, "' must match ",
kTfDeviceTypeRegEx->pattern(), "."));
}
return absl::OkStatus();
}
}
} | #include "xla/tsl/profiler/utils/device_utils.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
tensorflow::profiler::XPlane CreateXPlane(absl::string_view name) {
tensorflow::profiler::XPlane plane;
plane.set_name(name.data(), name.size());
return plane;
}
TEST(DeviceUtilsTest, GetDeviceType) {
EXPECT_EQ(GetDeviceType(CreateXPlane(kHostThreadsPlaneName)),
DeviceType::kCpu);
EXPECT_EQ(GetDeviceType(CreateXPlane(absl::StrCat(kTpuPlanePrefix, 0))),
DeviceType::kTpu);
EXPECT_EQ(GetDeviceType(CreateXPlane(absl::StrCat(kGpuPlanePrefix, 0))),
DeviceType::kGpu);
EXPECT_EQ(GetDeviceType(CreateXPlane("unknown")), DeviceType::kUnknown);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/device_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0870f6bf-356a-4ba4-973a-d1a6c1553518 | cpp | tensorflow/tensorflow | xplane_utils | third_party/xla/xla/tsl/profiler/utils/xplane_utils.cc | third_party/xla/xla/tsl/profiler/utils/xplane_utils_test.cc | #include "xla/tsl/profiler/utils/xplane_utils.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "xla/tsl/util/stats_calculator.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
template <typename T, typename Pred>
std::vector<int> FindAll(const protobuf::RepeatedPtrField<T>& array,
const Pred& pred) {
std::vector<int> indices;
for (int i = 0; i < array.size(); ++i) {
if (pred(&array.Get(i))) indices.push_back(i);
}
return indices;
}
template <typename T, typename Pred>
int Find(const protobuf::RepeatedPtrField<T>& array, const Pred& pred) {
std::vector<int> indices = FindAll(array, pred);
if (indices.size() > 1) {
LOG(WARNING) << "Found multiple " << T().GetTypeName()
<< " when only one was expected.";
}
return indices.empty() ? -1 : indices.front();
}
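// Removes the elements at `indices` (assumed sorted ascending) in a single
// pass: surviving elements are swapped toward the front and the tail is
// deleted, so the cost is linear in the array size.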
template <typename T>
void RemoveAt(protobuf::RepeatedPtrField<T>* array,
const std::vector<int>& indices) {
if (indices.empty()) return;
if (array->size() == indices.size()) {
array->Clear();
return;
}
auto remove_iter = indices.begin();
int i = *(remove_iter++);
for (int j = i + 1; j < array->size(); ++j) {
if (remove_iter != indices.end() && *remove_iter == j) {
++remove_iter;
} else {
array->SwapElements(j, i++);
}
}
array->DeleteSubrange(i, array->size() - i);
}
template <typename T>
void Remove(protobuf::RepeatedPtrField<T>* array, const T* elem) {
int i = Find(*array, [elem](const T* e) { return elem == e; });
RemoveAt(array, {i});
}
template <typename T, typename Pred>
void RemoveIf(protobuf::RepeatedPtrField<T>* array, Pred&& pred) {
std::vector<int> indices = FindAll(*array, pred);
RemoveAt(array, indices);
}
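// Copies display name, name, metadata bytes, and metadata-level stats from
// `src_event_metadata` into `dst_event_metadata`, remapping stat metadata
// ids (and ref-valued strings) into `dst_plane`'s id space. Fields already
// set on the destination are left untouched.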
void CopyEventMetadata(const XEventMetadata& src_event_metadata,
const XPlaneVisitor& src_plane,
XEventMetadata& dst_event_metadata,
XPlaneBuilder& dst_plane) {
if (dst_event_metadata.display_name().empty() &&
!src_event_metadata.display_name().empty()) {
dst_event_metadata.set_display_name(src_event_metadata.display_name());
}
if (dst_event_metadata.name().empty() && !src_event_metadata.name().empty()) {
dst_event_metadata.set_name(src_event_metadata.name());
}
if (dst_event_metadata.metadata().empty() &&
!src_event_metadata.metadata().empty()) {
dst_event_metadata.set_metadata(src_event_metadata.metadata());
}
if (dst_event_metadata.stats().empty() &&
!src_event_metadata.stats().empty()) {
XEventMetadataVisitor src_event_metadata_visitor(&src_plane,
&src_event_metadata);
src_event_metadata_visitor.ForEachStat([&](const XStatVisitor& src_stat) {
XStatMetadata& metadata =
*dst_plane.GetOrCreateStatMetadata(src_stat.Name());
XStat& dst_stat = *dst_event_metadata.add_stats();
dst_stat = src_stat.RawStat();
if (src_stat.ValueCase() == XStat::kRefValue) {
XStatMetadata& value_metadata =
*dst_plane.GetOrCreateStatMetadata(src_stat.StrOrRefValue());
dst_stat.set_ref_value(value_metadata.id());
}
dst_stat.set_metadata_id(metadata.id());
});
}
DCHECK_EQ(src_event_metadata.stats_size(), dst_event_metadata.stats_size());
}
void CopyEvent(const XEventVisitor& src_event, const XPlaneVisitor& src,
const XPlane& src_plane, int64_t time_offset_ps,
XPlaneBuilder& dst_plane, XLineBuilder& dst_line) {
XEventMetadata* dst_event_metadata =
dst_plane.GetOrCreateEventMetadata(src_event.Name());
CopyEventMetadata(*src_event.metadata(), src, *dst_event_metadata, dst_plane);
XEventBuilder dst_event = dst_line.AddEvent(*dst_event_metadata);
if (src_event.IsAggregatedEvent()) {
dst_event.SetNumOccurrences(src_event.NumOccurrences());
} else {
dst_event.SetOffsetPs(src_event.OffsetPs() + time_offset_ps);
}
dst_event.SetDurationPs(src_event.DurationPs());
src_event.ForEachStat([&](const XStatVisitor& stat) {
dst_event.AddStat(*dst_plane.GetOrCreateStatMetadata(stat.Name()),
stat.RawStat(), src_plane);
});
}
bool IsOpLineName(absl::string_view line_name) {
return line_name == kXlaOpLineName || line_name == kTensorFlowOpLineName;
}
}
const XPlane* FindPlaneWithName(const XSpace& space, absl::string_view name) {
int i = Find(space.planes(),
[name](const XPlane* plane) { return plane->name() == name; });
return (i != -1) ? &space.planes(i) : nullptr;
}
std::vector<const XPlane*> FindPlanesWithNames(
const XSpace& space, const std::vector<absl::string_view>& names) {
absl::flat_hash_set<absl::string_view> names_set(names.begin(), names.end());
std::vector<int> indices =
FindAll(space.planes(), [&names_set](const XPlane* plane) {
return names_set.contains(plane->name());
});
std::vector<const XPlane*> planes;
planes.reserve(indices.size());
for (int i : indices) {
planes.push_back(&space.planes(i));
}
return planes;
}
XPlane* FindMutablePlaneWithName(XSpace* space, absl::string_view name) {
int i = Find(space->planes(),
[name](const XPlane* plane) { return plane->name() == name; });
return (i != -1) ? space->mutable_planes(i) : nullptr;
}
XPlane* FindOrAddMutablePlaneWithName(XSpace* space, absl::string_view name) {
XPlane* plane = FindMutablePlaneWithName(space, name);
if (plane == nullptr) {
plane = space->add_planes();
plane->set_name(name.data(), name.size());
}
return plane;
}
std::vector<const XPlane*> FindPlanesWithPrefix(const XSpace& space,
absl::string_view prefix) {
return FindPlanes(space, [&](const XPlane& plane) {
return absl::StartsWith(plane.name(), prefix);
});
}
std::vector<XPlane*> FindMutablePlanesWithPrefix(XSpace* space,
absl::string_view prefix) {
return FindMutablePlanes(space, [&](XPlane& plane) {
return absl::StartsWith(plane.name(), prefix);
});
}
const XLine* FindLineWithId(const XPlane& plane, int64_t id) {
int i =
Find(plane.lines(), [id](const XLine* line) { return line->id() == id; });
return (i != -1) ? &plane.lines(i) : nullptr;
}
std::vector<const XLine*> FindLinesWithId(const XPlane& plane, int64_t id) {
std::vector<int> indices = FindAll(
plane.lines(), [id](const XLine* line) { return line->id() == id; });
std::vector<const XLine*> lines;
lines.reserve(indices.size());
for (int index : indices) {
lines.push_back(&plane.lines(index));
}
return lines;
}
const XLine* FindLineWithName(const XPlane& plane, absl::string_view name) {
int i = Find(plane.lines(),
[name](const XLine* line) { return line->name() == name; });
return (i != -1) ? &plane.lines(i) : nullptr;
}
XStat* FindOrAddMutableStat(const XStatMetadata& stat_metadata, XEvent* event) {
for (auto& stat : *event->mutable_stats()) {
if (stat.metadata_id() == stat_metadata.id()) {
return &stat;
}
}
XStat* stat = event->add_stats();
stat->set_metadata_id(stat_metadata.id());
return stat;
}
void RemovePlane(XSpace* space, const XPlane* plane) {
DCHECK(plane != nullptr);
Remove(space->mutable_planes(), plane);
}
void RemovePlanes(XSpace* space, const std::vector<const XPlane*>& planes) {
absl::flat_hash_set<const XPlane*> planes_set(planes.begin(), planes.end());
RemoveIf(space->mutable_planes(), [&planes_set](const XPlane* plane) {
return planes_set.contains(plane);
});
}
void RemoveLine(XPlane* plane, const XLine* line) {
DCHECK(line != nullptr);
Remove(plane->mutable_lines(), line);
}
void RemoveEvents(XLine* line,
const absl::flat_hash_set<const XEvent*>& events) {
RemoveIf(line->mutable_events(),
[&](const XEvent* event) { return events.contains(event); });
}
void RemoveEmptyPlanes(XSpace* space) {
RemoveIf(space->mutable_planes(),
[&](const XPlane* plane) { return plane->lines().empty(); });
}
void RemoveEmptyLines(XPlane* plane) {
RemoveIf(plane->mutable_lines(),
[&](const XLine* line) { return line->events().empty(); });
}
bool XEventsComparator::operator()(const XEvent* a, const XEvent* b) const {
return XEventTimespan(*a) < XEventTimespan(*b);
}
void SortXPlane(XPlane* plane) {
for (XLine& line : *plane->mutable_lines()) {
auto& events = *line.mutable_events();
std::sort(events.pointer_begin(), events.pointer_end(),
XEventsComparator());
}
}
void SortXSpace(XSpace* space) {
for (XPlane& plane : *space->mutable_planes()) SortXPlane(&plane);
}
void NormalizeTimestamps(XPlane* plane, uint64 start_time_ns) {
for (XLine& line : *plane->mutable_lines()) {
if (line.timestamp_ns() >= static_cast<int64_t>(start_time_ns)) {
line.set_timestamp_ns(line.timestamp_ns() - start_time_ns);
}
}
}
void NormalizeTimestamps(XSpace* space, uint64 start_time_ns) {
for (XPlane& plane : *space->mutable_planes()) {
NormalizeTimestamps(&plane, start_time_ns);
}
}
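// Merges `src_plane` into `dst_plane`. Lines are matched by id; a merged
// line keeps the earlier of the two start timestamps, and events from the
// later-starting line are shifted by the difference (in picoseconds) so
// absolute event times are preserved.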
void MergePlanes(const XPlane& src_plane, XPlane* dst_plane) {
RemoveEmptyLines(dst_plane);
XPlaneVisitor src(&src_plane);
XPlaneBuilder dst(dst_plane);
src.ForEachStat([&](const XStatVisitor& stat) {
XStatMetadata* stat_metadata = dst.GetOrCreateStatMetadata(stat.Name());
dst.SetOrAddStat(*stat_metadata, stat.RawStat(), src_plane);
});
src.ForEachLine([&](const XLineVisitor& line) {
XLineBuilder dst_line = dst.GetOrCreateLine(line.Id());
int64_t time_offset_ps = 0LL;
if (dst_line.NumEvents() == 0) {
dst_line.SetTimestampNs(line.TimestampNs());
dst_line.SetName(line.Name());
dst_line.SetDisplayNameIfEmpty(line.DisplayName());
} else {
if (line.TimestampNs() <= dst_line.TimestampNs()) {
dst_line.SetTimestampNsAndAdjustEventOffsets(line.TimestampNs());
} else {
time_offset_ps =
NanoToPico(line.TimestampNs() - dst_line.TimestampNs());
}
dst_line.SetNameIfEmpty(line.Name());
}
line.ForEachEvent([&](const XEventVisitor& event) {
CopyEvent(event, src, src_plane, time_offset_ps, dst, dst_line);
});
});
}
void MergePlanes(const std::vector<const XPlane*>& src_planes,
XPlane* dst_plane) {
for (const XPlane* src_plane : src_planes) {
MergePlanes(*src_plane, dst_plane);
}
}
int64_t GetStartTimestampNs(const XPlane& plane) {
if (plane.lines().empty()) return 0LL;
int64_t plane_timestamp = std::numeric_limits<int64_t>::max();
for (const auto& line : plane.lines()) {
plane_timestamp = std::min(plane_timestamp, line.timestamp_ns());
}
return plane_timestamp;
}
bool IsEmpty(const XSpace& space) {
for (const auto& plane : space.planes()) {
for (const auto& line : plane.lines()) {
if (!line.events().empty()) {
return false;
}
}
}
return true;
}
bool IsXSpaceGrouped(const XSpace& space) {
for (const auto& plane : space.planes()) {
XPlaneVisitor xplane = tsl::profiler::CreateTfXPlaneVisitor(&plane);
const XStatMetadata* group_id_stat =
xplane.GetStatMetadataByType(StatType::kGroupId);
if (group_id_stat) return true;
}
return false;
}
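// Attaches XFlow stats to events so flows can be drawn between planes:
// correlation ids become GPU-launch flows (flow-out on the host plane,
// flow-in on device planes), and, when `connect_traceme` is set,
// producer/consumer id+type stat pairs become TraceMe flows.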
void AddFlowsToXplane(int32_t host_id, bool is_host_plane, bool connect_traceme,
XPlane* xplane) {
if (!xplane) return;
XPlaneBuilder plane(xplane);
XStatMetadata* correlation_id_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kCorrelationId));
XStatMetadata* producer_type_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kProducerType));
XStatMetadata* consumer_type_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kConsumerType));
XStatMetadata* producer_id_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kProducerId));
XStatMetadata* consumer_id_stats_metadata =
plane.GetStatMetadata(GetStatTypeStr(StatType::kConsumerId));
XStatMetadata* flow_stats_metadata =
plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kFlow));
XFlow::FlowDirection direction = is_host_plane
? XFlow::FlowDirection::kFlowOut
: XFlow::FlowDirection::kFlowIn;
plane.ForEachLine([&](XLineBuilder line) {
line.ForEachEvent([&](XEventBuilder event) {
std::optional<uint64_t> correlation_id;
std::optional<uint64_t> producer_type;
std::optional<uint64_t> consumer_type;
std::optional<uint64_t> producer_id;
std::optional<uint64_t> consumer_id;
event.ForEachStat([&](XStat* stat) {
if (correlation_id_stats_metadata &&
stat->metadata_id() == correlation_id_stats_metadata->id()) {
correlation_id = stat->uint64_value();
} else if (connect_traceme) {
if (producer_type_stats_metadata &&
stat->metadata_id() == producer_type_stats_metadata->id()) {
producer_type = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
} else if (consumer_type_stats_metadata &&
stat->metadata_id() ==
consumer_type_stats_metadata->id()) {
consumer_type = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
} else if (producer_id_stats_metadata &&
stat->metadata_id() == producer_id_stats_metadata->id()) {
producer_id = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
} else if (consumer_id_stats_metadata &&
stat->metadata_id() == consumer_id_stats_metadata->id()) {
consumer_id = XStatsBuilder<XPlane>::IntOrUintValue(*stat);
}
}
});
if (correlation_id) {
XFlow flow(XFlow::GetFlowId(host_id, *correlation_id), direction,
ContextType::kGpuLaunch);
event.AddStatValue(*flow_stats_metadata, flow.ToStatValue());
}
if (connect_traceme) {
if (producer_type && producer_id) {
auto context_type = GetSafeContextType(*producer_type);
XFlow flow(XFlow::GetFlowId(host_id, *producer_id, context_type),
XFlow::FlowDirection::kFlowOut, context_type);
event.AddStatValue(*flow_stats_metadata, flow.ToStatValue());
}
if (consumer_type && consumer_id) {
auto context_type = GetSafeContextType(*consumer_type);
XFlow flow(XFlow::GetFlowId(host_id, *consumer_id, context_type),
XFlow::FlowDirection::kFlowIn, context_type);
event.AddStatValue(*flow_stats_metadata, flow.ToStatValue());
}
}
});
});
}
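// Fingerprints a device plane by the XLA module names on its module line.
// Each name is fingerprinted individually, deduplicated and ordered in a
// std::set, then chained with FingerprintCat64, so the result is independent
// of event order and repetition.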
uint64_t GetDevicePlaneFingerprint(const XPlane& plane) {
const XLine* xla_module_line = FindLineWithName(plane, kXlaModuleLineName);
if (!xla_module_line) return 0ULL;
XPlaneVisitor xplane(&plane);
XLineVisitor xline(&xplane, xla_module_line);
std::set<uint64_t> ordered_module_fps;
xline.ForEachEvent([&](const XEventVisitor& xevent) {
ordered_module_fps.insert(Fingerprint64(xevent.Name()));
});
if (ordered_module_fps.empty()) return 0ULL;
uint64_t output = 0ULL;
for (const auto& fp : ordered_module_fps) {
output = FingerprintCat64(output, fp);
}
return output;
}
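// Both lookups below advance a cursor monotonically through the line, so
// queries must be issued in non-decreasing time order; an out-of-order query
// can miss events that lie before the cursor.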
std::optional<XEventVisitor> XEventContextTracker::GetContainingEvent(
const Timespan& event) {
if (!line_) return std::nullopt;
if (current_index_ != -1) {
XEventVisitor current_event(plane_, line_, &line_->events(current_index_));
if (current_event.GetTimespan().Includes(event)) {
return current_event;
}
}
for (int i = current_index_ + 1; i < line_->events_size(); ++i) {
XEventVisitor current_event(plane_, line_, &line_->events(i));
if (current_event.TimestampPs() > event.end_ps()) break;
if (current_event.EndTimestampPs() < event.begin_ps()) continue;
current_index_ = i;
if (current_event.GetTimespan().Includes(event)) {
return current_event;
}
break;
}
return std::nullopt;
}
std::optional<XEventVisitor> XEventContextTracker::GetOverlappingEvent(
const Timespan& event) {
if (!line_) return std::nullopt;
if (current_index_ != -1) {
XEventVisitor current_event(plane_, line_, &line_->events(current_index_));
if (current_event.GetTimespan().Overlaps(event)) {
return current_event;
}
}
for (int i = current_index_ + 1; i < line_->events_size(); ++i) {
XEventVisitor current_event(plane_, line_, &line_->events(i));
if (current_event.TimestampPs() > event.end_ps()) break;
if (current_event.EndTimestampPs() < event.begin_ps()) continue;
current_index_ = i;
if (current_event.GetTimespan().Overlaps(event)) {
return current_event;
}
break;
}
return std::nullopt;
}
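// Collapses an op-level trace into an aggregated view: step lines are copied
// through as-is, while events on XLA/TF op lines are grouped by (line, group
// id, event metadata) and emitted once each with their occurrence count,
// total and minimum duration, and self time (total minus children's
// duration). The total profiled op duration is recorded as a plane stat.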
void AggregateXPlane(const XPlane& full_trace, XPlane& aggregated_trace) {
struct EventStat {
tsl::Stat<int64_t> stat;
int64_t children_duration;
};
  using StatByEvent = absl::flat_hash_map<int64_t, EventStat>;
  using StatByGroup = absl::flat_hash_map<int64_t, StatByEvent>;
  absl::flat_hash_map<int64_t, StatByGroup> stats;
const XPlaneVisitor& plane = CreateTfXPlaneVisitor(&full_trace);
XPlaneBuilder aggregated_plane(&aggregated_trace);
aggregated_plane.SetName(plane.Name());
uint64_t first_op_start_ps = kint64max;
uint64_t last_op_end_ps = 0;
plane.ForEachLine([&](const XLineVisitor& line) {
if (line.Name() == kStepLineName ||
line.Name() == kSparseCoreStepLineName) {
XLineBuilder aggregated_line =
aggregated_plane.GetOrCreateLine(line.Id());
aggregated_line.SetName(kStepLineName);
line.ForEachEvent([&](const XEventVisitor& event) {
CopyEvent(event, plane, full_trace, 0LL, aggregated_plane,
aggregated_line);
});
}
if (!IsOpLineName(line.Name())) return;
XLineBuilder aggregated_line = aggregated_plane.GetOrCreateLine(line.Id());
aggregated_line.SetName(line.Name());
std::vector<XEventVisitor> event_stack;
line.ForEachEvent([&](XEventVisitor event) {
first_op_start_ps = first_op_start_ps <= event.TimestampPs()
? first_op_start_ps
: event.TimestampPs();
last_op_end_ps = last_op_end_ps >= event.EndTimestampPs()
? last_op_end_ps
: event.EndTimestampPs();
const auto& group_stat = event.GetStat(StatType::kGroupId);
int64_t group_id =
group_stat.has_value() ? group_stat->IntOrUintValue() : kint64max;
StatByEvent& line_stats = stats[line.Id()][group_id];
line_stats[event.Id()].stat.UpdateStat(event.DurationPs());
DCHECK(event_stack.empty() || !(event < event_stack.back()));
while (!event_stack.empty() &&
!event_stack.back().GetTimespan().Includes(event.GetTimespan())) {
event_stack.pop_back();
}
if (!event_stack.empty()) {
line_stats[event_stack.back().Id()].children_duration +=
event.DurationPs();
}
event_stack.push_back(std::move(event));
});
});
uint64_t total_time_ps =
(last_op_end_ps && last_op_end_ps > first_op_start_ps)
? last_op_end_ps - first_op_start_ps
: 0;
aggregated_plane.AddStatValue(
*aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kTotalProfileDurationPs)),
total_time_ps);
XStatMetadata* kMinDurationPs = aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kMinDurationPs));
XStatMetadata* kSelfDurationPs = aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kSelfDurationPs));
XStatMetadata* kGroupId = aggregated_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kGroupId));
for (const auto& [line_id, stats_by_group] : stats) {
XLineBuilder aggregated_line = aggregated_plane.GetOrCreateLine(line_id);
for (const auto& [group_id, stat_by_event] : stats_by_group) {
for (const auto& [event_id, event_stat] : stat_by_event) {
const auto& src_event_metadata = *plane.GetEventMetadata(event_id);
XEventMetadata& event_metadata =
*aggregated_plane.GetOrCreateEventMetadata(
src_event_metadata.name());
CopyEventMetadata(src_event_metadata, plane, event_metadata,
aggregated_plane);
XEventBuilder aggregated_event =
aggregated_line.AddEvent(event_metadata);
aggregated_event.SetNumOccurrences(event_stat.stat.count());
aggregated_event.SetDurationPs(event_stat.stat.sum());
if (group_id != kint64max) {
aggregated_event.AddStatValue(*kGroupId, group_id);
}
if (event_stat.stat.count() > 1) {
aggregated_event.AddStatValue(*kMinDurationPs, event_stat.stat.min());
}
if (event_stat.children_duration != 0) {
aggregated_event.AddStatValue(
*kSelfDurationPs,
event_stat.stat.sum() - event_stat.children_duration);
}
}
}
}
}
bool IsCustomPlane(const XPlane& plane) {
constexpr absl::string_view kLegacyCustomPlanePrefix = "/custom:";
return absl::StartsWith(plane.name(), kCustomPlanePrefix) ||
absl::StartsWith(plane.name(), kLegacyCustomPlanePrefix);
}
bool IsHostPlane(const XPlane& plane) {
return plane.name() == kHostThreadsPlaneName ||
plane.name() == kHostCpusPlaneName ||
plane.name() == kTFStreamzPlaneName ||
plane.name() == kMetadataPlaneName ||
plane.name() == kSyscallsPlaneName ||
plane.name() == kPythonTracerPlaneName ||
plane.name() == kCuptiDriverApiPlaneName;
}
bool IsDevicePlane(const XPlane& plane) {
if (IsHostPlane(plane)) return false;
return absl::StartsWith(plane.name(), "/device") ||
absl::StartsWith(plane.name(), kTpuNonCorePlaneNamePrefix) ||
IsCustomPlane(plane);
}
}
} | #include "xla/tsl/profiler/utils/xplane_utils.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::testing::Property;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
#if defined(PLATFORM_GOOGLE)
using ::testing::EqualsProto;
using ::testing::proto::IgnoringFields;
using ::testing::proto::IgnoringRepeatedFieldOrdering;
using ::testing::proto::Partially;
#endif
XEvent CreateEvent(int64_t offset_ps, int64_t duration_ps) {
XEvent event;
event.set_offset_ps(offset_ps);
event.set_duration_ps(duration_ps);
return event;
}
TEST(XPlaneUtilsTest, AddAndRemovePlanes) {
XSpace space;
auto* p1 = FindOrAddMutablePlaneWithName(&space, "p1");
EXPECT_EQ(p1, FindPlaneWithName(space, "p1"));
auto* p2 = FindOrAddMutablePlaneWithName(&space, "p2");
EXPECT_EQ(p2, FindPlaneWithName(space, "p2"));
auto* p3 = FindOrAddMutablePlaneWithName(&space, "p3");
EXPECT_EQ(p3, FindPlaneWithName(space, "p3"));
RemovePlane(&space, p2);
EXPECT_EQ(space.planes_size(), 2);
EXPECT_EQ(p1, FindPlaneWithName(space, "p1"));
EXPECT_EQ(p3, FindPlaneWithName(space, "p3"));
RemovePlane(&space, p1);
EXPECT_EQ(space.planes_size(), 1);
EXPECT_EQ(p3, FindPlaneWithName(space, "p3"));
RemovePlane(&space, p3);
EXPECT_EQ(space.planes_size(), 0);
}
TEST(XPlaneUtilsTest, RemoveEmptyPlanes) {
XSpace space;
RemoveEmptyPlanes(&space);
EXPECT_EQ(space.planes_size(), 0);
auto* plane1 = space.add_planes();
plane1->set_name("p1");
plane1->add_lines()->set_name("p1l1");
plane1->add_lines()->set_name("p1l2");
auto* plane2 = space.add_planes();
plane2->set_name("p2");
auto* plane3 = space.add_planes();
plane3->set_name("p3");
plane3->add_lines()->set_name("p3l1");
auto* plane4 = space.add_planes();
plane4->set_name("p4");
RemoveEmptyPlanes(&space);
ASSERT_EQ(space.planes_size(), 2);
EXPECT_EQ(space.planes(0).name(), "p1");
EXPECT_EQ(space.planes(1).name(), "p3");
}
TEST(XPlaneUtilsTest, RemoveEmptyLines) {
XPlane plane;
RemoveEmptyLines(&plane);
EXPECT_EQ(plane.lines_size(), 0);
auto* line1 = plane.add_lines();
line1->set_name("l1");
line1->add_events();
line1->add_events();
auto* line2 = plane.add_lines();
line2->set_name("l2");
auto* line3 = plane.add_lines();
line3->set_name("l3");
line3->add_events();
auto* line4 = plane.add_lines();
line4->set_name("l4");
RemoveEmptyLines(&plane);
ASSERT_EQ(plane.lines_size(), 2);
EXPECT_EQ(plane.lines(0).name(), "l1");
EXPECT_EQ(plane.lines(1).name(), "l3");
}
TEST(XPlaneUtilsTest, RemoveLine) {
XPlane plane;
const XLine* line1 = plane.add_lines();
const XLine* line2 = plane.add_lines();
const XLine* line3 = plane.add_lines();
RemoveLine(&plane, line2);
ASSERT_EQ(plane.lines_size(), 2);
EXPECT_EQ(&plane.lines(0), line1);
EXPECT_EQ(&plane.lines(1), line3);
}
TEST(XPlaneUtilsTest, RemoveEvents) {
XLine line;
const XEvent* event1 = line.add_events();
const XEvent* event2 = line.add_events();
const XEvent* event3 = line.add_events();
const XEvent* event4 = line.add_events();
RemoveEvents(&line, {event1, event3});
ASSERT_EQ(line.events_size(), 2);
EXPECT_EQ(&line.events(0), event2);
EXPECT_EQ(&line.events(1), event4);
}
TEST(XPlaneUtilsTest, SortXPlaneTest) {
XPlane plane;
XLine* line = plane.add_lines();
*line->add_events() = CreateEvent(200, 100);
*line->add_events() = CreateEvent(100, 100);
*line->add_events() = CreateEvent(120, 50);
*line->add_events() = CreateEvent(120, 30);
SortXPlane(&plane);
ASSERT_EQ(plane.lines_size(), 1);
ASSERT_EQ(plane.lines(0).events_size(), 4);
EXPECT_EQ(plane.lines(0).events(0).offset_ps(), 100);
EXPECT_EQ(plane.lines(0).events(0).duration_ps(), 100);
EXPECT_EQ(plane.lines(0).events(1).offset_ps(), 120);
EXPECT_EQ(plane.lines(0).events(1).duration_ps(), 50);
EXPECT_EQ(plane.lines(0).events(2).offset_ps(), 120);
EXPECT_EQ(plane.lines(0).events(2).duration_ps(), 30);
EXPECT_EQ(plane.lines(0).events(3).offset_ps(), 200);
EXPECT_EQ(plane.lines(0).events(3).duration_ps(), 100);
}
namespace {
XLineBuilder CreateXLine(XPlaneBuilder* plane, absl::string_view name,
absl::string_view display, int64_t id,
int64_t timestamp_ns) {
XLineBuilder line = plane->GetOrCreateLine(id);
line.SetName(name);
line.SetTimestampNs(timestamp_ns);
line.SetDisplayNameIfEmpty(display);
return line;
}
XEventBuilder CreateXEvent(XPlaneBuilder* plane, XLineBuilder line,
absl::string_view event_name,
std::optional<absl::string_view> display,
int64_t offset_ns, int64_t duration_ns) {
XEventMetadata* event_metadata = plane->GetOrCreateEventMetadata(event_name);
if (display) event_metadata->set_display_name(std::string(*display));
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetOffsetNs(offset_ns);
event.SetDurationNs(duration_ns);
return event;
}
template <typename T, typename V>
void CreateXStats(XPlaneBuilder* plane, T* stats_owner,
absl::string_view stats_name, V stats_value) {
stats_owner->AddStatValue(*plane->GetOrCreateStatMetadata(stats_name),
stats_value);
}
void CheckXLine(const XLine& line, absl::string_view name,
absl::string_view display, int64_t start_time_ns,
int64_t events_size) {
EXPECT_EQ(line.name(), name);
EXPECT_EQ(line.display_name(), display);
EXPECT_EQ(line.timestamp_ns(), start_time_ns);
EXPECT_EQ(line.events_size(), events_size);
}
void CheckXEvent(const XEvent& event, const XPlane& plane,
absl::string_view name, absl::string_view display,
int64_t offset_ns, int64_t duration_ns, int64_t stats_size) {
const XEventMetadata& event_metadata =
plane.event_metadata().at(event.metadata_id());
EXPECT_EQ(event_metadata.name(), name);
EXPECT_EQ(event_metadata.display_name(), display);
EXPECT_EQ(event.offset_ps(), NanoToPico(offset_ns));
EXPECT_EQ(event.duration_ps(), NanoToPico(duration_ns));
EXPECT_EQ(event.stats_size(), stats_size);
}
}
TEST(XPlaneUtilsTest, MergeXPlaneTest) {
XPlane src_plane, dst_plane;
constexpr int64_t kLineIdOnlyInSrcPlane = 1LL;
constexpr int64_t kLineIdOnlyInDstPlane = 2LL;
constexpr int64_t kLineIdInBothPlanes = 3LL;
constexpr int64_t kLineIdInBothPlanes2 = 4LL;
{
XPlaneBuilder src(&src_plane);
CreateXStats(&src, &src, "plane_stat1", 1);
CreateXStats(&src, &src, "plane_stat3", 3.0);
auto l1 = CreateXLine(&src, "l1", "d1", kLineIdOnlyInSrcPlane, 100);
auto e1 = CreateXEvent(&src, l1, "event1", "display1", 1, 2);
CreateXStats(&src, &e1, "event_stat1", 2.0);
auto e2 = CreateXEvent(&src, l1, "event2", std::nullopt, 3, 4);
CreateXStats(&src, &e2, "event_stat2", 3);
auto l2 = CreateXLine(&src, "l2", "d2", kLineIdInBothPlanes, 200);
auto e3 = CreateXEvent(&src, l2, "event3", std::nullopt, 5, 7);
CreateXStats(&src, &e3, "event_stat3", 2.0);
auto e4 = CreateXEvent(&src, l2, "event4", std::nullopt, 6, 8);
CreateXStats(&src, &e4, "event_stat4", 3);
CreateXStats(&src, &e4, "event_stat5", 3);
auto l5 = CreateXLine(&src, "l5", "d5", kLineIdInBothPlanes2, 700);
CreateXEvent(&src, l5, "event51", std::nullopt, 9, 10);
CreateXEvent(&src, l5, "event52", std::nullopt, 11, 12);
}
{
XPlaneBuilder dst(&dst_plane);
CreateXStats(&dst, &dst, "plane_stat2", 2);
CreateXStats(&dst, &dst, "plane_stat3", 4);
auto l3 = CreateXLine(&dst, "l3", "d3", kLineIdOnlyInDstPlane, 300);
auto e5 = CreateXEvent(&dst, l3, "event5", std::nullopt, 11, 2);
CreateXStats(&dst, &e5, "event_stat6", 2.0);
auto e6 = CreateXEvent(&dst, l3, "event6", std::nullopt, 13, 4);
CreateXStats(&dst, &e6, "event_stat7", 3);
auto l2 = CreateXLine(&dst, "l4", "d4", kLineIdInBothPlanes, 400);
auto e7 = CreateXEvent(&dst, l2, "event7", std::nullopt, 15, 7);
CreateXStats(&dst, &e7, "event_stat8", 2.0);
auto e8 = CreateXEvent(&dst, l2, "event8", "display8", 16, 8);
CreateXStats(&dst, &e8, "event_stat9", 3);
auto l6 = CreateXLine(&dst, "l6", "d6", kLineIdInBothPlanes2, 300);
CreateXEvent(&dst, l6, "event61", std::nullopt, 21, 10);
CreateXEvent(&dst, l6, "event62", std::nullopt, 22, 12);
}
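  // Lines with the same id are merged: id 3 keeps the earlier src timestamp
  // (200ns), shifting dst events by +200ns, while id 4 keeps dst's 300ns,
  // shifting src events by +400ns.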
MergePlanes(src_plane, &dst_plane);
XPlaneVisitor plane(&dst_plane);
EXPECT_EQ(dst_plane.lines_size(), 4);
EXPECT_EQ(dst_plane.stats_size(), 3);
absl::flat_hash_map<absl::string_view, absl::string_view> plane_stats;
plane.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "plane_stat1") {
EXPECT_EQ(stat.IntValue(), 1);
} else if (stat.Name() == "plane_stat2") {
EXPECT_EQ(stat.IntValue(), 2);
} else if (stat.Name() == "plane_stat3") {
EXPECT_EQ(stat.DoubleValue(), 3.0);
} else {
EXPECT_TRUE(false);
}
});
EXPECT_EQ(dst_plane.stat_metadata_size(), 12);
{
const XLine& line = dst_plane.lines(0);
CheckXLine(line, "l3", "d3", 300, 2);
CheckXEvent(line.events(0), dst_plane, "event5", "", 11, 2, 1);
CheckXEvent(line.events(1), dst_plane, "event6", "", 13, 4, 1);
}
{
const XLine& line = dst_plane.lines(1);
CheckXLine(line, "l4", "d4", 200, 4);
CheckXEvent(line.events(0), dst_plane, "event7", "", 215, 7, 1);
CheckXEvent(line.events(1), dst_plane, "event8", "display8", 216, 8, 1);
CheckXEvent(line.events(2), dst_plane, "event3", "", 5, 7, 1);
CheckXEvent(line.events(3), dst_plane, "event4", "", 6, 8, 2);
}
{
const XLine& line = dst_plane.lines(2);
CheckXLine(line, "l6", "d6", 300, 4);
CheckXEvent(line.events(0), dst_plane, "event61", "", 21, 10, 0);
CheckXEvent(line.events(1), dst_plane, "event62", "", 22, 12, 0);
CheckXEvent(line.events(2), dst_plane, "event51", "", 409, 10, 0);
CheckXEvent(line.events(3), dst_plane, "event52", "", 411, 12, 0);
}
{
const XLine& line = dst_plane.lines(3);
CheckXLine(line, "l1", "d1", 100, 2);
CheckXEvent(line.events(0), dst_plane, "event1", "display1", 1, 2, 1);
CheckXEvent(line.events(1), dst_plane, "event2", "", 3, 4, 1);
}
}
TEST(XPlaneUtilsTest, FindPlanesWithPrefix) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:2");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:3");
XPlane* p4 = FindOrAddMutablePlaneWithName(&xspace, "test-do-not-include:0");
std::vector<const XPlane*> xplanes =
FindPlanesWithPrefix(xspace, "test-prefix");
ASSERT_EQ(4, xplanes.size());
for (const XPlane* plane : xplanes) {
ASSERT_NE(p4, plane);
}
}
TEST(XplaneUtilsTest, FindMutablePlanesWithPrefix) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:2");
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:3");
XPlane* p4 = FindOrAddMutablePlaneWithName(&xspace, "test-do-not-include:0");
std::vector<XPlane*> xplanes =
FindMutablePlanesWithPrefix(&xspace, "test-prefix");
ASSERT_EQ(4, xplanes.size());
for (XPlane* plane : xplanes) {
ASSERT_NE(p4, plane);
}
}
TEST(XplaneUtilsTest, FindPlanesWithPredicate) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
XPlane* p1 = FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
std::vector<const XPlane*> xplanes = FindPlanes(
xspace,
[](const XPlane& xplane) { return xplane.name() == "test-prefix:1"; });
ASSERT_EQ(1, xplanes.size());
ASSERT_EQ(p1, xplanes[0]);
}
TEST(XplaneUtilsTest, FindMutablePlanesWithPredicate) {
XSpace xspace;
FindOrAddMutablePlaneWithName(&xspace, "test-prefix:0");
XPlane* p1 = FindOrAddMutablePlaneWithName(&xspace, "test-prefix:1");
std::vector<XPlane*> xplanes = FindMutablePlanes(
&xspace, [](XPlane& xplane) { return xplane.name() == "test-prefix:1"; });
ASSERT_EQ(1, xplanes.size());
ASSERT_EQ(p1, xplanes[0]);
}
TEST(XplaneUtilsTest, TestAggregateXPlanes) {
XPlane xplane;
XPlaneBuilder builder(&xplane);
auto& event_metadata1 = *builder.GetOrCreateEventMetadata("EventMetadata1");
auto& event_metadata2 = *builder.GetOrCreateEventMetadata("EventMetadata2");
auto& event_metadata3 = *builder.GetOrCreateEventMetadata("EventMetadata3");
auto& event_metadata4 = *builder.GetOrCreateEventMetadata("EventMetadata4");
auto& step_event_metadata1 =
*builder.GetOrCreateEventMetadata("StepEventMetadata1");
auto& step_event_metadata2 =
*builder.GetOrCreateEventMetadata("StepEventMetadata2");
XLineBuilder step_line = builder.GetOrCreateLine(1);
step_line.SetName(kStepLineName);
XEventBuilder step1 = step_line.AddEvent(step_event_metadata1);
step1.SetOffsetNs(0);
step1.SetDurationNs(10);
XEventBuilder step2 = step_line.AddEvent(step_event_metadata2);
step2.SetOffsetNs(10);
step2.SetDurationNs(10);
XLineBuilder line = builder.GetOrCreateLine(2);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event1 = line.AddEvent(event_metadata1);
event1.SetOffsetNs(0);
event1.SetDurationNs(5);
XEventBuilder event3 = line.AddEvent(event_metadata3);
event3.SetOffsetNs(0);
event3.SetDurationNs(2);
XEventBuilder event2 = line.AddEvent(event_metadata2);
event2.SetOffsetNs(5);
event2.SetDurationNs(5);
XEventBuilder event4 = line.AddEvent(event_metadata2);
event4.SetOffsetNs(10);
event4.SetDurationNs(5);
XEventBuilder event5 = line.AddEvent(event_metadata4);
event5.SetOffsetNs(15);
event5.SetDurationNs(6);
XEventBuilder event6 = line.AddEvent(event_metadata1);
event6.SetOffsetNs(15);
event6.SetDurationNs(4);
XEventBuilder event7 = line.AddEvent(event_metadata3);
event7.SetOffsetNs(15);
event7.SetDurationNs(3);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
#if defined(PLATFORM_GOOGLE)
ASSERT_THAT(
aggregated_xplane,
IgnoringFields(
{"tensorflow.profiler.XEvent.metadata_id",
"tensorflow.profiler.XPlane.event_metadata"},
IgnoringRepeatedFieldOrdering(EqualsProto(
R"pb(lines {
id: 1
name: "Steps"
events { metadata_id: 1 offset_ps: 0 duration_ps: 10000 }
events {
metadata_id: 2
offset_ps: 10000
duration_ps: 10000
}
}
lines {
id: 2
name: "Framework Ops"
events {
metadata_id: 3
duration_ps: 10000
stats { metadata_id: 2 int64_value: 5000 }
num_occurrences: 2
}
events {
metadata_id: 4
duration_ps: 5000
stats { metadata_id: 2 int64_value: 2000 }
num_occurrences: 2
}
events {
metadata_id: 5
duration_ps: 9000
stats { metadata_id: 2 int64_value: 4000 }
stats { metadata_id: 3 int64_value: 4000 }
num_occurrences: 2
}
events {
metadata_id: 6
duration_ps: 6000
stats { metadata_id: 3 int64_value: 2000 }
num_occurrences: 1
}
}
stat_metadata {
key: 1
value { id: 1 name: "total_profile_duration_ps" }
}
stat_metadata {
key: 2
value { id: 2 name: "min_duration_ps" }
}
stat_metadata {
key: 3
value { id: 3 name: "self_duration_ps" }
}
stat_metadata {
key: 4
value { id: 4 name: "group_id" }
}
stats { metadata_id: 1 uint64_value: 21000 }
)pb"))));
std::vector<std::string> event_metadata_names;
for (const auto& [id, event_metadata] : aggregated_xplane.event_metadata()) {
event_metadata_names.push_back(event_metadata.name());
}
EXPECT_THAT(event_metadata_names,
UnorderedElementsAre("EventMetadata1", "EventMetadata2",
"EventMetadata3", "EventMetadata4",
"StepEventMetadata1", "StepEventMetadata2"));
#endif
}
TEST(XPlanuUtilsTest, TestInstantEventDoesNotFail) {
XPlane xplane;
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata* event_metadata1 = xplane_builder.GetOrCreateEventMetadata(1);
XEventMetadata* event_metadata2 = xplane_builder.GetOrCreateEventMetadata(2);
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event1 = line.AddEvent(*event_metadata1);
XEventBuilder event2 = line.AddEvent(*event_metadata2);
event1.SetOffsetNs(1);
event1.SetDurationNs(0);
event2.SetOffsetNs(1);
event2.SetDurationNs(0);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
EXPECT_THAT(aggregated_xplane.lines(),
UnorderedElementsAre(Property(&XLine::events, SizeIs(2))));
}
TEST(XplaneutilsTest, TestEventMetadataStatsAreCopied) {
XPlane xplane;
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
"TestFunction");
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetDurationNs(0);
event.SetOffsetNs(0);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
XPlaneVisitor visitor = CreateTfXPlaneVisitor(&aggregated_xplane);
XEventMetadataVisitor metadata_visitor(&visitor, visitor.GetEventMetadata(1));
std::optional<XStatVisitor> stat = metadata_visitor.GetStat(StatType::kTfOp);
ASSERT_TRUE(stat.has_value());
EXPECT_EQ(stat->Name(), "tf_op");
EXPECT_EQ(stat->StrOrRefValue(), "TestFunction");
}
TEST(XplaneutilsTest, TestEventMetadataStatsAreCopiedForRefValue) {
XPlane xplane;
XPlaneBuilder xplane_builder(&xplane);
XEventMetadata* event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
XStatsBuilder<XEventMetadata> stats(event_metadata, &xplane_builder);
stats.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kTfOp)),
*xplane_builder.GetOrCreateStatMetadata("TestFunction"));
XLineBuilder line = xplane_builder.GetOrCreateLine(1);
line.SetName(kTensorFlowOpLineName);
XEventBuilder event = line.AddEvent(*event_metadata);
event.SetDurationNs(0);
event.SetOffsetNs(0);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
XPlaneVisitor visitor = CreateTfXPlaneVisitor(&aggregated_xplane);
XEventMetadataVisitor metadata_visitor(&visitor, visitor.GetEventMetadata(1));
std::optional<XStatVisitor> stat = metadata_visitor.GetStat(StatType::kTfOp);
ASSERT_TRUE(stat.has_value());
EXPECT_EQ(stat->Name(), "tf_op");
EXPECT_EQ(stat->StrOrRefValue(), "TestFunction");
}
TEST(XplaneutilsTest, TestIsXSpaceGrouped) {
XSpace space;
{
XPlaneBuilder p1(space.add_planes());
auto l1 = CreateXLine(&p1, "l1", "d1", 1, 100);
auto e1 = CreateXEvent(&p1, l1, "event1", "display1", 1, 2);
CreateXStats(&p1, &e1, "event_stat1", 2.0);
}
EXPECT_FALSE(IsXSpaceGrouped(space));
{
XPlaneBuilder p2(space.add_planes());
auto l2 = CreateXLine(&p2, "l2", "d2", 1, 100);
auto e2 = CreateXEvent(&p2, l2, "event2", "display2", 1, 2);
CreateXStats(&p2, &e2, "group_id", 1);
}
LOG(ERROR) << space.DebugString();
EXPECT_TRUE(IsXSpaceGrouped(space));
}
TEST(XplaneutilsTest, TestIsHostPlane) {
XSpace xspace;
auto xplane_host_thread = FindOrAddMutablePlaneWithName(&xspace, "/host:CPU");
auto xplane_host_cpu = FindOrAddMutablePlaneWithName(&xspace, "Host CPUs");
auto xplane_tfstreamz =
FindOrAddMutablePlaneWithName(&xspace, "/host:tfstreamz");
auto xplane_metadata =
FindOrAddMutablePlaneWithName(&xspace, "/host:metadata");
auto xplane_syscalls = FindOrAddMutablePlaneWithName(&xspace, "Syscalls");
auto xplane_python_tracer =
FindOrAddMutablePlaneWithName(&xspace, "/host:python-tracer");
auto xplane_custom_prefix =
FindOrAddMutablePlaneWithName(&xspace, "/device:CUSTOM:123");
auto xplane_legacy_custom =
FindOrAddMutablePlaneWithName(&xspace, "/custom:456");
auto xplane_cupti = FindOrAddMutablePlaneWithName(&xspace, "/host:CUPTI");
EXPECT_TRUE(IsHostPlane(*xplane_host_thread));
EXPECT_TRUE(IsHostPlane(*xplane_host_cpu));
EXPECT_TRUE(IsHostPlane(*xplane_tfstreamz));
EXPECT_TRUE(IsHostPlane(*xplane_metadata));
EXPECT_TRUE(IsHostPlane(*xplane_syscalls));
EXPECT_TRUE(IsHostPlane(*xplane_python_tracer));
EXPECT_FALSE(IsHostPlane(*xplane_custom_prefix));
EXPECT_FALSE(IsHostPlane(*xplane_legacy_custom));
EXPECT_TRUE(IsHostPlane(*xplane_cupti));
}
TEST(XplaneutilsTest, TestIsDevicePlane) {
XSpace xspace;
auto xplane_host_thread = FindOrAddMutablePlaneWithName(&xspace, "/host:CPU");
auto xplane_device_thread =
FindOrAddMutablePlaneWithName(&xspace, "/device:TPU");
auto xplane_task_env_thread =
FindOrAddMutablePlaneWithName(&xspace, "Task Environment");
auto xplane_custom_prefix =
FindOrAddMutablePlaneWithName(&xspace, "/device:CUSTOM:123");
auto xplane_legacy_custom =
FindOrAddMutablePlaneWithName(&xspace, "/custom:456");
EXPECT_FALSE(IsDevicePlane(*xplane_host_thread));
EXPECT_FALSE(IsDevicePlane(*xplane_task_env_thread));
EXPECT_TRUE(IsDevicePlane(*xplane_device_thread));
EXPECT_TRUE(IsDevicePlane(*xplane_custom_prefix));
EXPECT_TRUE(IsDevicePlane(*xplane_legacy_custom));
}
TEST(XplaneUtilsTest, XPlaneGroupingPropagatesStep) {
XPlane xplane;
XPlaneBuilder builder(&xplane);
XStatMetadata* kGroupId =
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = builder.GetOrCreateLine(1);
line.SetName(kStepLineName);
XEventMetadata* event_metadata = builder.GetOrCreateEventMetadata(1);
event_metadata->set_name("Step 1");
XEventBuilder event_builder = line.AddEvent(*event_metadata);
event_builder.AddStatValue(*kGroupId, 1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
XEventMetadata* event_metadata2 = builder.GetOrCreateEventMetadata(2);
event_metadata2->set_name("Step 2");
XEventBuilder event_builder2 = line.AddEvent(*event_metadata2);
event_builder2.AddStatValue(*kGroupId, 2);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
#if defined(PLATFORM_GOOGLE)
EXPECT_THAT(aggregated_xplane, Partially(EqualsProto(xplane)));
#endif
}
TEST(XplaneUtilsTest, XPlaneGroupingPropagatesGroupId) {
XPlane xplane;
XPlaneBuilder builder(&xplane);
XEventMetadata* event_metadata1 = builder.GetOrCreateEventMetadata(1);
event_metadata1->set_name("EventMetadata1");
XStatMetadata* kGroupId =
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
XLineBuilder line = builder.GetOrCreateLine(1);
line.SetName(kXlaOpLineName);
XEventBuilder event_builder = line.AddEvent(*event_metadata1);
event_builder.SetDurationNs(100);
event_builder.SetOffsetNs(100);
event_builder.AddStatValue(*kGroupId, 1);
XEventBuilder event_builder2 = line.AddEvent(*event_metadata1);
event_builder2.AddStatValue(*kGroupId, 2);
event_builder2.SetDurationNs(100);
event_builder2.SetOffsetNs(300);
XPlane aggregated_xplane;
AggregateXPlane(xplane, aggregated_xplane);
EXPECT_THAT(aggregated_xplane.lines(),
UnorderedElementsAre(Property(&XLine::events, SizeIs(2))));
XPlaneVisitor visitor = CreateTfXPlaneVisitor(&aggregated_xplane);
visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/xplane_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/xplane_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0d905a1f-160e-4f33-a14f-417150c8fc86 | cpp | tensorflow/tensorflow | xplane_builder | third_party/xla/xla/tsl/profiler/utils/xplane_builder.cc | third_party/xla/xla/tsl/profiler/utils/xplane_builder_test.cc | #include "xla/tsl/profiler/utils/xplane_builder.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/math_utils.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
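// Indexes the plane's existing event and stat metadata by name and its lines
// by id so the GetOrCreate* accessors below can reuse entries instead of
// allocating duplicates, and tracks the largest ids seen so Create* can hand
// out fresh ones.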
XPlaneBuilder::XPlaneBuilder(XPlane* plane)
: XStatsBuilder<XPlane>(plane, this), plane_(plane) {
for (auto& id_and_metadata : *plane->mutable_event_metadata()) {
auto& metadata = id_and_metadata.second;
last_event_metadata_id_ =
std::max<int64_t>(last_event_metadata_id_, metadata.id());
if (!metadata.name().empty()) {
event_metadata_by_name_.try_emplace(metadata.name(), &metadata);
}
}
for (auto& id_and_metadata : *plane->mutable_stat_metadata()) {
auto& metadata = id_and_metadata.second;
last_stat_metadata_id_ =
std::max<int64_t>(last_stat_metadata_id_, metadata.id());
if (!metadata.name().empty()) {
stat_metadata_by_name_.try_emplace(metadata.name(), &metadata);
}
}
for (XLine& line : *plane->mutable_lines()) {
lines_by_id_.try_emplace(line.id(), &line);
}
}
XEventMetadata* XPlaneBuilder::GetOrCreateEventMetadata(int64_t metadata_id) {
XEventMetadata& metadata = (*plane_->mutable_event_metadata())[metadata_id];
metadata.set_id(metadata_id);
return &metadata;
}
XEventMetadata* XPlaneBuilder::CreateEventMetadata() {
return GetOrCreateEventMetadata(++last_event_metadata_id_);
}
XEventMetadata* XPlaneBuilder::GetOrCreateEventMetadata(
absl::string_view name) {
XEventMetadata*& metadata = event_metadata_by_name_[name];
if (metadata == nullptr) {
metadata = CreateEventMetadata();
metadata->set_name(std::string(name));
}
return metadata;
}
XEventMetadata* XPlaneBuilder::GetOrCreateEventMetadata(std::string&& name) {
XEventMetadata*& metadata = event_metadata_by_name_[name];
if (metadata == nullptr) {
metadata = CreateEventMetadata();
metadata->set_name(std::move(name));
}
return metadata;
}
std::vector<XEventMetadata*> XPlaneBuilder::GetOrCreateEventsMetadata(
const std::vector<absl::string_view>& names) {
std::vector<XEventMetadata*> metadata;
metadata.reserve(names.size());
for (absl::string_view name : names) {
metadata.push_back(GetOrCreateEventMetadata(name));
}
return metadata;
}
XEventMetadata* XPlaneBuilder::GetEventMetadata(absl::string_view name) const {
auto result = event_metadata_by_name_.find(name);
if (result == event_metadata_by_name_.end()) return nullptr;
return result->second;
}
XStatMetadata* XPlaneBuilder::GetStatMetadata(absl::string_view name) const {
auto result = stat_metadata_by_name_.find(name);
if (result == stat_metadata_by_name_.end()) return nullptr;
return result->second;
}
XStatMetadata* XPlaneBuilder::GetOrCreateStatMetadata(int64_t metadata_id) {
XStatMetadata& metadata = (*plane_->mutable_stat_metadata())[metadata_id];
metadata.set_id(metadata_id);
return &metadata;
}
const XStatMetadata* XPlaneBuilder::GetStatMetadata(int64_t metadata_id) const {
auto result = plane_->stat_metadata().find(metadata_id);
if (result == plane_->stat_metadata().end()) return nullptr;
return &(result->second);
}
XStatMetadata* XPlaneBuilder::CreateStatMetadata() {
return GetOrCreateStatMetadata(++last_stat_metadata_id_);
}
XStatMetadata* XPlaneBuilder::GetOrCreateStatMetadata(absl::string_view name) {
XStatMetadata*& metadata = stat_metadata_by_name_[name];
if (metadata == nullptr) {
metadata = CreateStatMetadata();
metadata->set_name(std::string(name));
}
return metadata;
}
XStatMetadata* XPlaneBuilder::GetOrCreateStatMetadata(std::string&& name) {
XStatMetadata*& metadata = stat_metadata_by_name_[name];
if (metadata == nullptr) {
metadata = CreateStatMetadata();
metadata->set_name(std::move(name));
}
return metadata;
}
XLineBuilder XPlaneBuilder::GetOrCreateLine(int64_t line_id) {
XLine*& line = lines_by_id_[line_id];
if (line == nullptr) {
line = plane_->add_lines();
line->set_id(line_id);
}
return XLineBuilder(line, this);
}
XEventBuilder XLineBuilder::AddEvent(const Timespan& timespan,
const XEventMetadata& metadata) {
XEvent* event = line_->add_events();
event->set_metadata_id(metadata.id());
XEventBuilder builder(line_, plane_, event);
builder.SetOffsetPs(timespan.begin_ps());
builder.SetDurationPs(timespan.duration_ps());
return builder;
}
XEventBuilder XLineBuilder::AddEvent(const XEventMetadata& metadata) {
XEvent* event = line_->add_events();
event->set_metadata_id(metadata.id());
return XEventBuilder(line_, plane_, event);
}
XEventBuilder XLineBuilder::AddEvent(const XEvent& event) {
XEvent* new_event = line_->add_events();
*new_event = event;
return XEventBuilder(line_, plane_, new_event);
}
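// Rebases the line to a new start timestamp; the picosecond delta is folded
// into every event's offset so absolute event times are preserved.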
void XLineBuilder::SetTimestampNsAndAdjustEventOffsets(int64_t timestamp_ns) {
int64_t offset_ps = NanoToPico(line_->timestamp_ns() - timestamp_ns);
line_->set_timestamp_ns(timestamp_ns);
if (offset_ps) {
for (auto& event : *line_->mutable_events()) {
event.set_offset_ps(event.offset_ps() + offset_ps);
}
}
}
}
} | #include "xla/tsl/profiler/utils/xplane_builder.h"
#include <string>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
TEST(TimespanTests, NonInstantSpanIncludesSingleTimeTests) {
XPlane plane;
XPlaneBuilder xplane_builder(&plane);
XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
XEventBuilder event_builder = xline_builder.AddEvent(
*xplane_builder.GetOrCreateEventMetadata("1st event"));
constexpr auto kBoolStat = true;
constexpr auto kInt32Stat = int32_t{1234};
constexpr auto kInt64Stat = int64_t{1234} << 32;
constexpr auto kUint32Stat = uint32_t{5678};
constexpr auto kUint64Stat = uint64_t{5678} << 32;
constexpr auto kFloatStat = 0.5f;
constexpr auto kDoubleStat = 1.0;
constexpr auto kStringStat = "abc";
constexpr auto kRefStat = "referenced abc";
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("bool stat"), kBoolStat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("int32 stat"), kInt32Stat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("int64 stat"), kInt64Stat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("uint32 stat"), kUint32Stat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("uint64 stat"), kUint64Stat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("string stat"), kStringStat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("float stat"), kFloatStat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("double stat"), kDoubleStat);
event_builder.AddStatValue(
*xplane_builder.GetOrCreateStatMetadata("ref stat"),
*xplane_builder.GetOrCreateStatMetadata(kRefStat));
XPlaneVisitor xplane_visitor(&plane);
EXPECT_EQ(xplane_visitor.NumLines(), 1);
int num_stats = 0;
xplane_visitor.ForEachLine([&](const XLineVisitor& xline) {
xline.ForEachEvent([&](const XEventVisitor& xevent) {
EXPECT_EQ(xevent.Name(), "1st event");
xevent.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "bool stat") {
EXPECT_EQ(stat.BoolValue(), kBoolStat);
num_stats++;
} else if (stat.Name() == "int32 stat") {
EXPECT_EQ(stat.IntValue(), kInt32Stat);
EXPECT_EQ(stat.IntOrUintValue(), kInt32Stat);
num_stats++;
} else if (stat.Name() == "int64 stat") {
EXPECT_EQ(stat.IntValue(), kInt64Stat);
EXPECT_EQ(stat.IntOrUintValue(), kInt64Stat);
num_stats++;
} else if (stat.Name() == "uint32 stat") {
EXPECT_EQ(stat.UintValue(), kUint32Stat);
EXPECT_EQ(stat.IntOrUintValue(), kUint32Stat);
num_stats++;
} else if (stat.Name() == "uint64 stat") {
EXPECT_EQ(stat.UintValue(), kUint64Stat);
EXPECT_EQ(stat.IntOrUintValue(), kUint64Stat);
num_stats++;
} else if (stat.Name() == "string stat") {
EXPECT_EQ(stat.StrOrRefValue(), kStringStat);
num_stats++;
} else if (stat.Name() == "float stat") {
EXPECT_EQ(stat.DoubleValue(), kFloatStat);
num_stats++;
} else if (stat.Name() == "double stat") {
EXPECT_EQ(stat.DoubleValue(), kDoubleStat);
num_stats++;
} else if (stat.Name() == "ref stat") {
EXPECT_EQ(stat.StrOrRefValue(), kRefStat);
num_stats++;
}
});
});
});
EXPECT_EQ(num_stats, 9);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/xplane_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/xplane_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d8e0070-730d-42fb-a5b6-de175a72d79a | cpp | tensorflow/tensorflow | tf_op_utils | third_party/xla/xla/tsl/profiler/utils/tf_op_utils.cc | third_party/xla/xla/tsl/profiler/utils/tf_op_utils_test.cc | #include "xla/tsl/profiler/utils/tf_op_utils.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tsl/platform/regexp.h"
namespace tsl {
namespace profiler {
namespace {
const absl::string_view kIterator = "Iterator";
const absl::string_view kSeparator = "::";
constexpr char kNameScopeSeparator = '/';
constexpr char kOpNameSuffixSeparator = '_';
bool IsInteger(absl::string_view str) {
int64_t unused;
return absl::SimpleAtoi(str, &unused);
}
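// Derives the op type from a full op name: the last name-scope component,
// minus a trailing "_<number>" uniquifier (e.g. "scope/MatMul_1" -> "MatMul").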
absl::string_view DeriveOpType(absl::string_view full_op_name) {
std::vector<absl::string_view> name_scopes_and_op_name =
absl::StrSplit(full_op_name, kNameScopeSeparator);
absl::string_view op_name = name_scopes_and_op_name.back();
std::vector<absl::string_view> op_type_and_maybe_suffix =
absl::StrSplit(op_name, kOpNameSuffixSeparator);
absl::string_view maybe_suffix = op_type_and_maybe_suffix.back();
absl::string_view op_type = op_name;
if (IsInteger(maybe_suffix)) {
op_type = op_name.substr(0, op_name.size() - maybe_suffix.size() - 1);
}
return op_type;
}
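// Classifies an event as one of the four memcpy directions by
// case-insensitive name prefix; returns std::nullopt for non-memcpy names.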
std::optional<TfOp> GetMemcpyOp(absl::string_view tf_op_fullname) {
TfOp tf_op;
tf_op.name = tf_op_fullname;
if (absl::StartsWithIgnoreCase(tf_op_fullname, "MEMCPYHToD")) {
tf_op.category = Category::kMemcpyHToD;
tf_op.type = kMemcpyHToDOp;
return tf_op;
}
if (absl::StartsWithIgnoreCase(tf_op_fullname, "MEMCPYDToH")) {
tf_op.category = Category::kMemcpyDToH;
tf_op.type = kMemcpyDToHOp;
return tf_op;
}
if (absl::StartsWithIgnoreCase(tf_op_fullname, "MEMCPYDToD")) {
tf_op.category = Category::kMemcpyDToD;
tf_op.type = kMemcpyDToDOp;
return tf_op;
} else if (absl::StartsWithIgnoreCase(tf_op_fullname, "MEMCPYHToH")) {
tf_op.category = Category::kMemcpyHToH;
tf_op.type = kMemcpyHToHOp;
return tf_op;
}
return std::nullopt;
}
}
const absl::string_view kUnknownOp = "";
const absl::string_view kDatasetOp = "Dataset";
const absl::string_view kMemcpyHToDOp = "MemcpyHToD";
const absl::string_view kMemcpyDToHOp = "MemcpyDToH";
const absl::string_view kMemcpyDToDOp = "MemcpyDToD";
const absl::string_view kMemcpyHToHOp = "MemcpyHToH";
bool IsTfOpName(absl::string_view op_name) {
static const LazyRE2 kTfOpNameRegEx = {"[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*"};
return RE2::FullMatch(op_name, *kTfOpNameRegEx);
}
bool IsTfOpType(absl::string_view op_type) {
static const LazyRE2 kTfOpTypeRegEx = {"[A-Z_][a-zA-Z0-9_]*"};
return RE2::FullMatch(op_type, *kTfOpTypeRegEx);
}
bool IsJaxOpType(absl::string_view op_type) {
static const LazyRE2 kJaxOpTypeRegEx = {"[a-z_][a-z0-9_]*(\\[.*\\])?"};
return RE2::FullMatch(op_type, *kJaxOpTypeRegEx);
}
bool IsJaxOpNameAndType(absl::string_view op_name, absl::string_view op_type) {
if (op_name.empty() || !IsJaxOpType(op_type)) return false;
std::vector<absl::string_view> split_result =
absl::StrSplit(op_name, kNameScopeSeparator);
return absl::StrContains(split_result.back(), op_type);
}
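// Splits "name:type" on the first colon and classifies the result as a
// TensorFlow op, a tf.data iterator, a JAX op (possibly carrying a [...]
// parameter list), or unknown.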
TfOp ParseTfOpFullname(absl::string_view tf_op_fullname) {
TfOp tf_op = {Category::kUnknown, tf_op_fullname, kUnknownOp};
std::vector<absl::string_view> parts =
absl::StrSplit(tf_op_fullname, absl::MaxSplits(':', 1));
if (parts.size() != 2) {
if (std::optional<TfOp> tfop = GetMemcpyOp(parts[0]); tfop.has_value()) {
return *tfop;
}
return tf_op;
}
if (parts[0] == kIterator) {
tf_op.category = Category::kTfData;
tf_op.type = kDatasetOp;
return tf_op;
}
if (IsTfOpName(parts[0]) && IsTfOpType(parts[1])) {
tf_op.category = Category::kTensorFlow;
tf_op.name = parts[0];
tf_op.type = parts[1];
return tf_op;
}
absl::string_view op_type =
parts[1].empty() ? DeriveOpType(parts[0]) : parts[1];
if (IsJaxOpType(op_type)) {
tf_op.category = Category::kJax;
tf_op.name = parts[0];
tf_op.type = op_type.substr(0, op_type.find('['));
return tf_op;
}
if (parts[1].empty()) {
tf_op.category = Category::kTensorFlow;
tf_op.name = parts[0];
tf_op.type = op_type;
return tf_op;
}
return tf_op;
}
std::vector<absl::string_view> ParseTfNameScopes(absl::string_view tf_op_name) {
std::vector<absl::string_view> name_scopes =
absl::StrSplit(tf_op_name, kNameScopeSeparator);
if (!name_scopes.empty()) name_scopes.pop_back();
return name_scopes;
}
std::vector<absl::string_view> ParseTfNameScopes(const TfOp& tf_op) {
return ParseTfNameScopes(tf_op.name);
}
std::string TfOpEventName(const TfOp& tf_op) {
std::string event_name;
if (tf_op.category == Category::kUnknown) {
event_name = std::string(absl::StripTrailingAsciiWhitespace(tf_op.name));
} else if (tf_op.category == Category::kTfData) {
event_name = DatasetOpEventName(tf_op.name);
} else {
event_name = std::string(tf_op.type);
}
return event_name;
}
std::string TfOpEventName(absl::string_view tf_op_fullname) {
return TfOpEventName(ParseTfOpFullname(tf_op_fullname));
}
std::string DatasetOpEventName(absl::string_view full_name) {
std::vector<absl::string_view> split_result =
absl::StrSplit(full_name, kSeparator);
return absl::StrCat(kIterator, kSeparator, split_result.back());
}
std::string IteratorName(absl::string_view full_name) {
std::vector<absl::string_view> split_result =
absl::StrSplit(full_name, kSeparator);
return std::string(split_result.back());
}
std::vector<absl::string_view> ParseTensorShapes(
absl::string_view tensor_shapes) {
absl::ConsumePrefix(&tensor_shapes, "(");
absl::ConsumeSuffix(&tensor_shapes, ")");
return absl::StrSplit(tensor_shapes, ';');
}
}
} | #include "xla/tsl/profiler/utils/tf_op_utils.h"
#include <vector>
#include "absl/strings/string_view.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(TfOpUtilsTest, TfOpTest) {
const absl::string_view kName = "OpName:OpType";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name, "OpName");
EXPECT_EQ(tf_op.type, "OpType");
EXPECT_EQ(TfOpEventName(kName), "OpType");
}
TEST(TfOpUtilsTest, InternalTfOpTest) {
const absl::string_view kName = "OpName:_InternalOpType";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name, "OpName");
EXPECT_EQ(tf_op.type, "_InternalOpType");
EXPECT_EQ(TfOpEventName(kName), "_InternalOpType");
}
TEST(TfOpUtilsTest, TfOpWithPathTest) {
const absl::string_view kName = "path/to/name:OpType";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name, "path/to/name");
EXPECT_EQ(tf_op.type, "OpType");
EXPECT_EQ(TfOpEventName(kName), "OpType");
}
TEST(TfOpUtilsTest, ShortDatasetOpTest) {
const absl::string_view kName = "Iterator::Batch";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTfData);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kDatasetOp);
EXPECT_EQ(TfOpEventName(kName), kName);
}
TEST(TfOpUtilsTest, LongDatasetOpTest) {
const absl::string_view kName = "Iterator::Batch::Map::TfRecord";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTfData);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kDatasetOp);
EXPECT_EQ(TfOpEventName(kName), "Iterator::TfRecord");
}
TEST(TfOpUtilsTest, TraceMeTest) {
const absl::string_view kName = "MyTraceMe";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kUnknown);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kUnknownOp);
EXPECT_EQ(TfOpEventName(kName), kName);
}
TEST(TfOpUtilsTest, TraceMeWithColonTest) {
const absl::string_view kName = "RunStep/Server:54635";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kUnknown);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kUnknownOp);
EXPECT_EQ(TfOpEventName(kName), kName);
}
TEST(TfOpUtilsTest, TraceMeWithDoubleColonTest) {
const absl::string_view kName = "XLA::StartProgram";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kUnknown);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kUnknownOp);
EXPECT_EQ(TfOpEventName(kName), kName);
}
TEST(TfOpUtilsTest, TraceMeWithTrailingWhitespaceTest) {
const absl::string_view kName = "SessionRun ";
const absl::string_view kNameTrimmed = "SessionRun";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kUnknown);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kUnknownOp);
EXPECT_EQ(TfOpEventName(kName), kNameTrimmed);
}
TEST(TfOpUtilsTest, InfeedEnqueueTest) {
const absl::string_view kName =
"input_pipeline_task0/while/body/_1/InfeedQueue/enqueue/"
"1:InfeedEnqueueTuple";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name,
"input_pipeline_task0/while/body/_1/InfeedQueue/enqueue/1");
EXPECT_EQ(tf_op.type, "InfeedEnqueueTuple");
EXPECT_EQ(TfOpEventName(kName), "InfeedEnqueueTuple");
EXPECT_TRUE(IsInfeedEnqueueOp(tf_op.type));
EXPECT_TRUE(IsInfeedEnqueueOp(tf_op));
}
TEST(TfOpUtilsTest, MemcpyHToDTest) {
const absl::string_view kName = "MemcpyHToD";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kMemcpyHToD);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kMemcpyHToDOp);
EXPECT_EQ(TfOpEventName(kName), kName);
EXPECT_TRUE(IsMemcpyHToDOp(tf_op.type));
EXPECT_TRUE(IsMemcpyHToDOp(tf_op));
}
TEST(TfOpUtilsTest, MemcpyDToHTest) {
const absl::string_view kName = "MemcpyDToH";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kMemcpyDToH);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kMemcpyDToHOp);
EXPECT_EQ(TfOpEventName(kName), kName);
EXPECT_TRUE(IsMemcpyDToHOp(tf_op));
}
TEST(TfOpUtilsTest, MemcpyDToDTest) {
const absl::string_view kName = "MemcpyDToD";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kMemcpyDToD);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kMemcpyDToDOp);
EXPECT_EQ(TfOpEventName(kName), kName);
EXPECT_TRUE(IsMemcpyDToDOp(tf_op));
}
TEST(TfOpUtilsTest, MemcpyHToHTest) {
const absl::string_view kName = "MemcpyHToH";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kMemcpyHToH);
EXPECT_EQ(tf_op.name, kName);
EXPECT_EQ(tf_op.type, kMemcpyHToHOp);
EXPECT_EQ(TfOpEventName(kName), kName);
EXPECT_TRUE(IsMemcpyHToHOp(tf_op));
}
TEST(TfOpUtilsTest, JaxOpTest) {
const absl::string_view kName = "op_name:op_type";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kJax);
EXPECT_EQ(tf_op.name, "op_name");
EXPECT_EQ(tf_op.type, "op_type");
EXPECT_EQ(TfOpEventName(kName), "op_type");
}
TEST(TfOpUtilsTest, JaxOpWithColonTest) {
const absl::string_view kName = "op_name/op_type:";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kJax);
EXPECT_EQ(tf_op.name, "op_name/op_type");
EXPECT_EQ(tf_op.type, "op_type");
EXPECT_EQ(TfOpEventName(kName), "op_type");
}
TEST(TfOpUtilsTest, JaxOpNameTest) {
const absl::string_view kOpName = "namescope/add";
const absl::string_view kOpType = "add";
EXPECT_TRUE(IsJaxOpNameAndType(kOpName, kOpType));
}
TEST(TfOpUtilsTest, JaxOpWithBracketTest) {
const absl::string_view kName = "op_name:op_type[array=([])]";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kJax);
EXPECT_EQ(tf_op.name, "op_name");
EXPECT_EQ(tf_op.type, "op_type");
EXPECT_EQ(TfOpEventName(kName), "op_type");
}
TEST(TfOpUtilsTest, JaxOpWithBracketAndTrailingColonTest) {
const absl::string_view kName = "op_name/op_type[array=([])]:";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kJax);
EXPECT_EQ(tf_op.name, "op_name/op_type[array=([])]");
EXPECT_EQ(tf_op.type, "op_type");
EXPECT_EQ(TfOpEventName(kName), "op_type");
}
TEST(TfOpUtilsTest, JaxOpNameWithMetadataTest) {
const absl::string_view kOpName =
"pmap(<unnamed wrapped function>)/gather[ "
"dimension_numbers=GatherDimensionNumbers(offset_dims=(2,), "
"collapsed_slice_dims=(0, 1), start_index_map=(0, 1))\n "
" slice_sizes=(1, 1, 81) ]:gather";
const absl::string_view kOpType = "gather";
EXPECT_TRUE(IsJaxOpNameAndType(kOpName, kOpType));
}
TEST(TfOpUtilsTest, OtherXlaOpTest) {
const absl::string_view kName =
"namescope.1/namespace__opname2d:namespace__opname2d";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kJax);
EXPECT_EQ(tf_op.name, "namescope.1/namespace__opname2d");
EXPECT_EQ(tf_op.type, "namespace__opname2d");
EXPECT_EQ(TfOpEventName(kName), "namespace__opname2d");
}
TEST(TfOpUtilsTest, OtherXlaOpNameTest) {
const absl::string_view kOpName = "namescope.1/namespace__opname2d";
const absl::string_view kOpType = "namespace__opname2d";
EXPECT_TRUE(IsJaxOpNameAndType(kOpName, kOpType));
}
TEST(TfOpUtilsTest, OpWithoutTypeTest) {
const absl::string_view kName = "namescope/OpName_1:";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name, "namescope/OpName_1");
EXPECT_EQ(tf_op.type, "OpName");
EXPECT_EQ(TfOpEventName(kName), "OpName");
}
TEST(TfOpUtilsTest, OpTypeWithUnderscoreTest) {
const absl::string_view kName = "namescope/OpName_a:";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name, "namescope/OpName_a");
EXPECT_EQ(tf_op.type, "OpName_a");
EXPECT_EQ(TfOpEventName(kName), "OpName_a");
}
TEST(TfOpUtilsTest, NameScopeTest) {
const absl::string_view kName = "scope-1/scope2/OpName:OpType";
TfOp tf_op = ParseTfOpFullname(kName);
EXPECT_EQ(tf_op.category, Category::kTensorFlow);
EXPECT_EQ(tf_op.name, "scope-1/scope2/OpName");
EXPECT_EQ(tf_op.type, "OpType");
std::vector<absl::string_view> name_scopes = ParseTfNameScopes(tf_op);
EXPECT_EQ(name_scopes.size(), 2);
EXPECT_EQ(name_scopes[0], "scope-1");
EXPECT_EQ(name_scopes[1], "scope2");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/tf_op_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/tf_op_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8f8f9e65-c8b2-4926-9c46-bde37faed913 | cpp | tensorflow/tensorflow | tpu_xplane_utils | third_party/xla/xla/tsl/profiler/utils/tpu_xplane_utils.cc | third_party/xla/xla/tsl/profiler/utils/tpu_xplane_utils_test.cc | #include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include <optional>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tsl/platform/regexp.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
std::vector<const XPlane*> FindTensorCorePlanes(const XSpace& xspace) {
return FindPlanes(xspace, [](const XPlane& xplane) {
static const LazyRE2 re = {kTpuPlaneRegex};
return RE2::FullMatch(xplane.name(), *re);
});
}
std::vector<XPlane*> FindMutableTensorCorePlanes(XSpace* xspace) {
return FindMutablePlanes(xspace, [](const XPlane& xplane) {
static const LazyRE2 re = {kTpuPlaneRegex};
return RE2::FullMatch(xplane.name(), *re);
});
}
std::optional<int> GetTensorCoreId(absl::string_view plane_name) {
int core_id = -1;
if (RE2::FullMatch(plane_name, {kTpuPlaneRegex}, &core_id)) {
return core_id;
}
return std::nullopt;
}
std::optional<int> GetSparseCoreId(absl::string_view plane_name) {
std::optional<int> core_id;
RE2::FullMatch(plane_name, {kSparseCorePlaneRegex}, &core_id);
return core_id;
}
}
} | #include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include <vector>
#include "absl/strings/str_cat.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::testing::Optional;
using ::testing::UnorderedElementsAre;
TEST(TpuXPlaneUtilsTest, GetTensorCoreXPlanesFromXSpace) {
XSpace xspace;
XPlane* p1 = FindOrAddMutablePlaneWithName(&xspace, TpuPlaneName(0));
XPlane* p2 = FindOrAddMutablePlaneWithName(&xspace, TpuPlaneName(1));
FindOrAddMutablePlaneWithName(&xspace, TpuPlaneName(2) + "Postfix");
std::vector<const XPlane*> xplanes = FindTensorCorePlanes(xspace);
EXPECT_THAT(xplanes, UnorderedElementsAre(p1, p2));
}
TEST(TpuXPlaneUtilsTest, GetMutableTensorCoreXPlanesFromXSpace) {
XSpace xspace;
XPlane* p1 = FindOrAddMutablePlaneWithName(&xspace, TpuPlaneName(0));
XPlane* p2 = FindOrAddMutablePlaneWithName(&xspace, TpuPlaneName(1));
FindOrAddMutablePlaneWithName(&xspace, TpuPlaneName(2) + "Postfix");
std::vector<XPlane*> xplanes = FindMutableTensorCorePlanes(&xspace);
EXPECT_THAT(xplanes, UnorderedElementsAre(p1, p2));
}
TEST(TpuXPlaneUtilsTest, GetTensorCoreIdFromPlaneName) {
EXPECT_EQ(GetTensorCoreId(TpuPlaneName(0)), 0);
}
TEST(TpuXPlaneUtilsTest, IsNotTensorCorePlaneName) {
EXPECT_FALSE(GetTensorCoreId("/metadata:0").has_value());
}
TEST(TpuXPlaneUtilsTest, IsNotTensorCorePlaneNameWithPrefix) {
EXPECT_FALSE(
GetTensorCoreId(absl::StrCat("/prefix", TpuPlaneName(0))).has_value());
}
TEST(TpuXplaneUtilsTest, GetSparseCorePlanesFromXSpace) {
XSpace space;
XPlane* p1 = FindOrAddMutablePlaneWithName(&space, TpuPlaneName(0));
XPlane* p2 = FindOrAddMutablePlaneWithName(&space, TpuPlaneName(1));
XPlane* p3 = FindOrAddMutablePlaneWithName(
&space, absl::StrCat(TpuPlaneName(0), " SparseCore 0"));
XPlane* p4 = FindOrAddMutablePlaneWithName(
&space, absl::StrCat(TpuPlaneName(0), " SparseCore 1"));
EXPECT_THAT(FindTensorCorePlanes(space), UnorderedElementsAre(p1, p2));
EXPECT_THAT(FindPlanesWithPrefix(space, kTpuPlanePrefix),
UnorderedElementsAre(p1, p2, p3, p4));
EXPECT_THAT(GetSparseCoreId(p3->name()), Optional(0));
EXPECT_THAT(GetSparseCoreId(p4->name()), Optional(1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/tpu_xplane_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/tpu_xplane_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2812a750-abac-41e2-8608-97c9f26bc2c8 | cpp | tensorflow/tensorflow | group_events | third_party/xla/xla/tsl/profiler/utils/group_events.cc | third_party/xla/xla/tsl/profiler/utils/group_events_test.cc | #include "xla/tsl/profiler/utils/group_events.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/dso_loader.h"
#include "tsl/platform/env.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
void CreateStatMetadata(XPlane* plane) {
XPlaneBuilder builder(plane);
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kStepName));
builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kIsEager));
}
std::optional<int64_t> GetKernelEventType(bool is_host_plane,
const XEventVisitor& event) {
if (event.GetStat(StatType::kCorrelationId).has_value()) {
return is_host_plane ? HostEventType::kKernelLaunch
: HostEventType::kKernelExecute;
}
return std::nullopt;
}
int64_t GetEventType(bool is_host_plane, const XEventVisitor& event) {
if (std::optional<int64_t> event_type = event.Type()) {
return *event_type;
} else if (std::optional<int64_t> kernel_event_type =
GetKernelEventType(is_host_plane, event)) {
return *kernel_event_type;
} else {
return HostEventType::kUnknownHostEventType;
}
}
bool IsLegacyRootEvent(const XEventVisitor& event) {
return event.Type() == HostEventType::kTraceContext;
}
struct GroupingEventStats {
explicit GroupingEventStats(const XEventVisitor& event);
std::optional<int> producer_type;
std::optional<uint64_t> producer_id;
std::optional<int> consumer_type;
std::optional<uint64_t> consumer_id;
std::optional<int> root_level;
bool is_async = false;
};
GroupingEventStats::GroupingEventStats(const XEventVisitor& event) {
std::optional<int64_t> step_id;
event.ForEachStat([&](const XStatVisitor& stat) {
if (!stat.Type().has_value()) return;
switch (*stat.Type()) {
case StatType::kProducerType:
producer_type = stat.IntValue();
break;
case StatType::kProducerId:
producer_id = stat.IntOrUintValue();
break;
case StatType::kConsumerType:
consumer_type = stat.IntValue();
break;
case StatType::kConsumerId:
consumer_id = stat.IntOrUintValue();
break;
case StatType::kIsRoot:
root_level = stat.IntValue();
break;
case StatType::kIsAsync:
is_async = stat.BoolValue();
break;
case StatType::kStepId:
step_id = stat.IntValue();
break;
default:
break;
}
});
if (!root_level.has_value() && IsLegacyRootEvent(event)) {
root_level = 1;
}
}
void SetContextGroup(const GroupingEventStats& stats, EventNode* event,
ContextGroupMap* context_groups) {
if (stats.producer_type.has_value() && stats.producer_id.has_value()) {
((*context_groups)[*stats.producer_type][*stats.producer_id])
.producers.push_back(event);
}
if (stats.consumer_type.has_value() && stats.consumer_id.has_value()) {
((*context_groups)[*stats.consumer_type][*stats.consumer_id])
.consumers.push_back(event);
}
}
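// Wires every producer in a context group to every consumer. Groups with 64+
// events on both sides are skipped, with a rate-limited warning, to avoid a
// quadratic blowup in edges.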
void ConnectContextGroups(const ContextGroupMap& context_groups) {
for (auto& type_id_group : context_groups) {
for (auto& id_group : type_id_group.second) {
const ContextGroup& group = id_group.second;
if (group.producers.size() >= 64 && group.consumers.size() >= 64) {
LOG_EVERY_N(WARNING, 1000)
<< "id:" << id_group.first
<< " producers:" << group.producers.size() << " : "
<< group.producers[0]->GetEventVisitor().Name()
<< " consumers:" << group.consumers.size() << " : "
<< group.consumers[0]->GetEventVisitor().Name();
continue;
}
for (EventNode* parent : group.producers) {
for (EventNode* child : group.consumers) {
parent->AddChild(child);
}
}
}
}
}
bool IsImplicitRootEvent(const XEventVisitor& event) {
static const auto* const kImplicitRootEvents =
new absl::flat_hash_set<int64_t>{
HostEventType::kFunctionRun, HostEventType::kSessionRun,
HostEventType::kRunGraph, HostEventType::kExecutorStateProcess};
return event.Type().has_value() &&
kImplicitRootEvents->contains(*event.Type());
}
void ProcessRootEvent(int64_t group_id, EventNode* root_event,
GroupMetadataMap* group_metadata_map) {
root_event->PropagateGroupId(group_id, group_metadata_map);
std::string group_name = root_event->GetGroupName();
if (!IsImplicitRootEvent(root_event->GetEventVisitor())) {
root_event->AddStepName(group_name);
}
(*group_metadata_map)[group_id].name = std::move(group_name);
}
using Comparator = std::function<bool(const EventNode*)>;
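// Breadth-first search over ancestors (optionally including the node itself)
// for the first node satisfying `comparator`.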
const EventNode* FindParentWithComparator(const Comparator& comparator,
const EventNode* node,
bool include_self) {
std::queue<const EventNode*> nodes;
absl::flat_hash_set<const EventNode*> seen = {node};
if (include_self) {
nodes.push(node);
} else {
for (const EventNode* parent : node->GetParents()) {
nodes.push(parent);
seen.insert(parent);
}
}
while (!nodes.empty()) {
const EventNode* node = nodes.front();
nodes.pop();
if (comparator(node)) return node;
for (const EventNode* parent : node->GetParents()) {
if (seen.contains(parent)) continue;
nodes.push(parent);
seen.insert(parent);
}
}
return nullptr;
}
bool IsIteratorEventType(std::optional<int64_t> event_type) {
return event_type == HostEventType::kIterator ||
event_type == HostEventType::kDeviceInputPipelineSecondIterator;
}
bool CheckLoopOp(const XSpace& space) {
for (const XPlane& plane : space.planes()) {
for (const auto& event_metadata : plane.event_metadata()) {
std::optional<int64_t> event_type =
FindHostEventType(event_metadata.second.name());
if (!event_type.has_value()) continue;
switch (*event_type) {
case HostEventType::kWhileOpEvalCond:
case HostEventType::kWhileOpStartBody:
case HostEventType::kForOp:
case HostEventType::kParallelForOp:
case HostEventType::kForeverOp:
return true;
default:
break;
}
}
}
return false;
}
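// Breadth-first search up the ancestor chain for the first event carrying the
// requested stat type.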
std::optional<XStatVisitor> EventNode::GetContextStat(int64_t stat_type) const {
std::queue<const EventNode*> nodes;
absl::flat_hash_set<const EventNode*> seen = {this};
nodes.push(this);
while (!nodes.empty()) {
const EventNode* node = nodes.front();
nodes.pop();
if (std::optional<XStatVisitor> stat = node->visitor_.GetStat(stat_type)) {
return stat;
}
for (const EventNode* parent : node->GetParents()) {
if (seen.contains(parent)) continue;
nodes.push(parent);
seen.insert(parent);
}
}
return std::nullopt;
}
std::string EventNode::GetGroupName() const {
std::string name;
if (std::optional<XStatVisitor> stat = GetContextStat(StatType::kGraphType)) {
absl::StrAppend(&name, stat->StrOrRefValue(), " ");
} else if (!(IsImplicitRootEvent(visitor_))) {
absl::StrAppend(&name, GetEventVisitor().Name(), " ");
}
int64_t step_num = group_id_.value_or(0);
if (std::optional<XStatVisitor> stat = GetContextStat(StatType::kIterNum)) {
step_num = stat->IntValue();
} else if (std::optional<XStatVisitor> stat =
GetContextStat(StatType::kStepNum)) {
step_num = stat->IntValue();
}
absl::StrAppend(&name, step_num);
return name;
}
XStat* EventNode::FindOrAddStatByType(int64_t stat_type) {
const XPlaneVisitor& plane = visitor_.Plane();
const XStatMetadata* stat_metadata = plane.GetStatMetadataByType(stat_type);
DCHECK(stat_metadata != nullptr);
auto* raw_event = const_cast<XEvent*>(&visitor_.RawEvent());
return FindOrAddMutableStat(*stat_metadata, raw_event);
}
void EventNode::SetGroupId(int64_t group_id) {
group_id_ = group_id;
FindOrAddStatByType(StatType::kGroupId)->set_int64_value(group_id);
}
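// Breadth-first assignment of group_id to this node and its descendants. A
// node that already belongs to a different group keeps its group, but a
// parent/child relation between the two groups is recorded.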
void EventNode::PropagateGroupId(int64_t group_id,
GroupMetadataMap* group_metadata_map) {
std::queue<EventNode*> nodes;
absl::flat_hash_set<EventNode*> seen = {this};
nodes.push(this);
while (!nodes.empty()) {
EventNode* node = nodes.front();
nodes.pop();
std::optional<int64_t> node_group_id = node->GetGroupId();
if (node_group_id.has_value()) {
if (*node_group_id != group_id) {
(*group_metadata_map)[group_id].children.insert(*node_group_id);
(*group_metadata_map)[*node_group_id].parents.insert(group_id);
}
} else {
node->SetGroupId(group_id);
for (EventNode* child : node->GetChildren()) {
if (seen.contains(child)) continue;
nodes.push(child);
seen.insert(child);
}
}
}
}
void EventNode::AddStepName(absl::string_view step_name) {
FindOrAddStatByType(StatType::kStepName)
->set_str_value(step_name.data(), step_name.size());
}
void EventNode::SetIsEager(bool is_eager) {
FindOrAddStatByType(StatType::kIsEager)->set_int64_value(is_eager ? 1 : 0);
}
bool EventNode::IsCompiledFunc() const {
auto is_func = visitor_.GetStat(StatType::kIsFunc);
return !is_func || is_func->IntValue();
}
bool EventNode::IsEager() const {
const EventNode* node = FindParent(HostEventType::kEagerKernelExecute);
if (node == nullptr) {
return false;
}
return !node->IsCompiledFunc();
}
const EventNode* EventNode::FindParent(int64_t event_type) const {
return FindParentWithComparator(
[event_type](const EventNode* node) {
return node->GetEventVisitor().Type() == event_type;
},
this, true);
}
void EventForest::FindEventNodeAndApply(
const int64_t event_type, const std::vector<int64_t>& stat_types,
const std::function<void(EventNode&, const std::vector<uint64>&)>& cb) {
if (auto* event_node_list = gtl::FindOrNull(event_node_map_, event_type)) {
for (EventNode& event_node : *event_node_list) {
std::vector<uint64> stats;
for (const auto stat_type : stat_types) {
std::optional<XStatVisitor> stat =
event_node.GetEventVisitor().GetStat(stat_type);
if (!stat) break;
stats.push_back(stat->IntOrUintValue());
}
if (stats.size() == stat_types.size()) {
cb(event_node, stats);
}
}
}
}
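// Creates an EventNode per XEvent. Within each line, a synchronous event is
// nested under the innermost open event whose timespan includes it, tracked
// with a stack of candidate parents; async events are excluded from nesting.
// Producer/consumer stats are collected into context_groups for later
// cross-thread linking.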
void EventForest::ConnectIntraThread(XPlane* plane, XPlaneVisitor* visitor,
ContextGroupMap* context_groups) {
bool is_host_plane = (visitor->Name() == kHostThreadsPlaneName);
for (auto& line : *plane->mutable_lines()) {
std::vector<EventNode*> parent_nodes;
for (auto& event : *line.mutable_events()) {
XEventVisitor event_visitor(visitor, &line, &event);
int64_t event_type = GetEventType(is_host_plane, event_visitor);
EventNode* cur_node =
&event_node_map_[event_type].emplace_back(std::move(event_visitor));
GroupingEventStats stats(cur_node->GetEventVisitor());
if (stats.root_level.has_value()) {
cur_node->SetRootLevel(*stats.root_level);
}
SetContextGroup(stats, cur_node, context_groups);
if (!stats.is_async) {
while (!parent_nodes.empty()) {
EventNode* parent_node = parent_nodes.back();
if (parent_node->GetEventVisitor().GetTimespan().Includes(
cur_node->GetEventVisitor().GetTimespan())) {
parent_node->AddChild(cur_node);
break;
} else {
parent_nodes.pop_back();
}
}
parent_nodes.push_back(cur_node);
}
}
}
}
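// Connects events across threads: a child event is attached to the parent
// event whose values for the configured stat types match exactly
// (child_stat_types defaults to parent_stat_types).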
void EventForest::ConnectInterThread(
const std::vector<InterThreadConnectInfo>& connect_info_list) {
for (const auto& connect_info : connect_info_list) {
absl::flat_hash_map<std::vector<uint64>, EventNode*> connect_map;
const std::vector<int64_t>& parent_stat_types =
connect_info.parent_stat_types;
const std::vector<int64_t>* child_stat_types =
&connect_info.child_stat_types;
if (child_stat_types->empty()) {
child_stat_types = &parent_stat_types;
}
FindEventNodeAndApply(connect_info.parent_event_type, parent_stat_types,
[&connect_map](EventNode& event_node,
const std::vector<uint64>& stats) {
connect_map[stats] = &event_node;
});
FindEventNodeAndApply(
connect_info.child_event_type, *child_stat_types,
[&connect_map](EventNode& event_node,
const std::vector<uint64>& stats) {
if (auto parent_event_node = gtl::FindPtrOrNull(connect_map, stats)) {
parent_event_node->AddChild(&event_node);
}
});
}
}
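// A root event seeds a new group only if it is still ungrouped and has no
// ancestor at the same root level; nested roots defer to the outermost one.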
bool RootNeedsGrouping(const EventNode* root) {
if (root->GetGroupId().has_value()) return false;
const EventNode* root_parent = FindParentWithComparator(
[root](const EventNode* parent) {
return parent->RootLevel() == root->RootLevel();
},
root, /*include_self=*/false);
return root_parent == nullptr;
}
void SortRootEventList(EventList* event_list) {
absl::c_sort(*event_list, [](const EventNode* e1, const EventNode* e2) {
return e1->RootLevel() == e2->RootLevel()
? *e1 < *e2
: e1->RootLevel() > e2->RootLevel();
});
}
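// Assigns group ids. TF-loop iteration roots take precedence when present;
// otherwise root events (excluding tf.data steps) are sorted by root level
// and start time, and each one that still needs grouping starts a new group.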
void EventForest::CreateEventGroups() {
int64_t group_id = 0;
if (!tf_loop_root_events_.empty()) {
for (EventNode* root_event : tf_loop_root_events_) {
ProcessRootEvent(group_id++, root_event, &group_metadata_map_);
}
return;
}
EventList root_events;
for (auto& [event_type, events] : event_node_map_) {
for (EventNode& event : events) {
if (!event.RootLevel()) continue;
std::optional<XStatVisitor> step_id_stat =
event.GetEventVisitor().GetStat(StatType::kStepId);
if (step_id_stat && tf_data_step_ids_.contains(step_id_stat->IntValue()))
continue;
root_events.push_back(&event);
}
}
SortRootEventList(&root_events);
for (EventNode* root_event : root_events) {
if (RootNeedsGrouping(root_event)) {
ProcessRootEvent(group_id++, root_event, &group_metadata_map_);
}
}
}
void EventForest::MarkEagerlyExecutedGpuKernels() {
auto kernel_execute_event_node_list =
gtl::FindOrNull(event_node_map_, HostEventType::kKernelExecute);
if (!kernel_execute_event_node_list) return;
for (EventNode& kernel_execute_event_node : *kernel_execute_event_node_list) {
kernel_execute_event_node.SetIsEager(kernel_execute_event_node.IsEager());
}
}
void EventForest::MarkEagerlyExecutedCpuTfOps() {
auto tf_op_run_event_node_list =
gtl::FindOrNull(event_node_map_, HostEventType::kTfOpRun);
if (!tf_op_run_event_node_list) return;
for (EventNode& tf_op_run_event_node : *tf_op_run_event_node_list) {
tf_op_run_event_node.SetIsEager(tf_op_run_event_node.IsEager());
}
}
void EventForest::ProcessTfDataSteps() {
const int64_t tf_data_event_types[] = {
HostEventType::kTfDataCapturedFunctionRun,
HostEventType::kTfDataCapturedFunctionRunAsync,
HostEventType::kTfDataCapturedFunctionRunInstantiated,
HostEventType::kTfDataCapturedFunctionRunWithBorrowedArgs};
for (const int64_t tf_data_event_type : tf_data_event_types) {
auto tf_data_events = gtl::FindOrNull(event_node_map_, tf_data_event_type);
if (!tf_data_events) continue;
for (const EventNode& tf_data_event : *tf_data_events) {
std::optional<XStatVisitor> step_id_stat =
tf_data_event.GetEventVisitor().GetStat(StatType::kStepId);
if (!step_id_stat) continue;
tf_data_step_ids_.insert(step_id_stat->IntValue());
}
}
}
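// Reconstructs TF1 loop iterations from ExecutorState::Process events keyed
// by (step_id, iter_num). The earliest event of each iteration becomes its
// root and adopts the iteration's other events; "loops" that only contain
// iteration 0 are ignored.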
void EventForest::ProcessTensorFlowLoop() {
struct TensorFlowLoopIteration {
EventNode* first_event = nullptr;
std::vector<EventNode*> events;
};
using TensorFlowLoop =
absl::flat_hash_map<int64_t /*iter_num*/, TensorFlowLoopIteration>;
absl::flat_hash_map<int64_t /*step_id*/, TensorFlowLoop> tf_loops;
auto executor_event_list =
gtl::FindOrNull(event_node_map_, HostEventType::kExecutorStateProcess);
if (!executor_event_list) return;
for (EventNode& executor_event : *executor_event_list) {
std::optional<XStatVisitor> step_id_stat =
executor_event.GetEventVisitor().GetStat(StatType::kStepId);
std::optional<XStatVisitor> iter_num_stat =
executor_event.GetEventVisitor().GetStat(StatType::kIterNum);
if (!step_id_stat || !iter_num_stat) continue;
int64_t step_id = step_id_stat->IntValue();
if (tf_data_step_ids_.contains(step_id)) continue;
TensorFlowLoop& tf_loop = tf_loops[step_id];
TensorFlowLoopIteration& iteration = tf_loop[iter_num_stat->IntValue()];
if (!iteration.first_event || executor_event < *iteration.first_event) {
iteration.first_event = &executor_event;
}
iteration.events.push_back(&executor_event);
}
std::vector<const TensorFlowLoopIteration*> iters;
for (const auto& step_id_and_tf_loop : tf_loops) {
const TensorFlowLoop& tf_loop = step_id_and_tf_loop.second;
if (tf_loop.size() == 1 && tf_loop.contains(0)) continue;
for (const auto& iter_num_and_iter : tf_loop) {
iters.push_back(&iter_num_and_iter.second);
}
}
absl::c_sort(iters, [](const auto& iter1, const auto& iter2) {
return *iter1->first_event < *iter2->first_event;
});
for (const TensorFlowLoopIteration* iter : iters) {
EventNode* root_event = iter->first_event;
tf_loop_root_events_.push_back(root_event);
for (EventNode* event : iter->events) {
if (event == root_event) continue;
root_event->AddChild(event);
}
}
}
void EventForest::AddPlane(
const std::function<XPlaneVisitor(const XPlane*)> visitor_factory,
XPlane* plane) {
CreateStatMetadata(plane);
planes_.push_back({plane, visitor_factory(plane)});
}
void EventForest::AddSpace(
const std::function<XPlaneVisitor(const XPlane*)> visitor_factory,
XSpace* space) {
for (XPlane& plane : *space->mutable_planes()) {
AddPlane(visitor_factory, &plane);
}
}
void EventForest::AddPlanes(
const std::function<XPlaneVisitor(const XPlane*)> visitor_factory,
const std::vector<XPlane*>& planes) {
for (XPlane* plane : planes) {
AddPlane(visitor_factory, plane);
}
}
void EventForest::ConnectEvents(
const std::vector<InterThreadConnectInfo>& connect_info_list) {
ContextGroupMap context_groups;
for (auto& plane_visitor : planes_) {
ConnectIntraThread(plane_visitor.first, &plane_visitor.second,
&context_groups);
}
ConnectInterThread(connect_info_list);
ConnectContextGroups(context_groups);
}
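// Matches tf.data producer iterators to consumer iterators via
// (iterator_id, element_id) so asynchronously produced elements are linked to
// the events that consume them.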
void EventForest::ConnectTfDataEvents() {
absl::flat_hash_map<
std::pair<int64_t /*iterator_id*/, int64_t /*element_id*/>,
std::vector<EventNode*>>
produce_iterator_map;
uint64 num_producers = 0;
for (HostEventType event_type :
{HostEventType::kPrefetchProduce,
HostEventType::kParallelInterleaveProduce,
HostEventType::kParallelMapProduce, HostEventType::kMapAndBatchProduce,
HostEventType::kParseExampleProduce,
HostEventType::kParallelBatchProduce}) {
auto produce_event_list = gtl::FindOrNull(event_node_map_, event_type);
if (!produce_event_list) continue;
VLOG(1) << produce_event_list->size() << " "
<< GetHostEventTypeStr(event_type) << " events found.";
for (EventNode& produce_event : *produce_event_list) {
std::optional<XStatVisitor> element_id =
produce_event.GetEventVisitor().GetStat(StatType::kElementId);
if (!element_id.has_value()) continue;
for (EventNode* produce_iterator : produce_event.GetChildren()) {
if (IsIteratorEventType(produce_iterator->GetEventVisitor().Type())) {
std::optional<XStatVisitor> iterator_id =
produce_iterator->GetEventVisitor().GetStat(StatType::kParentId);
if (!iterator_id.has_value()) break;
produce_iterator_map[{iterator_id->IntValue(),
element_id->IntValue()}]
.push_back(produce_iterator);
++num_producers;
break;
}
}
}
}
VLOG(1) << num_producers << " producer iterators found.";
uint64 num_matched = 0;
for (HostEventType event_type :
{HostEventType::kPrefetchConsume,
HostEventType::kParallelInterleaveConsume,
HostEventType::kParallelMapConsume, HostEventType::kMapAndBatchConsume,
HostEventType::kParseExampleConsume,
HostEventType::kParallelBatchConsume}) {
auto consume_event_list = gtl::FindOrNull(event_node_map_, event_type);
if (!consume_event_list) continue;
VLOG(1) << consume_event_list->size() << " "
<< GetHostEventTypeStr(event_type) << " events found.";
for (EventNode& consume_event : *consume_event_list) {
std::optional<XStatVisitor> element_id =
consume_event.GetEventVisitor().GetStat(StatType::kElementId);
if (!element_id.has_value()) continue;
if (consume_event.GetParents().empty()) continue;
EventNode* consume_iterator = consume_event.GetParents().at(0);
if (!consume_iterator ||
!IsIteratorEventType(consume_iterator->GetEventVisitor().Type())) {
continue;
}
std::optional<XStatVisitor> iterator_id =
consume_iterator->GetEventVisitor().GetStat(StatType::kStepId);
if (!iterator_id.has_value()) continue;
if (auto produce_iterators = gtl::FindOrNull(
produce_iterator_map, std::make_pair(iterator_id->IntValue(),
element_id->IntValue()))) {
for (EventNode* produce_iterator : *produce_iterators) {
consume_iterator->AddChild(produce_iterator);
++num_matched;
}
}
}
}
VLOG(1) << num_matched << " consumer iterators matched.";
}
void EventForest::GroupEvents() {
ProcessTfDataSteps();
ProcessTensorFlowLoop();
CreateEventGroups();
MarkEagerlyExecutedGpuKernels();
MarkEagerlyExecutedCpuTfOps();
}
std::vector<InterThreadConnectInfo> CreateInterThreadConnectInfoList() {
std::vector<InterThreadConnectInfo> connect_info_list = {
{HostEventType::kExecutorStateProcess,
HostEventType::kIteratorGetNextOp,
{StatType::kStepId, StatType::kIterNum}},
{HostEventType::kExecutorStateProcess,
HostEventType::kIteratorGetNextAsOptionalOp,
{StatType::kStepId, StatType::kIterNum}},
{HostEventType::kKernelLaunch,
HostEventType::kKernelExecute,
{StatType::kCorrelationId}}};
return connect_info_list;
}
void GroupTfEvents(XSpace* space, EventForest* event_forest) {
if (CheckLoopOp(*space)) {
return;
}
std::vector<InterThreadConnectInfo> connect_info_list =
CreateInterThreadConnectInfoList();
event_forest->AddSpace(CreateTfXPlaneVisitor, space);
event_forest->ConnectEvents(connect_info_list);
event_forest->GroupEvents();
}
void GroupTfEvents(XSpace* space) {
EventForest event_forest;
GroupTfEvents(space, &event_forest);
}
void AddGroupMetadataToStepEvents(const GroupMetadataMap& group_metadata_map,
XLineBuilder& line) {
if (group_metadata_map.empty()) return;
XPlaneBuilder* plane = line.Plane();
const XStatMetadata* group_id_stat_metadata =
plane->GetStatMetadata(GetStatTypeStr(StatType::kGroupId));
if (group_id_stat_metadata == nullptr) return;
const XStatMetadata* step_name_stat_metadata =
plane->GetOrCreateStatMetadata(GetStatTypeStr(StatType::kStepName));
line.ForEachEvent([&](XEventBuilder event) {
const XStat* group_id_stat = event.GetStat(*group_id_stat_metadata);
if (group_id_stat != nullptr) {
int64_t group_id = group_id_stat->int64_value();
if (const GroupMetadata* group_metadata =
gtl::FindOrNull(group_metadata_map, group_id)) {
event.AddStatValue(*step_name_stat_metadata, group_metadata->name);
}
}
});
}
std::optional<int64_t> GetGroupId(const XEventVisitor& event,
const XStatMetadata& group_id_stat_metadata) {
if (auto group_id_stat =
event.GetStat(StatType::kGroupId, group_id_stat_metadata)) {
return group_id_stat->IntValue();
}
return std::nullopt;
}
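// Tracks the group event (e.g. on the "XLA Modules" line) overlapping a
// stream of time-ordered queries, caching the last hit so consecutive lookups
// avoid rescanning the line.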
class GroupQueue {
public:
GroupQueue(const XPlaneVisitor* plane, const XLine* line,
const XStatMetadata* group_id_stat_metadata)
: group_queue_(plane, line),
group_id_stat_metadata_(group_id_stat_metadata) {}
std::optional<int64_t> OverlappingGroupId(Timespan timespan) {
if (!group_event_visitor_ ||
!group_event_visitor_->GetTimespan().Overlaps(timespan)) {
group_event_visitor_ = group_queue_.GetOverlappingEvent(timespan);
if (group_event_visitor_) {
group_id_ = GetGroupId(*group_event_visitor_, *group_id_stat_metadata_);
} else {
group_id_.reset();
}
}
return group_id_;
}
private:
XEventContextTracker group_queue_;
std::optional<XEventVisitor> group_event_visitor_;
std::optional<int64_t> group_id_;
const XStatMetadata* group_id_stat_metadata_;
};
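// Collapses consecutive step events that share a group id into one event
// spanning their union; step events without a group id are dropped, unless
// that would remove every event on the line.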
void MergeHostSteps(const XStatMetadata& group_id_stat_metadata,
const XPlaneVisitor& plane_visitor,
XPlaneBuilder* plane_builder, XLine* step_line) {
std::optional<int64_t> merged_group_id;
std::optional<XEventBuilder> merged_step_builder;
absl::flat_hash_set<const XEvent*> events_to_remove;
for (XEvent& step_event : *step_line->mutable_events()) {
XEventVisitor step_visitor(&plane_visitor, step_line, &step_event);
auto group_id = GetGroupId(step_visitor, group_id_stat_metadata);
if (!group_id) {
merged_group_id.reset();
merged_step_builder.reset();
events_to_remove.insert(&step_event);
} else if (merged_group_id != group_id) {
merged_group_id = group_id;
merged_step_builder.emplace(step_line, plane_builder, &step_event);
} else {
merged_step_builder->SetEndTimestampPs(step_visitor.EndTimestampPs());
events_to_remove.insert(&step_event);
}
}
if (events_to_remove.size() < step_line->events_size()) {
RemoveEvents(step_line, events_to_remove);
}
}
void GroupLine(const XStatMetadata& group_id_stat_metadata,
const XPlaneVisitor& plane_visitor, const XLine& group_line,
XPlaneBuilder* plane_builder, XLine* line) {
GroupQueue group_queue(&plane_visitor, &group_line, &group_id_stat_metadata);
for (XEvent& event : *line->mutable_events()) {
XEventBuilder event_builder(line, plane_builder, &event);
if (auto group_id =
group_queue.OverlappingGroupId(event_builder.GetTimespan())) {
event_builder.AddStatValue(group_id_stat_metadata, *group_id);
}
}
}
void GroupHostAndPlanes(
tensorflow::profiler::XSpace* space,
const std::vector<tensorflow::profiler::XPlane*>& device_traces,
EventForest* event_forest) {
std::vector<InterThreadConnectInfo> connect_info_list =
CreateInterThreadConnectInfoList();
event_forest->AddSpace(CreateTfXPlaneVisitor, space);
event_forest->AddPlanes(CreateTfXPlaneVisitor, device_traces);
event_forest->ConnectEvents(connect_info_list);
event_forest->GroupEvents();
}
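// Propagates group ids from the "XLA Modules" line to other lines by timespan
// overlap. For host-driven loops, the "Steps" line is grouped first, merged
// per group, and annotated with step names; for device-driven loops (more
// steps than modules) grouping is skipped.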
void GroupXplaneEvents(tensorflow::profiler::XPlane* plane,
const GroupMetadataMap& group_metadata_map) {
XLine* module_line = nullptr;
XLine* step_line = nullptr;
std::vector<XLine*> other_lines;
for (XLine& line : *plane->mutable_lines()) {
if (line.name() == "XLA Modules") {
module_line = &line;
} else if (line.name() == "Steps") {
step_line = &line;
} else {
other_lines.push_back(&line);
}
}
if (!module_line) return;
XPlaneBuilder plane_builder(plane);
const XStatMetadata* group_id_stat_metadata =
plane_builder.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kGroupId));
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
const XLine* group_line = module_line;
if (step_line) {
bool device_loop = (step_line->events_size() > module_line->events_size());
if (device_loop) {
group_line = nullptr;
} else {
if (group_line) {
GroupLine(*group_id_stat_metadata, plane_visitor, *group_line,
&plane_builder, step_line);
MergeHostSteps(*group_id_stat_metadata, plane_visitor, &plane_builder,
step_line);
XLineBuilder step_line_builder(step_line, &plane_builder);
AddGroupMetadataToStepEvents(group_metadata_map, step_line_builder);
}
}
}
if (group_line) {
for (XLine* line : other_lines) {
GroupLine(*group_id_stat_metadata, plane_visitor, *group_line,
&plane_builder, line);
}
}
}
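// Groups host events, then runs GroupXplaneEvents for each device plane on
// its own thread; the threads join when `threads` goes out of scope.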
void GroupTpuEventsOSS(
tensorflow::profiler::XSpace* space,
const std::vector<tensorflow::profiler::XPlane*>& device_traces,
EventForest* event_forest) {
if (CheckLoopOp(*space)) {
return;
}
GroupHostAndPlanes(space, device_traces, event_forest);
if (device_traces.empty()) return;
const GroupMetadataMap& group_metadata_map =
event_forest->GetGroupMetadataMap();
std::vector<std::unique_ptr<Thread>> threads;
ThreadOptions thread_options;
threads.reserve(device_traces.size());
for (XPlane* plane : device_traces) {
threads.emplace_back(Env::Default()->StartThread(
thread_options, "group_xplane_events",
absl::bind_front(GroupXplaneEvents, plane,
std::ref(group_metadata_map))));
}
}
}
} | #include "xla/tsl/profiler/utils/group_events.h"
#include <optional>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_test_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
constexpr int64_t kTfExecutor = static_cast<int64_t>(ContextType::kTfExecutor);
TEST(GroupEventsTest, GroupGpuTraceLegacyRootTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(
&host_plane_builder, &main_thread, HostEventType::kTraceContext, 0, 100,
{{StatType::kGraphType, "train"}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, kTfExecutor},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70,
{{StatType::kCorrelationId, kCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
EXPECT_EQ(device_plane->lines(0).events(0).stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(
device_plane->lines(0).events(0).stats(1).metadata_id()),
StatType::kGroupId);
EXPECT_EQ(group_metadata_map.size(), 1);
EXPECT_EQ(group_metadata_map.at(0).name, "train 123");
}
TEST(GroupEventsTest, GroupGpuTraceTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(
&host_plane_builder, &main_thread, "train", 0, 100,
{{StatType::kStepNum, kStepNum}, {StatType::kIsRoot, int64_t{1}}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, kTfExecutor},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70,
{{StatType::kCorrelationId, kCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
EXPECT_EQ(device_plane->lines(0).events(0).stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(
device_plane->lines(0).events(0).stats(1).metadata_id()),
StatType::kGroupId);
EXPECT_EQ(group_metadata_map.size(), 1);
EXPECT_EQ(group_metadata_map.at(0).name, "train 123");
}
TEST(GroupEventsTest, GroupTensorFlowLoopTest) {
constexpr int64_t kStepId = 0;
constexpr int64_t kIterNum = 10;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 5, 10,
{{StatType::kStepId, kStepId},
{StatType::kIterNum, kIterNum},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kIterNum, kIterNum},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 70,
{{StatType::kCorrelationId, kCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
EXPECT_EQ(device_plane->lines(0).events(0).stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(
device_plane->lines(0).events(0).stats(1).metadata_id()),
StatType::kGroupId);
EXPECT_EQ(device_plane->lines(0).events(0).stats(1).int64_value(), 0);
EXPECT_EQ(group_metadata_map.size(), 1);
ASSERT_TRUE(group_metadata_map.contains(0));
EXPECT_EQ(group_metadata_map.at(0).name, "10");
}
TEST(GroupEventsTest, GroupMultipleTensorFlowLoopsTest) {
constexpr int64_t kFirstStepId = 0;
constexpr int64_t kSecondStepId = 1;
constexpr int64_t kFirstIterNumStart = 10;
constexpr int64_t kSecondIterNumStart = 0;
XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(2);
auto first_tf_executor_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &first_tf_executor_thread,
HostEventType::kExecutorStateProcess, 220, 80,
{{StatType::kStepId, kSecondStepId},
{StatType::kIterNum, kSecondIterNumStart},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kSecondStepId}});
CreateXEvent(&host_plane_builder, &first_tf_executor_thread,
HostEventType::kExecutorStateProcess, 320, 80,
{{StatType::kStepId, kSecondStepId},
{StatType::kIterNum, kSecondIterNumStart + 1},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kSecondStepId}});
auto second_tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &second_tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kFirstStepId},
{StatType::kIterNum, kFirstIterNumStart},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &second_tf_executor_thread,
HostEventType::kExecutorStateProcess, 120, 80,
{{StatType::kStepId, kFirstStepId},
{StatType::kIterNum, kFirstIterNumStart + 1},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kFirstStepId}});
EventForest event_forest;
GroupTfEvents(&space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
EXPECT_EQ(group_metadata_map.size(), 4);
ASSERT_TRUE(group_metadata_map.contains(0));
EXPECT_EQ(group_metadata_map.at(0).name, "10");
ASSERT_TRUE(group_metadata_map.contains(1));
EXPECT_EQ(group_metadata_map.at(1).name, "11");
ASSERT_TRUE(group_metadata_map.contains(2));
EXPECT_EQ(group_metadata_map.at(2).name, "0");
ASSERT_TRUE(group_metadata_map.contains(3));
EXPECT_EQ(group_metadata_map.at(3).name, "1");
}
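// Exercises four kernel-launch paths (TF1, legacy, eager op, eager function):
// only work dispatched under kEagerKernelExecute with kIsFunc=0 should be
// marked eager, on both the host and the device plane.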
TEST(GroupEventsTest, EagerOpTest) {
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto gpu_stream = device_plane_builder.GetOrCreateLine(0);
int64_t correlation_id = 100;
const char* kTF1GpuLaunchEvent = "tf1 matmul";
const char* kTF1GpuEvent = "tf1_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread, kTF1GpuLaunchEvent, 10, 90,
{{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kTF1GpuEvent, 200, 300,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kLegacyGpuLaunchEvent = "legacy matmul";
const char* kLegacyGpuEvent = "legacy_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 100, 200);
CreateXEvent(&host_plane_builder, &main_thread, kLegacyGpuLaunchEvent, 110,
190, {{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kLegacyGpuEvent, 300, 400,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kEagerOpGpuLaunchEvent = "eager op matmul";
const char* kEagerOpGpuEvent = "eager_op_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 200, 300,
{{StatType::kIsFunc, static_cast<int64_t>(0)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerOpGpuLaunchEvent, 210,
290, {{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kEagerOpGpuEvent, 400, 500,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kEagerFuncGpuLaunchEvent = "eager func matmul";
const char* kEagerFuncGpuEvent = "eager_func_kernel_matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 300, 400,
{{StatType::kIsFunc, static_cast<int64_t>(1)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerFuncGpuLaunchEvent, 310,
390, {{StatType::kCorrelationId, correlation_id}});
CreateXEvent(&device_plane_builder, &gpu_stream, kEagerFuncGpuEvent, 500, 600,
{{StatType::kCorrelationId, correlation_id}});
++correlation_id;
const char* kEagerOpCpuEvent = "eager_op_cpu_kernel:Matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 400, 500,
{{StatType::kIsFunc, static_cast<int64_t>(0)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerOpCpuEvent, 410, 490);
const char* kEagerFuncCpuEvent = "eager_func_cpu_kernel:Matmul";
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 500, 600,
{{StatType::kIsFunc, static_cast<int64_t>(1)}});
CreateXEvent(&host_plane_builder, &main_thread, kEagerFuncCpuEvent, 510, 590);
GroupTfEvents(&space);
auto is_eager = [](const XEventVisitor& event) {
auto eager_stats = event.GetStat(StatType::kIsEager);
return eager_stats && eager_stats->IntValue();
};
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(host_plane);
int interested_events_encountered = 0;
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == kEagerOpCpuEvent) {
interested_events_encountered++;
EXPECT_TRUE(is_eager(event));
} else if (event.Name() == kEagerFuncCpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
}
});
});
EXPECT_EQ(interested_events_encountered, 2);
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
interested_events_encountered = 0;
device_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == kTF1GpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
} else if (event.Name() == kLegacyGpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
} else if (event.Name() == kEagerOpGpuEvent) {
interested_events_encountered++;
EXPECT_TRUE(is_eager(event));
} else if (event.Name() == kEagerFuncGpuEvent) {
interested_events_encountered++;
EXPECT_FALSE(is_eager(event));
}
});
});
EXPECT_EQ(interested_events_encountered, 4);
}
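// Ops running under a FunctionRun should be marked non-eager (kIsEager=0) on
// both the host op and the correlated device kernel.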
TEST(GroupEventsTest, FunctionOpTest) {
constexpr int64_t kStepNum = 123;
constexpr int64_t kStepId = 0;
constexpr int64_t kCorrelationId = 100;
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kStepNum}});
CreateXEvent(&host_plane_builder, &main_thread,
HostEventType::kEagerKernelExecute, 10, 90);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kStepId},
{StatType::kProducerType, kTfExecutor},
{StatType::kProducerId, kStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kStepId, kStepId},
{StatType::kConsumerType, kTfExecutor},
{StatType::kConsumerId, kStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 30,
{{StatType::kCorrelationId, kCorrelationId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "add:Add", 70, 20);
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 200, 300,
{{StatType::kCorrelationId, kCorrelationId}});
GroupTfEvents(&space);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(host_plane);
const XEvent& cpu_tf_op = host_plane->lines(1).events(2);
EXPECT_EQ(cpu_tf_op.stats_size(), 2);
EXPECT_EQ(host_plane_visitor.GetStatType(cpu_tf_op.stats(1).metadata_id()),
StatType::kIsEager);
EXPECT_EQ(cpu_tf_op.stats(1).int64_value(), 0);
XPlaneVisitor device_plane_visitor = CreateTfXPlaneVisitor(device_plane);
const XEvent& gpu_kernel = device_plane->lines(0).events(0);
EXPECT_EQ(gpu_kernel.stats_size(), 3);
EXPECT_EQ(device_plane_visitor.GetStatType(gpu_kernel.stats(2).metadata_id()),
StatType::kIsEager);
EXPECT_EQ(gpu_kernel.stats(2).int64_value(), 0);
}
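// Generic producer/consumer stats with matching context type and id should
// link the consumer event into the root event's group.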
TEST(GroupEventsTest, SemanticArgTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kStepNum = 100;
constexpr int64_t kContextType = 123;
constexpr uint64 kContextId = 456;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto root_producer = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &root_producer, HostEventType::kTraceContext, 0, 100,
{{StatType::kIsRoot, kIsRoot}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&plane, &root_producer, HostEventType::kFunctionRun, 10, 90,
{{StatType::kProducerType, kContextType},
{StatType::kProducerId, kContextId}});
auto consumer = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &consumer, HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kConsumerType, kContextType},
{StatType::kConsumerId, kContextId}});
GroupTfEvents(&raw_space);
int num_events = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
num_events += line.NumEvents();
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
});
});
EXPECT_EQ(num_events, 3);
}
TEST(GroupEventsTest, SemanticIntArgNoMatchTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kStepNum = 100;
constexpr int64_t kContextType = 123;
constexpr uint64 kProducerId = 456;
constexpr uint64 kConsumerId = 789;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto root_producer = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &root_producer, HostEventType::kTraceContext, 0, 100,
{{StatType::kIsRoot, kIsRoot}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&plane, &root_producer, HostEventType::kFunctionRun, 10, 90,
{{StatType::kProducerType, kContextType},
{StatType::kProducerId, kProducerId}});
auto consumer = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &consumer, HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kConsumerType, kContextType},
{StatType::kConsumerId, kConsumerId}});
GroupTfEvents(&raw_space);
int num_events = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
num_events += line.NumEvents();
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
if (event.Type() == HostEventType::kExecutorStateProcess) {
EXPECT_FALSE(group_id.has_value());
} else {
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
}
});
});
EXPECT_EQ(num_events, 3);
}
TEST(GroupEventsTest, SemanticUintArgNoMatchTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kStepNum = 100;
constexpr int64_t kContextType = 123;
constexpr uint64 kProducerId = UINT64_MAX;
constexpr uint64 kConsumerId = UINT64_MAX - 1;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto root_producer = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &root_producer, HostEventType::kTraceContext, 0, 100,
{{StatType::kIsRoot, kIsRoot}, {StatType::kStepNum, kStepNum}});
CreateXEvent(&plane, &root_producer, HostEventType::kFunctionRun, 10, 90,
{{StatType::kProducerType, kContextType},
{StatType::kProducerId, kProducerId}});
auto consumer = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &consumer, HostEventType::kExecutorStateProcess, 20, 80,
{{StatType::kConsumerType, kContextType},
{StatType::kConsumerId, kConsumerId}});
GroupTfEvents(&raw_space);
int num_events = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
num_events += line.NumEvents();
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
if (event.Type() == HostEventType::kExecutorStateProcess) {
EXPECT_FALSE(group_id.has_value());
} else {
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
}
});
});
EXPECT_EQ(num_events, 3);
}
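// An event flagged kIsAsync must stay ungrouped even when it overlaps a root
// event, while the synchronous child is grouped normally.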
TEST(GroupEventsTest, AsyncEventTest) {
constexpr int64_t kIsRoot = 1;
constexpr int64_t kIsAsync = 1;
constexpr absl::string_view kParent = "parent";
constexpr absl::string_view kAsync = "async";
constexpr absl::string_view kChild = "child";
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(1);
auto line = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &line, kParent, 0, 100, {{StatType::kIsRoot, kIsRoot}});
CreateXEvent(&plane, &line, kAsync, 10, 200,
{{StatType::kIsAsync, kIsAsync}});
CreateXEvent(&plane, &line, kChild, 20, 80);
GroupTfEvents(&raw_space);
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
EXPECT_EQ(line.NumEvents(), 3);
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
if (event.Name() == kAsync) {
EXPECT_FALSE(group_id.has_value());
} else {
EXPECT_TRUE(group_id.has_value());
EXPECT_EQ(*group_id, 0);
}
});
});
}
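// Two BatchingSessionRun requests feeding one ProcessBatch: expects three
// groups, with the batch group listing both requests as parents and each
// request group listing the batch as its child.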
TEST(GroupEventsTest, BatchingSessionTest) {
constexpr absl::string_view kSchedule = "Schedule";
constexpr int64_t kBatchContextType =
static_cast<int64_t>(ContextType::kSharedBatchScheduler);
constexpr int64_t kBatchContextId = 123;
constexpr int64_t kBatchingSessionRunRootLevel = 1;
constexpr int64_t kProcessBatchRootLevel = 2;
XSpace raw_space;
XPlane* raw_plane = raw_space.add_planes();
XPlaneBuilder plane(raw_plane);
plane.ReserveLines(2);
auto request_thread = plane.GetOrCreateLine(0);
CreateXEvent(&plane, &request_thread, HostEventType::kBatchingSessionRun, 0,
100, {{StatType::kIsRoot, kBatchingSessionRunRootLevel}});
CreateXEvent(&plane, &request_thread, kSchedule, 0, 100,
{{StatType::kProducerType, kBatchContextType},
{StatType::kProducerId, kBatchContextId}});
CreateXEvent(&plane, &request_thread, HostEventType::kBatchingSessionRun, 200,
100, {{StatType::kIsRoot, kBatchingSessionRunRootLevel}});
CreateXEvent(&plane, &request_thread, kSchedule, 200, 100,
{{StatType::kProducerType, kBatchContextType},
{StatType::kProducerId, kBatchContextId}});
auto batch_thread = plane.GetOrCreateLine(1);
CreateXEvent(&plane, &batch_thread, HostEventType::kProcessBatch, 200, 100,
{{StatType::kConsumerType, kBatchContextType},
{StatType::kConsumerId, kBatchContextId},
{StatType::kIsRoot, kProcessBatchRootLevel}});
EventForest event_forest;
GroupTfEvents(&raw_space, &event_forest);
const GroupMetadataMap& group_metadata_map =
event_forest.GetGroupMetadataMap();
EXPECT_EQ(group_metadata_map.size(), 3);
EXPECT_EQ(group_metadata_map.at(0).parents.size(), 2);
EXPECT_EQ(group_metadata_map.at(1).children.size(), 1);
EXPECT_EQ(group_metadata_map.at(2).children.size(), 1);
uint64 num_checked = 0;
CreateTfXPlaneVisitor(raw_plane).ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
std::optional<int64_t> group_id;
if (std::optional<XStatVisitor> stat =
event.GetStat(StatType::kGroupId)) {
group_id = stat->IntValue();
}
EXPECT_TRUE(group_id.has_value());
if (line.Id() == 0 &&
event.Type() == HostEventType::kBatchingSessionRun) {
++num_checked;
} else if (line.Id() == 1 &&
event.Type() == HostEventType::kProcessBatch) {
++num_checked;
}
});
});
EXPECT_EQ(num_checked, 3);
}
TEST(GroupTPUEventsTest, TpuExecuteOpTest) {
tensorflow::profiler::XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(
&host_plane_builder, &main_thread, HostEventType::kExecutorStateProcess,
20, 50,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{456}}});
EventForest event_forest;
GroupTpuEventsOSS(&space, {}, &event_forest);
EXPECT_EQ(event_forest.GetGroupMetadataMap().size(), 1);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(&space.planes(0));
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
TEST(GroupTPUEventsTest, TpuRequestTest) {
tensorflow::profiler::XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kSessionRun, 0,
100, {{StatType::kIsRoot, int64_t{1}}});
CreateXEvent(&host_plane_builder, &main_thread,
GetHostEventTypeStr(HostEventType::kEnqueueRequestLocked), 20,
50,
{{StatType::kQueueAddr, int64_t{123}},
{StatType::kRequestId, int64_t{456}}});
EventForest event_forest;
GroupTpuEventsOSS(&space, {}, &event_forest);
EXPECT_EQ(event_forest.GetGroupMetadataMap().size(), 1);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(&space.planes(0));
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
TEST(GroupTPUEventsTest, TpuProgramCallbackTest) {
tensorflow::profiler::XSpace space;
XPlaneBuilder host_plane_builder(GetOrCreateHostXPlane(&space));
host_plane_builder.ReserveLines(1);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kSessionRun, 0,
100, {{StatType::kIsRoot, int64_t{1}}});
CreateXEvent(&host_plane_builder, &main_thread,
GetHostEventTypeStr(HostEventType::kDoEnqueueProgram), 20, 50,
{{StatType::kRunId, int64_t{123}},
{StatType::kQueueId, int64_t{0}},
{StatType::kDeviceOrdinal, int64_t{1}}});
EventForest event_forest;
GroupTpuEventsOSS(&space, {}, &event_forest);
EXPECT_EQ(event_forest.GetGroupMetadataMap().size(), 1);
XPlaneVisitor host_plane_visitor = CreateTfXPlaneVisitor(&space.planes(0));
host_plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
EXPECT_TRUE(event.GetStat(StatType::kGroupId).has_value());
});
});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/group_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/group_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ab1eb70-b950-474e-92b9-0937ea336b35 | cpp | tensorflow/tensorflow | timestamp_utils | third_party/xla/xla/tsl/profiler/utils/timestamp_utils.cc | third_party/xla/xla/tsl/profiler/utils/timestamp_utils_test.cc | #include "xla/tsl/profiler/utils/timestamp_utils.h"
#include <cstdint>
#include "absl/log/log.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
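// Records the profiling session's start/stop walltimes (in ns) as stats on
// the task-environment plane. Skipped with a warning if either timestamp is
// zero.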
void SetSessionTimestamps(uint64_t start_walltime_ns, uint64_t stop_walltime_ns,
tensorflow::profiler::XSpace& space) {
if (start_walltime_ns != 0 && stop_walltime_ns != 0) {
tsl::profiler::XPlaneBuilder plane(
tsl::profiler::FindOrAddMutablePlaneWithName(
&space, tsl::profiler::kTaskEnvPlaneName));
plane.AddStatValue(*plane.GetOrCreateStatMetadata(
GetTaskEnvStatTypeStr(kEnvProfileStartTime)),
start_walltime_ns);
plane.AddStatValue(*plane.GetOrCreateStatMetadata(
GetTaskEnvStatTypeStr(kEnvProfileStopTime)),
stop_walltime_ns);
} else {
LOG(WARNING) << "Not Setting Session Timestamps, (start_walltime_ns, "
"stop_walltime_ns) : "
<< start_walltime_ns << ", " << stop_walltime_ns;
}
}
}
} | #include "xla/tsl/profiler/utils/timestamp_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
using ::testing::Eq;
TEST(TimestampUtilsTest, StartAndStopTimestampAreAdded) {
XSpace xspace;
SetSessionTimestamps(1000, 2000, xspace);
const XPlane* xplane = FindPlaneWithName(xspace, kTaskEnvPlaneName);
XPlaneVisitor visitor(xplane, {}, {FindTaskEnvStatType});
auto start_time = visitor.GetStat(TaskEnvStatType::kEnvProfileStartTime);
auto stop_time = visitor.GetStat(TaskEnvStatType::kEnvProfileStopTime);
EXPECT_THAT(start_time->IntOrUintValue(), Eq(1000));
EXPECT_THAT(stop_time->IntOrUintValue(), Eq(2000));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/timestamp_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/timestamp_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f6caa2d5-6866-43cc-a9a0-f63d605eeb98 | cpp | tensorflow/tensorflow | buffer_pool | third_party/xla/xla/tsl/profiler/utils/buffer_pool.cc | third_party/xla/xla/tsl/profiler/utils/buffer_pool_test.cc | #include "xla/tsl/profiler/utils/buffer_pool.h"
#include <ios>
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
namespace tsl {
namespace profiler {
BufferPool::BufferPool(size_t buffer_size_in_bytes)
: buffer_size_in_bytes_(buffer_size_in_bytes) {}
BufferPool::~BufferPool() { DestroyAllBuffers(); }
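// Returns a buffer from the free list when one is available; otherwise
// allocates a new 8-byte-aligned buffer of buffer_size_in_bytes_. Returns
// nullptr if allocation fails.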
uint8_t* BufferPool::GetOrCreateBuffer() {
{
mutex_lock lock(buffers_mutex_);
if (!buffers_.empty()) {
uint8_t* buffer = buffers_.back();
buffers_.pop_back();
if (!buffer) {
LOG(ERROR) << "A reused buffer must not be null!";
return nullptr;
}
VLOG(3) << "Reused Buffer, buffer=" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec;
return buffer;
}
}
constexpr size_t kBufferAlignSize = 8;
uint8_t* buffer = reinterpret_cast<uint8_t*>(
port::AlignedMalloc(buffer_size_in_bytes_, kBufferAlignSize));
if (buffer == nullptr) {
LOG(WARNING) << "Buffer not allocated.";
return nullptr;
}
VLOG(3) << "Allocated Buffer, buffer=" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec
<< " size=" << buffer_size_in_bytes_;
return buffer;
}
void BufferPool::ReclaimBuffer(uint8_t* buffer) {
mutex_lock lock(buffers_mutex_);
buffers_.push_back(buffer);
VLOG(3) << "Reclaimed Buffer, buffer=" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec;
}
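// Frees every buffer currently held in the free list. Buffers still checked
// out by callers are not freed here; they must be reclaimed first.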
void BufferPool::DestroyAllBuffers() {
mutex_lock lock(buffers_mutex_);
for (uint8_t* buffer : buffers_) {
VLOG(3) << "Freeing Buffer, buffer:" << std::hex
<< reinterpret_cast<uintptr_t>(buffer) << std::dec;
port::AlignedFree(buffer);
}
buffers_.clear();
}
size_t BufferPool::GetBufferSizeInBytes() const {
return buffer_size_in_bytes_;
}
}
} | #include "xla/tsl/profiler/utils/buffer_pool.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(BufferPoolTest, GetOrCreateBufferAlloc) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(first_buffer, nullptr);
uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(second_buffer, first_buffer);
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
first_buffer[idx] = 0xAB;
}
buffer_pool.ReclaimBuffer(first_buffer);
buffer_pool.ReclaimBuffer(second_buffer);
}
TEST(BufferPoolTest, GetOrCreateBufferReuse) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(buffer, nullptr);
buffer[0] = 0xFF;
uint8_t* previous_buffer = buffer;
buffer_pool.ReclaimBuffer(buffer);
uint8_t* reused_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_EQ(reused_buffer, previous_buffer);
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
reused_buffer[idx] = 0xCD;
}
buffer_pool.ReclaimBuffer(reused_buffer);
}
TEST(BufferPoolTest, DestroyAllBuffers) {
constexpr size_t kBufferSizeInBytes = 32 * 1024;
BufferPool buffer_pool(kBufferSizeInBytes);
uint8_t* first_buffer = buffer_pool.GetOrCreateBuffer();
EXPECT_NE(first_buffer, nullptr);
buffer_pool.DestroyAllBuffers();
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
first_buffer[idx] = 0xEF;
}
uint8_t* second_buffer = buffer_pool.GetOrCreateBuffer();
for (size_t idx = 0; idx < kBufferSizeInBytes; ++idx) {
second_buffer[idx] = 0xAB;
}
buffer_pool.ReclaimBuffer(first_buffer);
buffer_pool.ReclaimBuffer(second_buffer);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/buffer_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/buffer_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc782095-8db1-4f12-bb84-d81e11395814 | cpp | tensorflow/tensorflow | parse_annotation | third_party/xla/xla/tsl/profiler/utils/parse_annotation.cc | third_party/xla/xla/tsl/profiler/utils/parse_annotation_test.cc | #include "xla/tsl/profiler/utils/parse_annotation.h"
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
namespace tsl {
namespace profiler {
namespace {
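// Splits an annotation of the form "name#metadata#" into {name, metadata}.
// Without a metadata suffix, the whole annotation is returned as the name
// with an empty metadata part.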
std::vector<absl::string_view> SplitNameAndMetadata(
absl::string_view annotation) {
std::vector<absl::string_view> parts;
if (!HasMetadata(annotation)) {
parts.emplace_back(annotation);
} else {
annotation.remove_suffix(1);
parts = absl::StrSplit(annotation, '#');
if (parts.size() > 2) {
parts.resize(2);
}
}
while (parts.size() < 2) {
parts.emplace_back();
}
return parts;
}
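// Splits the metadata string on top-level commas only, using a stack of open
// quotes/brackets so commas nested inside '...', "...", (), [], or {} do not
// terminate a pair.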
std::vector<absl::string_view> SplitPairs(absl::string_view metadata) {
std::vector<absl::string_view> key_value_pairs;
std::stack<char> quotes;
size_t start = 0, end = 0;
for (; end < metadata.size(); ++end) {
char ch = metadata[end];
switch (ch) {
case '\"':
case '\'':
if (quotes.empty() || quotes.top() != ch) {
quotes.push(ch);
} else {
quotes.pop();
}
break;
case '{':
case '(':
case '[':
quotes.push(ch);
break;
case '}':
if (!quotes.empty() && quotes.top() == '{') {
quotes.pop();
}
break;
case ')':
if (!quotes.empty() && quotes.top() == '(') {
quotes.pop();
}
break;
case ']':
if (!quotes.empty() && quotes.top() == '[') {
quotes.pop();
}
break;
case ',':
if (quotes.empty()) {
if (end - start > 1) {
key_value_pairs.emplace_back(metadata.data() + start, end - start);
}
start = end + 1;
}
break;
}
}
if (end - start > 1) {
key_value_pairs.emplace_back(metadata.data() + start, end - start);
}
return key_value_pairs;
}
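// Parses comma-separated "key=value" pairs, splitting each on the first '='
// and dropping entries whose key or value is empty after whitespace trimming.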
std::vector<std::pair<absl::string_view, absl::string_view>> ParseMetadata(
absl::string_view metadata) {
std::vector<std::pair<absl::string_view, absl::string_view>> key_values;
for (absl::string_view pair : SplitPairs(metadata)) {
std::vector<absl::string_view> parts =
absl::StrSplit(pair, absl::MaxSplits('=', 1));
if (parts.size() == 2) {
absl::string_view key = absl::StripAsciiWhitespace(parts[0]);
absl::string_view value = absl::StripAsciiWhitespace(parts[1]);
if (!key.empty() && !value.empty()) {
key_values.push_back({key, value});
}
}
}
return key_values;
}
}
Annotation ParseAnnotation(absl::string_view annotation) {
Annotation result;
std::vector<absl::string_view> parts = SplitNameAndMetadata(annotation);
if (!parts.empty()) {
result.name = absl::StripAsciiWhitespace(parts[0]);
for (const auto& key_value : ParseMetadata(parts[1])) {
result.metadata.push_back({key_value.first, key_value.second});
}
}
return result;
}
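// An annotation stack is a "::"-delimited list of annotations, outermost
// first.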
std::vector<Annotation> ParseAnnotationStack(
absl::string_view annotation_stack) {
std::vector<Annotation> annotations;
const std::string kAnnotationDelimiter = "::";
for (absl::string_view annotation : absl::StrSplit(
annotation_stack, kAnnotationDelimiter, absl::SkipEmpty())) {
annotations.emplace_back(ParseAnnotation(annotation));
}
return annotations;
}
}
} | #include "xla/tsl/profiler/utils/parse_annotation.h"
#include <vector>
#include "absl/strings/string_view.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(ParseAnnotationStackTest, EmptyAnnotationStackTest) {
std::vector<Annotation> annotations = ParseAnnotationStack("");
ASSERT_TRUE(annotations.empty());
}
TEST(ParseAnnotationStackTest, SingleAnnotationStackTest) {
std::vector<Annotation> annotations = ParseAnnotationStack("name");
ASSERT_FALSE(annotations.empty());
EXPECT_EQ(annotations.back().name, "name");
EXPECT_TRUE(annotations.back().metadata.empty());
}
TEST(ParseAnnotationStackTest, MultiLevelAnnotationStackTest) {
std::vector<Annotation> annotations = ParseAnnotationStack("outer::inner");
ASSERT_EQ(annotations.size(), 2);
EXPECT_EQ(annotations.front().name, "outer");
EXPECT_TRUE(annotations.front().metadata.empty());
EXPECT_EQ(annotations.back().name, "inner");
EXPECT_TRUE(annotations.back().metadata.empty());
}
TEST(ParseAnnotationTest, EmptyAnnotationTest) {
Annotation annotation = ParseAnnotation("");
EXPECT_TRUE(annotation.name.empty());
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, SimpleNameTest) {
Annotation annotation = ParseAnnotation("name");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, SimpleNameWithWhitespaceTest) {
Annotation annotation = ParseAnnotation("name ");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, EmptyMetadataTest) {
Annotation annotation = ParseAnnotation("name#");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
annotation = ParseAnnotation("name1##");
EXPECT_EQ(annotation.name, "name1");
EXPECT_TRUE(annotation.metadata.empty());
annotation = ParseAnnotation("name2###");
EXPECT_EQ(annotation.name, "name2");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, SingleMetadataTest) {
Annotation annotation = ParseAnnotation("name#key=value#");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 1);
EXPECT_EQ(annotation.metadata.at(0).key, "key");
EXPECT_EQ(annotation.metadata.at(0).value, "value");
}
TEST(ParseAnnotationTest, MultipleMetadataTest) {
Annotation annotation = ParseAnnotation("name#k1=v1,k2=v2,k3=v3#");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 3);
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "v1");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "v2");
EXPECT_EQ(annotation.metadata.at(2).key, "k3");
EXPECT_EQ(annotation.metadata.at(2).value, "v3");
}
TEST(ParseAnnotationTest, MultipleMetadataWithWhitespaceTest) {
Annotation annotation = ParseAnnotation("name # k1 = v1, ,k2=v2 #");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 2);
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "v1");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "v2");
}
TEST(ParseAnnotationTest, KeyValueSeparatorTest) {
Annotation annotation = ParseAnnotation("name#=v1,k2=,k3==v3,k4=v4=#");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 2);
EXPECT_EQ(annotation.metadata.at(0).key, "k3");
EXPECT_EQ(annotation.metadata.at(0).value, "=v3");
EXPECT_EQ(annotation.metadata.at(1).key, "k4");
EXPECT_EQ(annotation.metadata.at(1).value, "v4=");
}
TEST(ParseAnnotationTest, ExtraMetadataSeparatorTest) {
Annotation annotation = ParseAnnotation("name##k1=v1#");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, QuotedMetadata) {
Annotation annotation = ParseAnnotation(
"name#k1=(v11,v12),k2=[v21,v22,v23],k3={v31,v32}, k4=\"v41,v42\","
"(k51,k52)='v51,v52'#");
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "(v11,v12)");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "[v21,v22,v23]");
EXPECT_EQ(annotation.metadata.at(2).key, "k3");
EXPECT_EQ(annotation.metadata.at(2).value, "{v31,v32}");
EXPECT_EQ(annotation.metadata.at(3).key, "k4");
EXPECT_EQ(annotation.metadata.at(3).value, "\"v41,v42\"");
EXPECT_EQ(annotation.metadata.at(4).key, "(k51,k52)");
EXPECT_EQ(annotation.metadata.at(4).value, "'v51,v52'");
}
TEST(ParseAnnotationTest, UnmatchedQuotedMetadata) {
Annotation annotation = ParseAnnotation("name#k1=v1,k2=(v2,k3=v3#");
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "v1");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "(v2,k3=v3");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/parse_annotation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/parse_annotation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bc61fe2-d66b-43c8-afde-73c141c7c124 | cpp | tensorflow/tensorflow | preprocess_xplane | third_party/xla/xla/tsl/profiler/utils/preprocess_xplane.cc | third_party/xla/xla/tsl/profiler/utils/preprocess_xplane_test.cc | #include "xla/tsl/profiler/utils/preprocess_xplane.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::tsl::profiler::HostEventType;
using ::tsl::profiler::StatType;
using ::tsl::profiler::XEventBuilder;
using ::tsl::profiler::XLineBuilder;
using ::tsl::profiler::XPlane;
using ::tsl::profiler::XPlaneBuilder;
using ::tsl::profiler::XSpace;
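// Instantiates every mutator for this plane, indexes the event-scoped ones by
// their event metadata id, and then walks each line: line-level mutators run
// first, followed by the per-event mutators matching each event's metadata.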
void MutateXPlane(XPlane& plane,
const std::vector<std::unique_ptr<XplaneEventMutatorFactory>>&
mutator_factories) {
XPlaneBuilder plane_builder(&plane);
absl::flat_hash_map<int64_t, std::vector<std::unique_ptr<XplaneEventMutator>>>
mutators_from_event_metadata_id;
std::vector<std::unique_ptr<XplaneEventMutator>> line_mutators;
for (const auto& mutator_factory : mutator_factories) {
auto mutators = mutator_factory->CreateMutators(plane_builder);
for (auto& mutator : mutators) {
if (mutator->event_metadata()) {
auto id = mutator->event_metadata()->id();
mutators_from_event_metadata_id[id].push_back(std::move(mutator));
} else {
line_mutators.push_back(std::move(mutator));
}
}
}
if (mutators_from_event_metadata_id.empty() && line_mutators.empty()) {
return;
}
plane_builder.ForEachLine([&](XLineBuilder line_builder) {
for (const auto& mutator : line_mutators) {
mutator->MutateEventsInLine(line_builder);
}
if (mutators_from_event_metadata_id.empty()) return;
line_builder.ForEachEvent([&](XEventBuilder event_builder) {
auto event_mutators =
mutators_from_event_metadata_id.find(event_builder.MetadataId());
if (event_mutators != mutators_from_event_metadata_id.end()) {
for (const auto& mutator : event_mutators->second) {
mutator->Mutate(event_builder);
}
}
});
});
}
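// Assembles the standard preprocessing passes: threadpool line rewriting,
// root-level marking for batching events, the legacy executor->TPU
// connection, per-request-type queue connections, host run-id rewrites, the
// enqueue->complete-callbacks launch connection, and TPU module line
// derivation.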
std::vector<std::unique_ptr<XplaneEventMutatorFactory>>
CreateMutatorFactories() {
std::vector<std::unique_ptr<XplaneEventMutatorFactory>> mutator_factories;
mutator_factories.push_back(ThreadpoolLineMutatorFactory::CreateFactory());
mutator_factories.push_back(XplaneRootEventMutatorFactory::CreateFactory(
HostEventType::kProcessBatch, 2));
mutator_factories.push_back(XplaneRootEventMutatorFactory::CreateFactory(
HostEventType::kBatchingSessionRun, 1));
mutator_factories.push_back(
XplaneConnectedEventMutatorFactory<
HostEventType::kExecutorStateProcess,
HostEventType::kTpuExecuteOp, ContextType::kLegacy,
false,
XContextStatsAccessor<uint64_t, StatType::kStepId>,
XContextStatsAccessor<uint64_t,
StatType::kIterNum>>::CreateFactory());
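  // Each pair below connects a host-side enqueue to the matching
  // dequeue-side request, keyed by (request id, queue address) in the TPU
  // stream context.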
#define ADD_QUEUE_CONNECTION(__enque_event__, __deque_event__) \
mutator_factories.push_back( \
XplaneConnectedEventMutatorFactory< \
HostEventType::__enque_event__, HostEventType::__deque_event__, \
ContextType::kTpuStream, true, \
XContextStatsAccessor<uint64, StatType::kRequestId>, \
XContextStatsAccessor<uint64, \
StatType::kQueueAddr>>::CreateFactory())
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kRunProgramRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kHostCallbackRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kTransferH2DRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kTransferPreprocessedH2DRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kTransferD2HRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceSendRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceRecvRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceSendRecvLocalRequest);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kCustomWait);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceSendRequestMulti);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kOnDeviceRecvRequestMulti);
ADD_QUEUE_CONNECTION(kEnqueueRequestLocked, kPjrtAsyncWait);
#undef ADD_QUEUE_CONNECTION
mutator_factories.push_back(
HostRunIdMutatorFactory<
HostEventType::kDoEnqueueProgram>::CreateFactory());
mutator_factories.push_back(
HostRunIdMutatorFactory<
HostEventType::kCompleteCallbacks>::CreateFactory());
mutator_factories.push_back(
HostRunIdMutatorFactory<
HostEventType::kDoEnqueueContinuationProgram>::CreateFactory());
mutator_factories.push_back(
XplaneConnectedEventMutatorFactory<
HostEventType::kDoEnqueueProgram,
HostEventType::kCompleteCallbacks,
ContextType::kTpuLaunch,
true,
XContextStatsAccessor<uint64_t, StatType::kDeviceOrdinal>,
XContextStatsAccessor<uint64_t, StatType::kQueueId>,
XContextStatsAccessor<uint64_t, StatType::kRunId>,
XContextStatsAccessorWithDefault<uint64_t, StatType::kCoreType,
0ULL>>::CreateFactory());
mutator_factories.push_back(TpuModuleLineMutatorFactory::CreateFactory());
return mutator_factories;
}
}
void PreprocessXPlane(XPlane* plane) {
if (plane == nullptr) return;
auto mutator_factories = CreateMutatorFactories();
MutateXPlane(*plane, mutator_factories);
}
void PreprocessXSpace(XSpace* space) {
if (space == nullptr) return;
auto mutator_factories = CreateMutatorFactories();
for (XPlane& plane : *space->mutable_planes()) {
MutateXPlane(plane, mutator_factories);
}
}
}
} | #include "xla/tsl/profiler/utils/preprocess_xplane.h"
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_test_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using ::tsl::profiler::CreateTfXPlaneVisitor;
using ::tsl::profiler::CreateXEvent;
using ::tsl::profiler::GetHostEventTypeStr;
using ::tsl::profiler::HostEventType;
using ::tsl::profiler::StatType;
using ::tsl::profiler::XEventVisitor;
using ::tsl::profiler::XLineVisitor;
using ::tsl::profiler::XPlane;
using ::tsl::profiler::XPlaneBuilder;
using ::tsl::profiler::XPlaneVisitor;
using ::tsl::profiler::XSpace;
TEST(PreprocessXPlane, IsRootStatsTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(1);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kProcessBatch), 100, 100);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kBatchingSessionRun), 200,
100);
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
ASSERT_TRUE(event.GetStat(StatType::kIsRoot).has_value());
int64_t is_root = event.GetStat(StatType::kIsRoot)->IntValue();
if (event.Type() == HostEventType::kBatchingSessionRun) {
EXPECT_EQ(is_root, 1);
} else if (event.Type() == HostEventType::kProcessBatch) {
EXPECT_EQ(is_root, 2);
} else {
CHECK(false);
}
});
});
}
TEST(PreprocessXPlane, ProducerConsumerTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kExecutorStateProcess), 100, 100,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{456}}});
line_builder = plane_builder.GetOrCreateLine(1);
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{456}}});
PreprocessXSpace(&space);
std::optional<uint64_t> producer_context_id, consumer_context_id;
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kExecutorStateProcess) {
auto producer_type = event.GetStat(StatType::kProducerType);
ASSERT_TRUE(producer_type.has_value());
EXPECT_EQ(producer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto producer_id = event.GetStat(StatType::kProducerId);
ASSERT_TRUE(producer_id.has_value());
producer_context_id = producer_id->IntOrUintValue();
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto consumer_type = event.GetStat(StatType::kConsumerType);
ASSERT_TRUE(consumer_type.has_value());
EXPECT_EQ(consumer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto consumer_id = event.GetStat(StatType::kConsumerId);
ASSERT_TRUE(consumer_id.has_value());
consumer_context_id = consumer_id->IntOrUintValue();
} else {
CHECK(false);
}
});
});
ASSERT_TRUE(producer_context_id && consumer_context_id);
ASSERT_EQ(*producer_context_id, *consumer_context_id);
}
TEST(PreprocessXPlane, ProducerConsumerNotMatchedTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kExecutorStateProcess), 100,
100,
{{StatType::kStepId, int64_t{123}},
{StatType::kIterNum, int64_t{456}},
{StatType::kDeviceOrdinal, int64_t{789}}});
line_builder = plane_builder.GetOrCreateLine(1);
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kStepId, int64_t{123}}, {StatType::kIterNum, int64_t{789}}});
PreprocessXSpace(&space);
std::optional<uint64_t> producer_context_id, consumer_context_id;
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kExecutorStateProcess) {
auto producer_type = event.GetStat(StatType::kProducerType);
ASSERT_TRUE(producer_type.has_value());
EXPECT_EQ(producer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto producer_id = event.GetStat(StatType::kProducerId);
ASSERT_TRUE(producer_id.has_value());
producer_context_id = producer_id->IntOrUintValue();
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto consumer_type = event.GetStat(StatType::kConsumerType);
ASSERT_TRUE(consumer_type.has_value());
EXPECT_EQ(consumer_type->IntValue(),
static_cast<int64_t>(ContextType::kLegacy));
auto consumer_id = event.GetStat(StatType::kConsumerId);
ASSERT_TRUE(consumer_id.has_value());
consumer_context_id = consumer_id->IntOrUintValue();
} else {
CHECK(false);
}
});
});
ASSERT_TRUE(producer_context_id && consumer_context_id);
ASSERT_NE(*producer_context_id, *consumer_context_id);
}
TEST(PreprocessXPlane, MissingLegacyStatTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kExecutorStateProcess), 100,
100, {{StatType::kStepId, int64_t{123}}});
line_builder = plane_builder.GetOrCreateLine(1);
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kStepId, int64_t{123}}});
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kExecutorStateProcess) {
auto producer_type = event.GetStat(StatType::kProducerType);
ASSERT_FALSE(producer_type.has_value());
auto producer_id = event.GetStat(StatType::kProducerId);
ASSERT_FALSE(producer_id.has_value());
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto consumer_type = event.GetStat(StatType::kConsumerType);
ASSERT_FALSE(consumer_type.has_value());
auto consumer_id = event.GetStat(StatType::kConsumerId);
ASSERT_FALSE(consumer_id.has_value());
} else {
CHECK(false);
}
});
});
}
TEST(PreprocessXPlane, HostRunIdPreprocessorTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
plane_builder.ReserveLines(2);
auto line_builder = plane_builder.GetOrCreateLine(0);
int64_t host_run_id = int64_t{582974244};
int64_t device_run_id = int64_t{46103332};
CreateXEvent(
&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kDoEnqueueContinuationProgram), 100,
100, {});
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kDoEnqueueProgram), 100, 100,
{{StatType::kRunId, int64_t{host_run_id}}});
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kTpuExecuteOp), 200, 100,
{{StatType::kRunId, int64_t{device_run_id}}});
CreateXEvent(&plane_builder, &line_builder,
GetHostEventTypeStr(HostEventType::kCompleteCallbacks), 300, 100,
{{StatType::kRunId, int64_t{host_run_id}}});
line_builder = plane_builder.GetOrCreateLine(1);
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Type() == HostEventType::kDoEnqueueContinuationProgram) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_FALSE(run_id.has_value());
} else if (event.Type() == HostEventType::kDoEnqueueProgram) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_TRUE(run_id.has_value());
ASSERT_EQ(run_id->IntValue(), device_run_id);
} else if (event.Type() == HostEventType::kTpuExecuteOp) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_TRUE(run_id.has_value());
ASSERT_EQ(run_id->IntValue(), device_run_id);
} else if (event.Type() == HostEventType::kCompleteCallbacks) {
auto run_id = event.GetStat(StatType::kRunId);
ASSERT_TRUE(run_id.has_value());
ASSERT_EQ(run_id->IntValue(), device_run_id);
} else {
CHECK(false);
}
});
});
}
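// The threadpool mutator should synthesize a kThreadpoolListenerRegion event
// spanning the start/stop markers (200..300 ps here) and carry the consumer
// id over from the markers.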
TEST(PreprocessXPlane, ThreadPoolPreprocessorTest) {
XSpace space;
XPlane* plane = space.add_planes();
XPlaneBuilder plane_builder(plane);
auto main_line = plane_builder.GetOrCreateLine(0);
CreateXEvent(&plane_builder, &main_line, kThreadpoolListenerRecord, 100, 100,
{{StatType::kProducerType,
static_cast<int64_t>(ContextType::kThreadpoolEvent)},
{StatType::kProducerId, int64_t{123}}});
auto thread_pool_line = plane_builder.GetOrCreateLine(1);
CreateXEvent(&plane_builder, &thread_pool_line,
kThreadpoolListenerStartRegion, 200, 0,
{{StatType::kConsumerType,
static_cast<int64_t>(ContextType::kThreadpoolEvent)},
{StatType::kConsumerId, int64_t{123}}});
CreateXEvent(&plane_builder, &thread_pool_line, kThreadpoolListenerStopRegion,
300, 0,
{{StatType::kConsumerType,
static_cast<int64_t>(ContextType::kThreadpoolEvent)},
{StatType::kConsumerId, int64_t{123}}});
bool new_event_added = false;
PreprocessXSpace(&space);
XPlaneVisitor plane_visitor = CreateTfXPlaneVisitor(plane);
plane_visitor.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == kThreadpoolListenerRegion) {
new_event_added = true;
EXPECT_EQ(event.DurationPs(), 100);
EXPECT_EQ(event.TimestampPs(), 200);
auto stat = event.GetStat(StatType::kConsumerId);
EXPECT_TRUE(stat.has_value());
EXPECT_EQ(stat->IntOrUintValue(), 123);
}
});
});
EXPECT_TRUE(new_event_added);
}
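// Regression test: an accessor whose stat metadata was never created must
// fail Initialize() and have GetStat return nullopt instead of dereferencing
// a null metadata pointer.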
TEST(PreprocessXPlane, XContextStatsAccessorNPETest) {
auto xplane = std::make_unique<XPlane>();
XPlaneBuilder xplane_builder(xplane.get());
XLine xline;
XLineBuilder xline_builder(&xline, &xplane_builder);
XEvent xevent;
XEventBuilder xevent_builder(&xline, &xplane_builder, &xevent);
XContextStatsAccessor<int64_t, StatType::kRunId> run_id_accessor;
ASSERT_FALSE(run_id_accessor.Initialize(xplane_builder));
EXPECT_EQ(run_id_accessor.GetStat(xevent_builder), std::nullopt);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/preprocess_xplane.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/preprocess_xplane_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
62e61c22-dfcd-4e95-b89f-f33ebb6697b3 | cpp | tensorflow/tensorflow | remote_profiler_session_manager | third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager.cc | third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager_test.cc | #include "xla/tsl/profiler/rpc/client/remote_profiler_session_manager.h"
#include <cstddef>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/profiler/rpc/client/profiler_client.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace profiler {
using tensorflow::ProfileRequest;
using tensorflow::RemoteProfilerSessionManagerOptions;
std::unique_ptr<RemoteProfilerSessionManager>
RemoteProfilerSessionManager::Create(
const RemoteProfilerSessionManagerOptions& options,
const ProfileRequest& request, absl::Status& out_status,
AddressResolver resolver) {
VLOG(1) << "Creating a RemoteProfilerSessionManager.";
auto session_manager = absl::WrapUnique(
new RemoteProfilerSessionManager(options, request, resolver));
out_status = session_manager->Init();
if (!out_status.ok()) {
return nullptr;
}
return session_manager;
}
RemoteProfilerSessionManager::RemoteProfilerSessionManager(
RemoteProfilerSessionManagerOptions options, ProfileRequest request,
AddressResolver resolver)
: options_(options), request_(request) {
if (resolver) {
resolver_ = resolver;
} else {
resolver_ = [](absl::string_view addr) { return std::string(addr); };
}
}
RemoteProfilerSessionManager::~RemoteProfilerSessionManager() {
VLOG(2) << "Destroying RemoteProfilerSessionManager.";
}
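// Computes the RPC deadline from the session creation timestamp plus
// max_session_duration_ms, resolves each service address, and starts one
// Profile RPC per address.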
absl::Status RemoteProfilerSessionManager::Init() {
mutex_lock lock(mutex_);
VLOG(1) << "SessionManager initializing.";
const absl::Time session_created_ts =
absl::FromUnixNanos(options_.session_creation_timestamp_ns());
const absl::Time deadline =
session_created_ts +
absl::Milliseconds(options_.max_session_duration_ms());
LOG(INFO) << "Deadline set to " << deadline
<< " because max_session_duration_ms was "
<< options_.max_session_duration_ms()
<< " and session_creation_timestamp_ns was "
<< options_.session_creation_timestamp_ns() << " ["
<< session_created_ts << "]";
clients_.reserve(options_.service_addresses_size());
ProfileRequest request = request_;
for (auto& service_address : options_.service_addresses()) {
std::string resolved_service_address = resolver_(service_address);
request.set_host_name(resolved_service_address);
auto client = RemoteProfilerSession::Create(resolved_service_address,
deadline, request);
clients_.push_back(std::move(client));
}
LOG(INFO) << "Issued Profile gRPC to " << clients_.size() << " clients";
return absl::OkStatus();
}
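// Blocks until every outstanding Profile RPC finishes, collecting each
// client's status, response, and resolved service address.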
std::vector<RemoteProfilerSessionManager::Response>
RemoteProfilerSessionManager::WaitForCompletion() {
mutex_lock lock(mutex_);
std::vector<RemoteProfilerSessionManager::Response> remote_responses(
clients_.size());
for (int32_t idx = 0; idx < clients_.size(); ++idx) {
auto& remote_response = remote_responses[idx];
auto* client = clients_[idx].get();
remote_response.profile_response =
client->WaitForCompletion(remote_response.status);
remote_response.service_address = std::string(client->GetServiceAddress());
}
return remote_responses;
}
}
} | #include "xla/tsl/profiler/rpc/client/remote_profiler_session_manager.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/profiler/rpc/client/profiler_client_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
#include "tsl/profiler/protobuf/profiler_service.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::ProfileRequest;
using tensorflow::RemoteProfilerSessionManagerOptions;
using ::tsl::profiler::test::DurationApproxLess;
using ::tsl::profiler::test::DurationNear;
using ::tsl::profiler::test::StartServer;
using ::tsl::testing::TmpDir;
using Response = tsl::profiler::RemoteProfilerSessionManager::Response;
constexpr double kGracePeriodSeconds = 10.0;
ProfileRequest PopulateProfileRequest(
absl::string_view repository_root, absl::string_view session_id,
absl::string_view host_name,
const RemoteProfilerSessionManagerOptions& options) {
constexpr uint64 kMaxEvents = 1000000;
const absl::string_view kXPlanePb = "xplane.pb";
ProfileRequest request;
request.set_duration_ms(options.profiler_options().duration_ms());
request.set_max_events(kMaxEvents);
request.set_repository_root(repository_root.data(), repository_root.size());
request.set_session_id(session_id.data(), session_id.size());
request.set_host_name(host_name.data(), host_name.size());
request.add_tools(kXPlanePb.data(), kXPlanePb.size());
*request.mutable_opts() = options.profiler_options();
return request;
}
TEST(RemoteProfilerSessionManagerTest, Simple) {
absl::Duration duration = absl::Milliseconds(30);
RemoteProfilerSessionManagerOptions options;
*options.mutable_profiler_options() = tsl::ProfilerSession::DefaultOptions();
options.mutable_profiler_options()->set_duration_ms(
absl::ToInt64Milliseconds(duration));
std::string service_address;
auto server = StartServer(duration, &service_address);
options.add_service_addresses(service_address);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(kGracePeriodSeconds);
absl::Duration max_duration = duration + grace;
options.set_max_session_duration_ms(absl::ToInt64Milliseconds(max_duration));
options.set_session_creation_timestamp_ns(absl::ToUnixNanos(approx_start));
ProfileRequest request =
PopulateProfileRequest(TmpDir(), "session_id", service_address, options);
absl::Status status;
auto sessions =
RemoteProfilerSessionManager::Create(options, request, status);
EXPECT_TRUE(status.ok());
std::vector<Response> responses = sessions->WaitForCompletion();
absl::Duration elapsed = absl::Now() - approx_start;
ASSERT_EQ(responses.size(), 1);
EXPECT_TRUE(responses.back().status.ok());
EXPECT_TRUE(responses.back().profile_response->empty_trace());
EXPECT_EQ(responses.back().profile_response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
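// A session_creation_timestamp_ns of 0 puts the deadline far in the past, so
// the RPC should fail almost immediately with DEADLINE_EXCEEDED.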
TEST(RemoteProfilerSessionManagerTest, ExpiredDeadline) {
absl::Duration duration = absl::Milliseconds(30);
RemoteProfilerSessionManagerOptions options;
*options.mutable_profiler_options() = tsl::ProfilerSession::DefaultOptions();
options.mutable_profiler_options()->set_duration_ms(
absl::ToInt64Milliseconds(duration));
std::string service_address;
auto server = StartServer(duration, &service_address);
options.add_service_addresses(service_address);
absl::Duration grace = absl::Seconds(kGracePeriodSeconds);
absl::Duration max_duration = duration + grace;
options.set_max_session_duration_ms(absl::ToInt64Milliseconds(max_duration));
options.set_session_creation_timestamp_ns(0);
absl::Time approx_start = absl::Now();
ProfileRequest request =
PopulateProfileRequest(TmpDir(), "session_id", service_address, options);
absl::Status status;
auto sessions =
RemoteProfilerSessionManager::Create(options, request, status);
EXPECT_TRUE(status.ok());
std::vector<Response> responses = sessions->WaitForCompletion();
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_THAT(elapsed, DurationNear(absl::Seconds(0)));
ASSERT_EQ(responses.size(), 1);
EXPECT_TRUE(absl::IsDeadlineExceeded(responses.back().status));
EXPECT_TRUE(responses.back().profile_response->empty_trace());
EXPECT_EQ(responses.back().profile_response->tool_data_size(), 0);
}
TEST(RemoteProfilerSessionManagerTest, LongSession) {
absl::Duration duration = absl::Seconds(3);
RemoteProfilerSessionManagerOptions options;
*options.mutable_profiler_options() = tsl::ProfilerSession::DefaultOptions();
options.mutable_profiler_options()->set_duration_ms(
absl::ToInt64Milliseconds(duration));
std::string service_address;
auto server = StartServer(duration, &service_address);
options.add_service_addresses(service_address);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(kGracePeriodSeconds);
absl::Duration max_duration = duration + grace;
options.set_max_session_duration_ms(absl::ToInt64Milliseconds(max_duration));
options.set_session_creation_timestamp_ns(absl::ToUnixNanos(approx_start));
ProfileRequest request =
PopulateProfileRequest(TmpDir(), "session_id", service_address, options);
absl::Status status;
auto sessions =
RemoteProfilerSessionManager::Create(options, request, status);
EXPECT_TRUE(status.ok());
std::vector<Response> responses = sessions->WaitForCompletion();
absl::Duration elapsed = absl::Now() - approx_start;
ASSERT_EQ(responses.size(), 1);
EXPECT_TRUE(responses.back().status.ok());
EXPECT_TRUE(responses.back().profile_response->empty_trace());
EXPECT_EQ(responses.back().profile_response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/remote_profiler_session_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8e70257-c930-4276-aa31-a78ba89ae267 | cpp | tensorflow/tensorflow | profiler_client | third_party/xla/xla/tsl/profiler/rpc/client/profiler_client.cc | third_party/xla/xla/tsl/profiler/rpc/client/profiler_client_test.cc | #include "xla/tsl/profiler/rpc/client/profiler_client.h"
#include <limits>
#include <memory>
#include "absl/memory/memory.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/grpcpp.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/types.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::MonitorRequest;
using tensorflow::MonitorResponse;
using tensorflow::NewProfileSessionRequest;
using tensorflow::NewProfileSessionResponse;
using tensorflow::ProfileRequest;
using tensorflow::ProfileResponse;
inline absl::Status FromGrpcStatus(const ::grpc::Status& s) {
return s.ok() ? absl::OkStatus()
: absl::Status(static_cast<absl::StatusCode>(s.error_code()),
s.error_message());
}
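// This conversion relies on grpc::StatusCode and absl::StatusCode sharing
// the same numeric values, e.g. FromGrpcStatus(::grpc::Status::OK).ok() is
// true, and a ::grpc::StatusCode::UNAVAILABLE status maps to
// absl::StatusCode::kUnavailable.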
template <typename T>
std::unique_ptr<typename T::Stub> CreateStub(
const std::string& service_address) {
::grpc::ChannelArguments channel_args;
channel_args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
auto channel = ::grpc::CreateCustomChannel(
service_address, ::grpc::InsecureChannelCredentials(), channel_args);
if (!channel) {
LOG(ERROR) << "Unable to create channel" << service_address;
return nullptr;
}
return T::NewStub(channel);
}
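// Example instantiation (a minimal sketch; "localhost:6009" is a
// placeholder address, not one used by this file):
//   auto stub =
//       CreateStub<tensorflow::grpc::ProfilerService>("localhost:6009");
//   if (stub == nullptr) { /* channel creation failed; already logged */ }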
}
absl::Status ProfileGrpc(const std::string& service_address,
const ProfileRequest& request,
ProfileResponse* response) {
::grpc::ClientContext context;
std::unique_ptr<tensorflow::grpc::ProfilerService::Stub> stub =
CreateStub<tensorflow::grpc::ProfilerService>(service_address);
TF_RETURN_IF_ERROR(
FromGrpcStatus(stub->Profile(&context, request, response)));
return absl::OkStatus();
}
absl::Status NewSessionGrpc(const std::string& service_address,
const NewProfileSessionRequest& request,
NewProfileSessionResponse* response) {
::grpc::ClientContext context;
std::unique_ptr<tensorflow::grpc::ProfileAnalysis::Stub> stub =
CreateStub<tensorflow::grpc::ProfileAnalysis>(service_address);
TF_RETURN_IF_ERROR(
FromGrpcStatus(stub->NewSession(&context, request, response)));
return absl::OkStatus();
}
absl::Status MonitorGrpc(const std::string& service_address,
const MonitorRequest& request,
MonitorResponse* response) {
::grpc::ClientContext context;
std::unique_ptr<tensorflow::grpc::ProfilerService::Stub> stub =
CreateStub<tensorflow::grpc::ProfilerService>(service_address);
TF_RETURN_IF_ERROR(
FromGrpcStatus(stub->Monitor(&context, request, response)));
return absl::OkStatus();
}
std::unique_ptr<RemoteProfilerSession> RemoteProfilerSession::Create(
const std::string& service_address, absl::Time deadline,
const ProfileRequest& profile_request) {
auto instance = absl::WrapUnique(
new RemoteProfilerSession(service_address, deadline, profile_request));
instance->ProfileAsync();
return instance;
}
RemoteProfilerSession::RemoteProfilerSession(
const std::string& service_address, absl::Time deadline,
const ProfileRequest& profile_request)
    : response_(std::make_unique<ProfileResponse>()),
service_address_(service_address),
stub_(CreateStub<tensorflow::grpc::ProfilerService>(service_address_)),
deadline_(deadline),
profile_request_(profile_request) {
response_->set_empty_trace(true);
}
RemoteProfilerSession::~RemoteProfilerSession() {
absl::Status dummy;
WaitForCompletion(dummy);
grpc_context_.TryCancel();
}
void RemoteProfilerSession::ProfileAsync() {
LOG(INFO) << "Asynchronous gRPC Profile() to " << service_address_;
grpc_context_.set_deadline(absl::ToChronoTime(deadline_));
VLOG(1) << "Deadline set to " << deadline_;
rpc_ = stub_->AsyncProfile(&grpc_context_, profile_request_, &cq_);
rpc_->Finish(response_.get(), &grpc_status_,
static_cast<void*>(&status_on_completion_));
VLOG(2) << "Asynchronous gRPC Profile() issued." << absl::Now();
}
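// The address of status_on_completion_ serves as the completion-queue tag:
// when the RPC finishes, cq_.Next() in WaitForCompletion() yields this
// pointer back, tying the completion event to this session.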
std::unique_ptr<ProfileResponse> RemoteProfilerSession::WaitForCompletion(
absl::Status& out_status) {
if (!response_) {
out_status = errors::FailedPrecondition(
"WaitForCompletion must only be called once.");
return nullptr;
}
LOG(INFO) << "Waiting for completion.";
void* got_tag = nullptr;
bool ok = false;
bool success = cq_.Next(&got_tag, &ok);
if (!success || !ok || got_tag == nullptr) {
out_status =
errors::Internal("Missing or invalid event from completion queue.");
return nullptr;
}
VLOG(1) << "Writing out status.";
DCHECK_EQ(got_tag, &status_on_completion_);
status_on_completion_.Update(FromGrpcStatus(grpc_status_));
if (status_on_completion_.code() == error::DEADLINE_EXCEEDED) {
LOG(WARNING) << status_on_completion_;
} else if (!status_on_completion_.ok()) {
LOG(ERROR) << status_on_completion_;
}
out_status = status_on_completion_;
return std::move(response_);
}
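// Typical usage (a minimal sketch; `service_address` and `request` are
// assumed to be prepared by the caller and are not defined here):
//   auto session = RemoteProfilerSession::Create(
//       service_address, absl::Now() + absl::Seconds(30), request);
//   absl::Status status;
//   auto response = session->WaitForCompletion(status);
//   if (!status.ok()) LOG(ERROR) << status;
// WaitForCompletion() may be called at most once; a second call returns
// nullptr with a FailedPrecondition status.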
}
} | #include "xla/tsl/profiler/rpc/client/profiler_client.h"
#include <memory>
#include <string>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/profiler/rpc/client/profiler_client_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/profiler_service.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::ProfileRequest;
using ::tsl::profiler::test::DurationApproxLess;
using ::tsl::profiler::test::DurationNear;
using ::tsl::profiler::test::StartServer;
TEST(RemoteProfilerSession, Simple) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Duration grace = absl::Seconds(1);
absl::Duration max_duration = duration + grace;
absl::Time approx_start = absl::Now();
absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_TRUE(status.ok());
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
TEST(RemoteProfilerSession, WaitNotCalled) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Duration grace = absl::Seconds(1);
absl::Duration max_duration = duration + grace;
absl::Time approx_start = absl::Now();
absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
TEST(RemoteProfilerSession, Timeout) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
auto remote_session =
RemoteProfilerSession::Create(service_addr, absl::Now(), request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
EXPECT_TRUE(errors::IsDeadlineExceeded(status));
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
}
TEST(RemoteProfilerSession, LongDeadline) {
absl::Duration duration = absl::Milliseconds(10);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(1000);
absl::Duration max_duration = duration + grace;
const absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_TRUE(status.ok());
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationNear(duration));
}
TEST(RemoteProfilerSession, LongDuration) {
absl::Duration duration = absl::Seconds(3);
ProfileRequest request;
std::string service_addr;
auto server = StartServer(duration, &service_addr, &request);
absl::Time approx_start = absl::Now();
absl::Duration grace = absl::Seconds(1);
absl::Duration max_duration = duration + grace;
const absl::Time deadline = approx_start + max_duration;
auto remote_session =
RemoteProfilerSession::Create(service_addr, deadline, request);
absl::Status status;
auto response = remote_session->WaitForCompletion(status);
absl::Duration elapsed = absl::Now() - approx_start;
EXPECT_TRUE(status.ok());
EXPECT_TRUE(response->empty_trace());
EXPECT_EQ(response->tool_data_size(), 0);
EXPECT_THAT(elapsed, DurationApproxLess(max_duration));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/profiler_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/rpc/client/profiler_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd3f943a-1cc2-433d-9f21-46081f85e3cd | cpp | tensorflow/tensorflow | trace_events_to_json | tensorflow/core/profiler/convert/trace_viewer/trace_events_to_json.cc | third_party/xla/xla/tsl/profiler/convert/trace_events_to_json_test.cc | #include "tensorflow/core/profiler/convert/trace_viewer/trace_events_to_json.h"
#include <cstdint>
#include <map>
#include <string>
#include <string_view>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/profiler/protobuf/trace_events.pb.h"
#include "tensorflow/core/profiler/protobuf/trace_events_raw.pb.h"
namespace tensorflow {
namespace profiler {
std::string JsonEscape(absl::string_view raw) {
std::string escaped_string;
const size_t length = raw.length();
escaped_string.reserve((length + 1) * 2);
escaped_string.push_back('"');
for (size_t i = 0; i < length; ++i) {
const unsigned char c = raw[i];
if (c < 0x20) {
escaped_string.push_back('\\');
switch (c) {
case '\b':
escaped_string.push_back('b');
break;
case '\f':
escaped_string.push_back('f');
break;
case '\n':
escaped_string.push_back('n');
break;
case '\r':
escaped_string.push_back('r');
break;
case '\t':
escaped_string.push_back('t');
break;
default:
absl::StrAppendFormat(&escaped_string, "u%04x",
static_cast<unsigned int>(c));
}
continue;
}
switch (c) {
case '\"':
escaped_string.append("\\\"");
continue;
case '\\':
escaped_string.append("\\\\");
continue;
case '<':
case '>':
case '&': {
absl::StrAppendFormat(&escaped_string, "\\u%04x",
static_cast<unsigned int>(c));
continue;
}
case '\xe2': {
if ((i + 2 < length) && (raw[i + 1] == '\x80')) {
if (raw[i + 2] == '\xa8') {
escaped_string.append("\\u2028");
i += 2;
continue;
} else if (raw[i + 2] == '\xa9') {
escaped_string.append("\\u2029");
i += 2;
continue;
}
}
escaped_string.push_back(c);
continue;
}
}
escaped_string.push_back(c);
}
escaped_string.push_back('"');
return escaped_string;
}
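// Illustrative behavior of the rules above: the result is always wrapped in
// double quotes; '"' and '\' receive a backslash escape; control characters
// become \b \f \n \r \t or \u00xx; the HTML-sensitive characters < > &
// become \u003c \u003e \u0026; and the UTF-8 line/paragraph separators
// U+2028/U+2029 become \u2028/\u2029, so the output can be embedded
// directly in JavaScript.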
std::string ProtoString(const tsl::protobuf::Message& pb) {
return JsonEscape(pb.DebugString());
}
std::map<uint64_t, uint64_t> BuildStackFrameReferences(const Trace& trace) {
const auto& name_table = trace.name_table();
std::map<uint64_t, uint64_t> output;
for (const auto& [fp, name] : name_table) {
if (!absl::StartsWith(name, "@@")) continue;
output[fp] = 0;
}
uint64_t sf = 1;
for (auto& it : output) {
it.second = sf++;
}
return output;
}
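// Only name-table entries whose value starts with "@@" (the convention for
// stack-frame names) are kept. Each retained fingerprint is then assigned a
// dense 1-based stack-frame id in ascending fingerprint order, which is
// deterministic because std::map iterates its keys sorted.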
}
} | #include "xla/tsl/profiler/convert/trace_events_to_json.h"
#include <string>
#include "json/json.h"
#include "xla/tsl/profiler/convert/trace_container.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
namespace tsl {
namespace profiler {
namespace {
Json::Value ToJsonValue(const std::string& json_str) {
Json::Value json;
Json::Reader reader;
EXPECT_TRUE(reader.parse(json_str, json));
return json;
}
TEST(TraceEventsToJson, JsonConversion) {
const std::string metadata_string = R"pb(
devices {
key: 2
value {
name: 'D2'
device_id: 2
resources {
key: 2
value { resource_id: 2 name: 'R2.2' }
}
}
}
devices {
key: 1
value {
name: 'D1'
device_id: 1
resources {
key: 2
value { resource_id: 1 name: 'R1.2' }
}
}
}
)pb";
TraceContainer container;
EXPECT_TRUE(container.ParseMetadataFromString(metadata_string));
TraceEvent* event = container.CreateEvent();
event->set_device_id(1);
event->set_resource_id(2);
event->set_name("E1.2.1");
event->set_timestamp_ps(100000);
event->set_duration_ps(10000);
event->mutable_args()->insert({"long_name", "E1.2.1 long"});
event->mutable_args()->insert({"arg2", "arg2 val"});
event = container.CreateEvent();
event->set_device_id(2);
event->set_resource_id(2);
event->set_name("E2.2.1 # \"comment\"");
event->set_timestamp_ps(105000);
container.CapEvents(2);
Json::Value json = ToJsonValue(TraceContainerToJson(container));
Json::Value expected_json = ToJsonValue(R"(
{
"displayTimeUnit": "ns",
"metadata": { "highres-ticks": true },
"traceEvents": [
{"ph":"M", "pid":1, "name":"process_name", "args":{"name":"D1"}},
{"ph":"M", "pid":1, "name":"process_sort_index", "args":{"sort_index":1}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_name",
"args":{"name":"R1.2"}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{"ph":"M", "pid":2, "name":"process_name", "args":{"name":"D2"}},
{"ph":"M", "pid":2, "name":"process_sort_index", "args":{"sort_index":2}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_name",
"args":{"name":"R2.2"}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{
"ph" : "X",
"pid" : 1,
"tid" : 2,
"name" : "E1.2.1",
"ts" : 0.1,
"dur" : 0.01,
"args" : {"arg2": "arg2 val", "long_name": "E1.2.1 long"}
},
{
"ph" : "X",
"pid" : 2,
"tid" : 2,
"name" : "E2.2.1 # \"comment\"",
"ts" : 0.105,
"dur" : 1e-6
},
{}
]
})");
EXPECT_EQ(json, expected_json);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/trace_viewer/trace_events_to_json.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/trace_events_to_json_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe0cd32f-dfd6-469a-95f5-6ad6cf632eca | cpp | tensorflow/tensorflow | trace_container | third_party/xla/xla/tsl/profiler/convert/trace_container.cc | third_party/xla/xla/tsl/profiler/convert/trace_container_test.cc | #include "xla/tsl/profiler/convert/trace_container.h"
#include <algorithm>
#include <string>
#include <string_view>
#include <vector>
#include "tsl/platform/protobuf.h"
namespace tsl {
namespace profiler {
bool TraceContainer::ParseMetadataFromString(const std::string& description) {
return protobuf::TextFormat::ParseFromString(description, &metadata_);
}
void TraceContainer::CapEvents(const uint32_t max_count) {
const size_t total_count = events_.size();
if (total_count <= max_count) {
return;
}
const std::vector<TraceEvent*>::iterator end = events_.begin() + max_count;
std::partial_sort(
events_.begin(), end, events_.end(),
[](const TraceEvent* const lhs, const TraceEvent* const rhs) -> bool {
return lhs->timestamp_ps() < rhs->timestamp_ps();
});
for (std::vector<TraceEvent*>::iterator i = end; i != events_.end(); ++i) {
delete *i;
}
events_.erase(end, events_.end());
}
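// CapEvents keeps the max_count events with the smallest timestamps and
// frees the rest. For example, with 100 events whose timestamps cover 0..49
// twice, capping at 50 leaves exactly the events with timestamp_ps < 25
// (this is what the CapEvents unit test below asserts).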
void TraceContainer::FlushAndSerializeEvents(std::string* const output) {
Trace trace = metadata_;
for (TraceEvent* const event : events_) {
trace.mutable_trace_events()->AddAllocated(event);
}
events_.clear();
trace.SerializeToString(output);
}
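// AddAllocated() transfers ownership of each heap-allocated TraceEvent into
// the temporary Trace proto, so clearing events_ afterwards does not leak:
// the events are destroyed when `trace` goes out of scope.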
}
} | #include "xla/tsl/profiler/convert/trace_container.h"
#include <string>
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
void PopulateDummyEvent(TraceEvent* const event) {
event->set_device_id(1);
event->set_resource_id(2);
event->set_name("A");
event->set_timestamp_ps(3);
event->set_duration_ps(4);
}
TEST(TraceContainer, TraceEventAllocation) {
TraceContainer container;
PopulateDummyEvent(container.CreateEvent());
}
TEST(TraceContainer, FlushAndSerializeEvents) {
TraceContainer container;
PopulateDummyEvent(container.CreateEvent());
EXPECT_EQ(container.UnsortedEvents().size(), 1);
std::string serialized;
container.FlushAndSerializeEvents(&serialized);
EXPECT_EQ(container.UnsortedEvents().size(), 0);
PopulateDummyEvent(container.CreateEvent());
EXPECT_EQ(container.UnsortedEvents().size(), 1);
std::string reserialized;
container.FlushAndSerializeEvents(&reserialized);
EXPECT_EQ(serialized, reserialized);
EXPECT_EQ(container.UnsortedEvents().size(), 0);
Trace trace;
trace.ParseFromString(reserialized);
EXPECT_EQ(trace.trace_events_size(), 1);
}
TEST(TraceContainer, CapEvents) {
TraceContainer container;
for (int i = 0; i < 100; i++) {
container.CreateEvent()->set_timestamp_ps((100 - i) % 50);
}
container.CapEvents(101);
EXPECT_EQ(container.UnsortedEvents().size(), 100);
container.CapEvents(100);
EXPECT_EQ(container.UnsortedEvents().size(), 100);
container.CapEvents(99);
EXPECT_EQ(container.UnsortedEvents().size(), 99);
container.CapEvents(50);
EXPECT_EQ(container.UnsortedEvents().size(), 50);
for (const TraceEvent* const event : container.UnsortedEvents()) {
EXPECT_LT(event->timestamp_ps(), 25);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/trace_container.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/trace_container_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
01947b9c-2719-4c74-b27a-405f3c7c3b5f | cpp | tensorflow/tensorflow | xplane_to_trace_events | third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events.cc | third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events_test.cc | #include "xla/tsl/profiler/convert/xplane_to_trace_events.h"
#include <stddef.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/trace_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::profiler::XSpace;
void BuildDeviceAndResources(uint32 device_id, const XPlaneVisitor& plane,
Device* device) {
device->set_name(std::string(plane.Name()));
device->set_device_id(device_id);
bool sort_by_ordinal = (device_id == kHostThreadsDeviceId);
int ordinal = 0;
plane.ForEachLine([&](const XLineVisitor& line) {
uint32 resource_id = line.DisplayId();
Resource& resource = (*device->mutable_resources())[resource_id];
resource.set_resource_id(resource_id);
resource.set_name(std::string(line.DisplayName()));
if (sort_by_ordinal) {
resource.set_sort_index(++ordinal);
}
});
}
void ConvertXPlaneToTraceEvents(uint32 device_id, const XPlaneVisitor& xplane,
TraceContainer& container) {
BuildDeviceAndResources(device_id, xplane,
container.MutableDevice(device_id));
xplane.ForEachLine([device_id, &container](const XLineVisitor& xline) {
uint32 resource_id = xline.DisplayId();
if (xline.DisplayName() == tsl::profiler::kXlaAsyncOpLineName) {
return;
}
xline.ForEachEvent(
[device_id, resource_id, &container](const XEventVisitor& xevent) {
int64_t event_type =
xevent.Type().value_or(HostEventType::kUnknownHostEventType);
if (IsInternalEvent(event_type)) return;
TraceEvent* event = container.CreateEvent();
auto& args = *event->mutable_args();
event->set_device_id(device_id);
event->set_resource_id(resource_id);
if (xevent.HasDisplayName()) {
event->set_name(std::string(xevent.DisplayName()));
args["long_name"] = std::string(xevent.Name());
} else {
event->set_name(std::string(xevent.Name()));
}
event->set_timestamp_ps(xevent.TimestampPs());
event->set_duration_ps(xevent.DurationPs());
auto for_each_stat = [&](const XStatVisitor& stat) {
if (stat.ValueCase() == XStat::VALUE_NOT_SET) return;
if (IsInternalStat(stat.Type())) return;
if (stat.Type() == StatType::kStepName) {
event->set_name(stat.ToString());
}
args[std::string(stat.Name())] = stat.ToString();
};
xevent.Metadata().ForEachStat(for_each_stat);
xevent.ForEachStat(for_each_stat);
});
});
}
}
uint64 GetTraceViewerMaxEvents() {
constexpr uint64 kMaxEvents = 1000000;
char* max_events = getenv("TF_PROFILER_TRACE_VIEWER_MAX_EVENTS");
if (max_events != nullptr) {
return std::stoull(max_events, nullptr, 10);
} else {
return kMaxEvents;
}
}
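// The default 1M-event cap can be overridden from the environment, e.g.
// (illustrative invocation, not a command used by this file):
//   TF_PROFILER_TRACE_VIEWER_MAX_EVENTS=500000 ./trace_tool
// Note that std::stoull throws on non-numeric input, so the variable is
// expected to hold a valid base-10 integer.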
TraceContainer ConvertXSpaceToTraceContainer(const XSpace& xspace) {
TraceContainer container;
const XPlane* host_plane = FindPlaneWithName(xspace, kHostThreadsPlaneName);
if (host_plane != nullptr) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(host_plane);
ConvertXPlaneToTraceEvents(kHostThreadsDeviceId, xplane, container);
}
std::vector<const XPlane*> device_planes =
FindPlanesWithPrefix(xspace, kGpuPlanePrefix);
if (device_planes.empty()) {
device_planes = FindPlanesWithPrefix(xspace, kTpuPlanePrefix);
}
if (device_planes.empty()) {
device_planes = FindPlanesWithPrefix(xspace, kCustomPlanePrefix);
}
for (const XPlane* device_plane : device_planes) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
uint32 device_id = kFirstDeviceId + xplane.Id();
ConvertXPlaneToTraceEvents(device_id, xplane, container);
}
uint64 viewer_max_events = GetTraceViewerMaxEvents();
container.CapEvents(viewer_max_events);
return container;
}
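// Device planes above are searched in strict precedence order
// GPU > TPU > custom: the first prefix that matches any plane wins, so a
// space containing both GPU and TPU planes only emits GPU device events.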
void ConvertXSpaceToTraceEventsString(const XSpace& xspace,
std::string* content) {
ConvertXSpaceToTraceContainer(xspace).FlushAndSerializeEvents(content);
}
}
} | #include "xla/tsl/profiler/convert/xplane_to_trace_events.h"
#include <limits>
#include <utility>
#include "xla/tsl/profiler/utils/trace_utils.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace tsl {
namespace profiler {
namespace {
using tensorflow::profiler::XSpace;
void CreateXSpace(XSpace* space) {
XPlaneBuilder host_plane(space->add_planes());
host_plane.SetName(kHostThreadsPlaneName);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
thread1.SetName("thread1");
XEventBuilder event1 =
thread1.AddEvent(*host_plane.GetOrCreateEventMetadata("event1"));
event1.SetTimestampNs(150000);
event1.SetDurationNs(10000);
event1.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Relu"));
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
thread2.SetName("thread2");
XEventBuilder event2 =
thread2.AddEvent(*host_plane.GetOrCreateEventMetadata("event2"));
event2.SetTimestampNs(160000);
event2.SetDurationNs(10000);
event2.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Conv2D"));
XPlaneBuilder device_plane(space->add_planes());
device_plane.SetName(GpuPlaneName(0));
device_plane.SetId(0);
XLineBuilder stream1 = device_plane.GetOrCreateLine(30);
stream1.SetName("gpu stream 1");
XEventBuilder event3 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event3.SetTimestampNs(180000);
event3.SetDurationNs(10000);
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata("correlation id"),
55);
}
TEST(ConvertXPlaneToTraceEvents, Convert) {
XSpace xspace;
CreateXSpace(&xspace);
TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
ASSERT_EQ(container.trace().devices_size(), 2);
EXPECT_EQ(
container.trace().devices().at(kHostThreadsDeviceId).resources_size(), 2);
EXPECT_EQ(container.trace().devices().at(kFirstDeviceId).resources_size(), 1);
EXPECT_EQ(container.UnsortedEvents().size(), 3);
}
TEST(ConvertXPlaneToTraceEvents, SkipAsyncOps) {
XSpace xspace;
XPlaneBuilder device_plane(xspace.add_planes());
device_plane.SetName(GpuPlaneName(0));
XLineBuilder async_ops = device_plane.GetOrCreateLine(10);
async_ops.SetName(kXlaAsyncOpLineName);
XEventBuilder event1 =
async_ops.AddEvent(*device_plane.GetOrCreateEventMetadata("event1"));
event1.SetTimestampNs(100);
event1.SetDurationNs(1);
TraceContainer container = ConvertXSpaceToTraceContainer(xspace);
ASSERT_THAT(container.UnsortedEvents(), ::testing::IsEmpty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xplane_to_trace_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66a14b23-1deb-461d-a3a5-eef516eff56b | cpp | tensorflow/tensorflow | tsl_status | third_party/xla/xla/tsl/c/tsl_status.cc | third_party/xla/xla/tsl/c/tsl_status_test.cc | #include "xla/tsl/c/tsl_status.h"
#include <string>
#include "xla/tsl/c/tsl_status_internal.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
using ::tsl::Status;
using ::tsl::error::Code;
using ::tsl::errors::IOError;
TSL_Status* TSL_NewStatus() { return new TSL_Status; }
void TSL_DeleteStatus(TSL_Status* s) { delete s; }
void TSL_SetStatus(TSL_Status* s, TSL_Code code, const char* msg) {
if (code == TSL_OK) {
s->status = absl::OkStatus();
return;
}
s->status =
Status(static_cast<absl::StatusCode>(code), absl::string_view(msg));
}
void TSL_SetPayload(TSL_Status* s, const char* key, const char* value) {
s->status.SetPayload(key, absl::Cord(absl::string_view(value)));
}
void TSL_ForEachPayload(const TSL_Status* s, TSL_PayloadVisitor visitor,
void* capture) {
s->status.ForEachPayload([visitor, capture](absl::string_view type_url,
const absl::Cord& payload) {
std::string type_url_str(type_url);
std::string payload_str(payload);
visitor(type_url_str.c_str(), payload_str.c_str(), capture);
});
}
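// Example C-style visitor (a minimal sketch; CountPayloads is a
// hypothetical callback, not part of this API):
//   void CountPayloads(const char* key, const char* value, void* capture) {
//     ++*static_cast<int*>(capture);
//   }
//   int n = 0;
//   TSL_ForEachPayload(status, CountPayloads, &n);
// The key/value pointers are only valid for the duration of each callback
// invocation, since they point into locals of the lambda above.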
void TSL_SetStatusFromIOError(TSL_Status* s, int error_code,
const char* context) {
s->status = IOError(context, error_code);
}
TSL_Code TSL_GetCode(const TSL_Status* s) {
return static_cast<TSL_Code>(s->status.code());
}
const char* TSL_Message(const TSL_Status* s) {
return absl::StatusMessageAsCStr(s->status);
} | #include "xla/tsl/c/tsl_status.h"
#include <string>
#include <unordered_map>
#include <utility>
#include "xla/tsl/c/tsl_status_internal.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(TSL_Status, PayloadsSet) {
TSL_Status* tsl_status = TSL_NewStatus();
TSL_SetStatus(tsl_status, TSL_CANCELLED, "Error Message");
TSL_SetPayload(tsl_status, "a", "1");
TSL_SetPayload(tsl_status, "b", "2");
TSL_SetPayload(tsl_status, "c", "3");
std::unordered_map<std::string, std::string> payloads;
TSL_ForEachPayload(
tsl_status,
[](const char* key, const char* value, void* capture) {
std::unordered_map<std::string, std::string>* payloads =
static_cast<std::unordered_map<std::string, std::string>*>(capture);
payloads->emplace(key, value);
},
&payloads);
EXPECT_EQ(payloads.size(), 3);
EXPECT_EQ(payloads.at("a"), "1");
EXPECT_EQ(payloads.at("b"), "2");
EXPECT_EQ(payloads.at("c"), "3");
TSL_DeleteStatus(tsl_status);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/c/tsl_status.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/c/tsl_status_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb168610-5f08-458f-9cab-96ab93874d84 | cpp | tensorflow/tensorflow | call_options | third_party/xla/xla/tsl/distributed_runtime/call_options.cc | tensorflow/core/distributed_runtime/call_options_test.cc | #include "xla/tsl/distributed_runtime/call_options.h"
#include <utility>
#include "tsl/platform/mutex.h"
namespace tsl {
CallOptions::CallOptions() = default;
void CallOptions::StartCancel() {
mutex_lock l(mu_);
if (cancel_func_ != nullptr) {
cancel_func_();
}
}
void CallOptions::SetCancelCallback(CancelFunction cancel_func) {
mutex_lock l(mu_);
cancel_func_ = std::move(cancel_func);
}
void CallOptions::ClearCancelCallback() {
mutex_lock l(mu_);
cancel_func_ = nullptr;
}
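// Contract: StartCancel() invokes the currently registered callback (if
// any) while holding the same mutex, so it is safe to call concurrently
// with SetCancelCallback()/ClearCancelCallback(). A StartCancel() issued
// when no callback is registered is a no-op, as the unit test verifies.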
int64_t CallOptions::GetTimeout() {
mutex_lock l(mu_);
return timeout_in_ms_;
}
void CallOptions::SetTimeout(int64_t ms) {
mutex_lock l(mu_);
timeout_in_ms_ = ms;
}
} | #include "tensorflow/core/distributed_runtime/call_options.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(CallOptions, Cancel) {
int num_calls = 0;
CallOptions opts;
opts.StartCancel();
EXPECT_EQ(num_calls, 0);
opts.SetCancelCallback([&num_calls]() { num_calls++; });
EXPECT_EQ(num_calls, 0);
opts.StartCancel();
EXPECT_EQ(num_calls, 1);
opts.StartCancel();
EXPECT_EQ(num_calls, 2);
opts.ClearCancelCallback();
EXPECT_EQ(num_calls, 2);
opts.StartCancel();
EXPECT_EQ(num_calls, 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/call_options.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/call_options_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c2807a6-9de3-4dea-b2ce-9f085bb68d10 | cpp | tensorflow/tensorflow | coordination_service_error_util | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_error_util.cc | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_error_util_test.cc | #include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include <optional>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/regexp.h"
namespace tsl {
absl::Status TrimCoordinationErrorMessage(const absl::Status& s) {
if (s.ok()) {
return s;
}
auto status_message = std::string(s.message());
auto additional_info_index = status_message.find("Additional GRPC");
if (additional_info_index == std::string::npos) {
return s;
}
std::optional<absl::Cord> payload =
s.GetPayload(CoordinationErrorPayloadKey());
if (!payload.has_value() && absl::IsUnavailable(s)) {
auto prefix_message =
"Failed to send RPC to coordination service. Either the leader task "
"died/restarted unexpectedly or this task is experiencing network "
"issues. Check earlier logs from this task and the "
"leader (usually slice 0 process/task/worker 0) to debug further.\n";
status_message = absl::StrCat(
prefix_message,
status_message.substr(additional_info_index));
} else {
std::string rpc_name;
RE2::PartialMatch(status_message,
"(/tensorflow.CoordinationService/(\\w+))", &rpc_name);
status_message = status_message.substr(0, additional_info_index);
absl::StrAppend(&status_message, "\nRPC: ", rpc_name);
}
auto trimmed_status = absl::Status(s.code(), status_message);
if (payload.has_value()) {
trimmed_status.SetPayload(CoordinationErrorPayloadKey(), *payload);
}
#if defined(PLATFORM_GOOGLE)
for (const auto& source_location : s.GetSourceLocations()) {
trimmed_status.AddSourceLocation(source_location);
}
#endif
return trimmed_status;
}
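// Illustrative effect (abridged): a message of the form
//   "Heartbeat failed. Additional GRPC error information ...
//    /tensorflow.CoordinationService/Heartbeat ..."
// is trimmed to
//   "Heartbeat failed. \nRPC: /tensorflow.CoordinationService/Heartbeat"
// while the coordination-error payload (and, on google platforms, the
// source locations) are carried over to the trimmed status.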
} | #include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include <string>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
using ::tensorflow::CoordinatedTask;
using ::tensorflow::CoordinationServiceError;
TEST(CoordinationServiceErrorUtil, MakeCoordinationErrorWithEmptyPayload) {
absl::Status error = absl::InternalError("Test Error");
absl::Status coordination_error = MakeCoordinationError(error);
EXPECT_EQ(coordination_error.code(), error.code());
EXPECT_EQ(coordination_error.message(), error.message());
EXPECT_EQ(
coordination_error.GetPayload(CoordinationErrorPayloadKey()).value(), "");
}
TEST(CoordinationServiceErrorUtil, MakeCoordinationErrorWithErrorOrigin) {
absl::Status error = absl::InternalError("Test Error");
CoordinatedTask source_task;
source_task.set_job_name("test_worker");
source_task.set_task_id(7);
absl::Status coordination_error = MakeCoordinationError(error, source_task);
EXPECT_EQ(coordination_error.code(), error.code());
EXPECT_EQ(coordination_error.message(), error.message());
CoordinationServiceError payload;
payload.ParseFromString(std::string(
coordination_error.GetPayload(CoordinationErrorPayloadKey()).value()));
EXPECT_EQ(payload.source_task().job_name(), source_task.job_name());
EXPECT_EQ(payload.source_task().task_id(), source_task.task_id());
EXPECT_EQ(payload.is_reported_error(), false);
}
TEST(CoordinationServiceErrorUtil, MakeCoordinationErrorWithUserReportedError) {
absl::Status error = absl::InternalError("Test Error");
CoordinatedTask source_task;
source_task.set_job_name("test_worker");
source_task.set_task_id(7);
absl::Status coordination_error =
MakeCoordinationError(error, source_task,
true);
EXPECT_EQ(coordination_error.code(), error.code());
EXPECT_EQ(coordination_error.message(), error.message());
CoordinationServiceError payload;
payload.ParseFromString(std::string(
coordination_error.GetPayload(CoordinationErrorPayloadKey()).value()));
EXPECT_EQ(payload.source_task().job_name(), source_task.job_name());
EXPECT_EQ(payload.source_task().task_id(), source_task.task_id());
EXPECT_EQ(payload.is_reported_error(), true);
}
TEST(CoordinationServiceErrorUtil, MakeCoordinationErrorWithPayload) {
absl::Status error = absl::InternalError("Test Error");
CoordinationServiceError payload;
CoordinatedTask* source_task = payload.mutable_source_task();
source_task->set_job_name("test_worker");
source_task->set_task_id(7);
payload.set_is_reported_error(true);
absl::Status coordination_error = MakeCoordinationError(error, payload);
EXPECT_EQ(coordination_error.code(), error.code());
EXPECT_EQ(coordination_error.message(), error.message());
CoordinationServiceError actual_payload;
actual_payload.ParseFromString(std::string(
coordination_error.GetPayload(CoordinationErrorPayloadKey()).value()));
EXPECT_EQ(actual_payload.source_task().job_name(),
payload.source_task().job_name());
EXPECT_EQ(actual_payload.source_task().task_id(),
payload.source_task().task_id());
EXPECT_EQ(actual_payload.is_reported_error(), payload.is_reported_error());
}
TEST(CoordinationServiceErrorUtil,
TrimCoordinationErrorMessage_CoordinationError) {
absl::Status error = MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. RecordHeartbeat() from task: "
"/job:jax_worker/replica:0/task:2 failed. Additional GRPC error "
"information from remote target coordination_service while calling "
"/tensorflow.CoordinationService/Heartbeat::UNKNOWN:Error received from "
"peer "
"{file:'third_party/grpc/src/core/lib/surface/filter_stack_call.cc', "
"file_line:464, created_time:'2024-08-05T13:57:51.331198242-07:00', "
"grpc_status:13, grpc_message:'Coordination service has stopped. "
"RecordHeartbeat() from task: /job:jax_worker/replica:0/task:2 failed. "
"'} "));
absl::Status trimmed_error = TrimCoordinationErrorMessage(error);
EXPECT_EQ(trimmed_error.code(), error.code());
EXPECT_EQ(trimmed_error.message(),
"Coordination service has stopped. RecordHeartbeat() from task: "
"/job:jax_worker/replica:0/task:2 failed. \nRPC: "
"/tensorflow.CoordinationService/Heartbeat");
EXPECT_EQ(trimmed_error.GetPayload(CoordinationErrorPayloadKey()).value(),
"");
}
TEST(CoordinationServiceErrorUtil, TrimCoordinationErrorMessage_NetworkError) {
absl::Status error = absl::UnavailableError(
"failed to connect to all addresses; last error: UNKNOWN: "
"ipv4:127.0.0.1:10001: Failed to connect to remote host: Connection "
"refused. Additional GRPC error information from remote target "
"coordination_service while calling "
"/tensorflow.CoordinationService/Heartbeat::UNKNOWN:Error received from "
"peer "
"{file:'third_party/grpc/src/core/lib/surface/filter_stack_call.cc', "
"file_line:464, created_time:'2024-08-05T13:57:53.123562608-07:00', "
"grpc_status:14, grpc_message:'failed to connect to all addresses; last "
"error: UNKNOWN: ipv4:127.0.0.1:10001: Failed to connect to remote host: "
"Connection refused'} ");
absl::Status trimmed_error = TrimCoordinationErrorMessage(error);
auto message = trimmed_error.message();
EXPECT_EQ(trimmed_error.code(), error.code());
EXPECT_TRUE(absl::StrContains(message, "Check earlier logs"));
EXPECT_EQ(message.find("failed to connect"),
message.rfind("failed to connect"))
<< trimmed_error;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_error_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_error_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dbee8c02-9029-4404-9614-1b7062d0d7a5 | cpp | tensorflow/tensorflow | coordination_service | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_test.cc | #include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedTask;
using tensorflow::CoordinatedTaskState;
using tensorflow::CoordinatedTaskStateInfo;
using tensorflow::CoordinationServiceConfig;
using tensorflow::CoordinationServiceError;
using tensorflow::DeviceInfo;
using tensorflow::KeyValueEntry;
constexpr absl::Duration kDevicePropagationTimeout = absl::Hours(1);
constexpr int kDefaultHeartbeatTimeoutMs = 10 * 1000;
constexpr int kServiceToClientTimeoutMs = 10 * 1000;
constexpr size_t kOngoingBarriersSoftLimit = 20;
constexpr char kHealthCheckThread[] = "CoordinationServiceHealthCheck";
constexpr int kPendingTaskLogLimit = 20;
constexpr int kPendingStragglerLogLimit = 3;
std::string GetTaskName(std::string_view job_name, int task_id) {
return absl::StrCat("/job:", job_name, "/replica:", 0, "/task:", task_id);
}
std::string GetTaskName(const CoordinatedTask& task) {
return GetTaskName(task.job_name(), task.task_id());
}
CoordinatedTask GetTaskFromName(std::string_view task_name) {
DeviceNameUtils::ParsedName parsed;
DeviceNameUtils::ParseFullName(task_name, &parsed);
CoordinatedTask task;
task.set_job_name(parsed.job);
task.set_task_id(parsed.task);
return task;
}
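// For example, GetTaskFromName("/job:worker/replica:0/task:3") yields a
// CoordinatedTask with job_name "worker" and task_id 3, i.e. the inverse
// of GetTaskName() above.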
struct CoordinatedTaskHash {
uint64_t operator()(const CoordinatedTask& task) const {
return absl::HashOf(task.job_name(), task.task_id());
}
};
struct CoordinatedTaskEqual {
bool operator()(const CoordinatedTask& lhs,
const CoordinatedTask& rhs) const {
return lhs.job_name() == rhs.job_name() && lhs.task_id() == rhs.task_id();
}
};
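// CoordinatedTask is a proto with no built-in hash or equality, so these
// functors make it usable as an absl::flat_hash_map/flat_hash_set key;
// identity is defined by job name and task id only.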
class CoordinationServiceStandaloneImpl : public CoordinationServiceInterface {
public:
CoordinationServiceStandaloneImpl(
Env* env, const CoordinationServiceConfig& config,
std::unique_ptr<CoordinationClientCache> client_cache);
~CoordinationServiceStandaloneImpl() override { Stop(); }
void SetDeviceAggregationFunction(
std::function<DeviceInfo(const DeviceInfo& devices)>
post_aggregate_device_fn) override;
void LogConnectStatusLocked() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
absl::Status RegisterTask(const CoordinatedTask& task,
uint64_t incarnation) override;
void WaitForAllTasks(const CoordinatedTask& task, const DeviceInfo& devices,
StatusCallback done) override;
void ShutdownTaskAsync(const CoordinatedTask& task,
StatusCallback done) override;
absl::Status ResetTask(const CoordinatedTask& task) override;
absl::Status RecordHeartbeat(const CoordinatedTask& task,
uint64_t incarnation) override;
absl::Status ReportTaskError(const CoordinatedTask& task,
absl::Status error) override;
std::vector<CoordinatedTaskStateInfo> GetTaskState(
const std::vector<CoordinatedTask>& task) override;
absl::Status InsertKeyValue(std::string_view key,
std::string_view value) override;
absl::Status InsertKeyValue(std::string_view key, std::string_view value,
bool allow_overwrite) override;
void GetKeyValueAsync(std::string_view key,
StatusOrValueCallback done) override;
absl::StatusOr<std::string> TryGetKeyValue(std::string_view key) override;
std::vector<KeyValueEntry> GetKeyValueDir(
std::string_view directory_key) override;
absl::Status DeleteKeyValue(std::string_view key) override;
void BarrierAsync(std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) override;
absl::Status CancelBarrier(std::string_view barrier_id,
const CoordinatedTask& task) override;
void PollForErrorAsync(const CoordinatedTask& task,
StatusCallback done) override;
private:
const DeviceInfo& ListClusterDevices() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
uint64_t GetServiceIncarnation() override;
void CheckHeartbeatTimeout();
void CheckBarrierTimeout();
void CheckStaleness();
void StartCheckStaleness();
void Stop(bool shut_staleness_thread = true);
bool ServiceHasStopped() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void ReportServiceErrorToTaskAsync(const CoordinatedTask& destination_task,
absl::Status error);
void PropagateError(const CoordinatedTask& source_task,
bool is_reported_by_task = false)
ABSL_LOCKS_EXCLUDED(state_mu_);
void SetTaskError(std::string_view task_name, absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
absl::Status DisconnectTask(const CoordinatedTask& task)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
struct BarrierState {
bool passed = false;
absl::Status result = absl::UnknownError(
"Invalid barrier result.");
uint64_t deadline_in_micros = 0;
int num_pending_tasks = 0;
absl::flat_hash_map<CoordinatedTask, bool, CoordinatedTaskHash,
CoordinatedTaskEqual>
tasks_at_barrier;
std::vector<StatusCallback> done_callbacks;
CoordinatedTask initiating_task;
};
bool ValidateBarrierArgs(
std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done);
bool InitializeBarrier(
BarrierState* barrier, std::string_view barrier_id,
absl::Duration timeout, const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void PassBarrier(std::string_view barrier_id, absl::Status result,
BarrierState* barrier)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void AggregateClusterDevices() ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void CompleteShutdownAfterBarrier(absl::Status result, BarrierState* barrier)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
bool ValidateTaskArgs(
const std::vector<CoordinatedTask>& tasks_args,
const absl::flat_hash_map<CoordinatedTask, bool, CoordinatedTaskHash,
CoordinatedTaskEqual>& tasks_at_barrier,
int64_t cluster_size);
bool isRecoverableJob(std::string_view task_name) const;
void SendErrorPollingResponse(const absl::Status& error);
bool SendErrorPollingResponseOrStopService(const absl::Status& error);
bool IsClientPollingForError() const;
class ErrorPollingState {
public:
bool Responded() const { return responded_; }
void SetError(const absl::Status& error);
const absl::Status& GetError() const { return error_; }
bool IsTaskPolling(absl::string_view task_name) const {
return polling_task_names_.contains(task_name);
}
void AddTask(const CoordinatedTask& task, StatusCallback&& done);
private:
bool responded_ = false;
absl::Status error_ = absl::OkStatus();
std::vector<StatusCallback> done_callbacks_;
absl::flat_hash_set<std::string> polling_task_names_;
};
class TaskState {
public:
CoordinatedTaskState GetState() { return state_; }
absl::Status GetStatus() { return status_; }
uint64_t GetTaskIncarnation() { return task_incarnation_; }
void SetConnected(uint64_t task_incarnation);
void Disconnect(uint64_t grace_period_duration_us);
absl::Status RecordHeartbeat(uint64_t task_incarnation);
int64_t TimeSinceLastHeartbeatMs();
void SetError(absl::Status status);
DeviceInfo GetDeviceInfo() { return devices_; }
void CollectDeviceInfo(const DeviceInfo& devices) { devices_ = devices; }
bool DeviceInfoIsCollected() { return devices_.device_size() != 0; }
absl::flat_hash_set<std::string> GetOngoingBarriers();
void JoinBarrier(std::string_view barrier_id);
void ExitBarrier(std::string_view barrier_id);
bool IsDisconnectedBeyondGracePeriod();
private:
uint64_t task_incarnation_ = 0;
CoordinatedTaskState state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
absl::Status status_;
absl::Mutex last_heartbeat_mu_;
    uint64_t last_heartbeat_us_ ABSL_GUARDED_BY(last_heartbeat_mu_) = 0;
uint64_t disconnect_grace_period_us_ = 0;
DeviceInfo devices_;
absl::flat_hash_set<std::string> ongoing_barriers_for_task_;
};
std::unique_ptr<CoordinationClientCache> client_cache_;
Env& env_;
const uint64_t service_incarnation_ = random::New64();
const uint64_t heartbeat_timeout_ms_;
const absl::Duration shutdown_barrier_timeout_;
bool allow_new_incarnation_to_reconnect_ = false;
bool client_polling_for_error_ = false;
std::function<DeviceInfo(const DeviceInfo& devices)>
post_aggregate_device_fn_;
const std::string device_propagation_barrier_id_ =
absl::StrCat("WaitForAllTasks::", std::to_string(service_incarnation_));
const std::string shutdown_barrier_id_ =
absl::StrCat("Shutdown::", std::to_string(service_incarnation_));
absl::Mutex state_mu_;
absl::flat_hash_map<std::string, std::unique_ptr<TaskState>> cluster_state_
ABSL_GUARDED_BY(state_mu_);
DeviceInfo cluster_devices_ ABSL_GUARDED_BY(state_mu_);
absl::Mutex kv_mu_;
std::map<std::string, std::string> kv_store_ ABSL_GUARDED_BY(kv_mu_);
absl::flat_hash_map<std::string, std::vector<StatusOrValueCallback>> get_cb_
ABSL_GUARDED_BY(kv_mu_);
absl::CondVar check_staleness_thread_cv_;
bool shutting_down_ ABSL_GUARDED_BY(state_mu_) = false;
std::unique_ptr<Thread> check_staleness_thread_;
absl::flat_hash_map<std::string, BarrierState> barriers_
ABSL_GUARDED_BY(state_mu_);
absl::flat_hash_set<std::string> ongoing_barriers_ ABSL_GUARDED_BY(state_mu_);
absl::flat_hash_set<std::string> recoverable_jobs_;
ErrorPollingState error_polling_state_ ABSL_GUARDED_BY(state_mu_);
CoordinationServiceStandaloneImpl(const CoordinationServiceStandaloneImpl&) =
delete;
void operator=(const CoordinationServiceStandaloneImpl&) = delete;
};
void CoordinationServiceStandaloneImpl::ErrorPollingState::SetError(
const absl::Status& error) {
if (responded_) return;
responded_ = true;
error_ = error;
for (auto& done_cb : done_callbacks_) {
done_cb(error_);
}
done_callbacks_.clear();
}
void CoordinationServiceStandaloneImpl::ErrorPollingState::AddTask(
const CoordinatedTask& task, StatusCallback&& done) {
if (Responded()) return;
polling_task_names_.insert(GetTaskName(task));
  done_callbacks_.emplace_back(std::move(done));
}
void CoordinationServiceStandaloneImpl::TaskState::SetConnected(
uint64_t task_incarnation) {
state_ = CoordinatedTaskState::TASKSTATE_CONNECTED;
status_ = absl::OkStatus();
task_incarnation_ = task_incarnation;
absl::MutexLock l(&last_heartbeat_mu_);
last_heartbeat_us_ = Env::Default()->NowMicros();
}
void CoordinationServiceStandaloneImpl::TaskState::Disconnect(
uint64_t grace_period_duration_us) {
disconnect_grace_period_us_ =
Env::Default()->NowMicros() + grace_period_duration_us;
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
status_ = absl::OkStatus();
}
void CoordinationServiceStandaloneImpl::TaskState::SetError(
    absl::Status status) {
  if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) return;
  state_ = CoordinatedTaskState::TASKSTATE_ERROR;
  status_ = std::move(status);
}
absl::Status CoordinationServiceStandaloneImpl::TaskState::RecordHeartbeat(
uint64_t task_incarnation) {
if (!status_.ok()) return status_;
if (task_incarnation != task_incarnation_) {
return MakeCoordinationError(absl::AbortedError(absl::StrCat(
"Incarnation ID mismatch: expecting ", task_incarnation_, " but got ",
task_incarnation, ". This means the remote task has restarted.")));
}
absl::MutexLock l(&last_heartbeat_mu_);
last_heartbeat_us_ = Env::Default()->NowMicros();
return absl::OkStatus();
}
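// A heartbeat carrying a stale incarnation id means the remote task has
// restarted since it registered; the Aborted error above lets callers
// distinguish a restart from a merely delayed heartbeat.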
int64_t
CoordinationServiceStandaloneImpl::TaskState::TimeSinceLastHeartbeatMs() {
absl::MutexLock l(&last_heartbeat_mu_);
return (Env::Default()->NowMicros() - last_heartbeat_us_) / 1000;
}
absl::flat_hash_set<std::string>
CoordinationServiceStandaloneImpl::TaskState::GetOngoingBarriers() {
return ongoing_barriers_for_task_;
}
void CoordinationServiceStandaloneImpl::TaskState::JoinBarrier(
std::string_view barrier_id) {
ongoing_barriers_for_task_.emplace(barrier_id);
}
void CoordinationServiceStandaloneImpl::TaskState::ExitBarrier(
std::string_view barrier_id) {
ongoing_barriers_for_task_.erase(barrier_id);
}
bool CoordinationServiceStandaloneImpl::TaskState::
IsDisconnectedBeyondGracePeriod() {
return GetState() == CoordinatedTaskState::TASKSTATE_DISCONNECTED &&
Env::Default()->NowMicros() > disconnect_grace_period_us_;
}
void CoordinationServiceStandaloneImpl::SetDeviceAggregationFunction(
std::function<DeviceInfo(const DeviceInfo& devices)>
post_aggregate_device_fn) {
post_aggregate_device_fn_ = std::move(post_aggregate_device_fn);
}
CoordinationServiceStandaloneImpl::CoordinationServiceStandaloneImpl(
Env* env, const CoordinationServiceConfig& config,
std::unique_ptr<CoordinationClientCache> client_cache)
: client_cache_(std::move(client_cache)),
env_(*env),
heartbeat_timeout_ms_([&config]() -> uint64_t {
return config.heartbeat_timeout_in_ms() > 0
? config.heartbeat_timeout_in_ms()
: kDefaultHeartbeatTimeoutMs;
}()),
shutdown_barrier_timeout_(
absl::Milliseconds(config.shutdown_barrier_timeout_in_ms())),
allow_new_incarnation_to_reconnect_(
config.allow_new_incarnation_to_reconnect()) {
LOG(INFO) << "Initializing CoordinationService";
recoverable_jobs_ = absl::flat_hash_set<std::string>(
config.recoverable_jobs().cbegin(), config.recoverable_jobs().cend());
for (const auto& job : config.coordinated_job_list()) {
for (int i = 0; i < job.num_tasks(); ++i) {
const std::string task_name = GetTaskName(job.name(), i);
cluster_state_.emplace(task_name, std::make_unique<TaskState>());
}
}
StartCheckStaleness();
}
void CoordinationServiceStandaloneImpl::CheckHeartbeatTimeout() {
absl::Status status = absl::OkStatus();
std::vector<std::string_view> stale_task_names;
const bool has_service_to_client_connection = client_cache_ != nullptr;
{
absl::MutexLock l(&state_mu_);
for (const auto& [task_name, task_state] : cluster_state_) {
if (task_state->GetState() != CoordinatedTaskState::TASKSTATE_CONNECTED) {
continue;
}
const bool is_stale =
task_state->TimeSinceLastHeartbeatMs() > heartbeat_timeout_ms_;
VLOG(10) << "Checking staleness for " << task_name
<< " stale?=" << is_stale;
if (is_stale) {
stale_task_names.push_back(task_name);
status = MakeCoordinationError(absl::UnavailableError(
absl::StrCat("Task ", task_name,
" heartbeat timeout. This indicates that the "
"remote task has failed, got preempted, or "
"crashed unexpectedly. Check the task logs "
"for an earlier error to debug further.")));
SetTaskError(task_name, status);
}
}
}
if (!stale_task_names.empty()) {
if (!has_service_to_client_connection) {
absl::Status heartbeat_timeout_error =
MakeCoordinationError(absl::UnavailableError(absl::StrCat(
"The following tasks are unhealthy (stopped sending "
"heartbeats):\n",
absl::StrJoin(stale_task_names, "\n"),
"\nCheck the task logs for an earlier error to debug "
"further.")));
if (SendErrorPollingResponseOrStopService(heartbeat_timeout_error)) {
return;
}
} else {
for (const auto& stale_task_name : stale_task_names) {
PropagateError(GetTaskFromName(stale_task_name));
}
}
}
}
void CoordinationServiceStandaloneImpl::CheckBarrierTimeout() {
absl::flat_hash_map<std::string, BarrierState*> expired_barriers;
uint64_t current_time_micros = Env::Default()->NowMicros();
std::optional<std::string> shutdown_error;
{
absl::MutexLock l(&state_mu_);
for (std::string_view barrier_id : ongoing_barriers_) {
auto* barrier = &barriers_[barrier_id];
if (current_time_micros > barrier->deadline_in_micros) {
expired_barriers[barrier_id] = barrier;
}
}
for (const auto& [barrier_id, barrier] : expired_barriers) {
std::string pending_tasks;
int pending_task_count = 0;
for (const auto& [task, at_barrier] : barrier->tasks_at_barrier) {
if (at_barrier) {
continue;
}
++pending_task_count;
if (pending_task_count < kPendingTaskLogLimit) {
absl::StrAppend(&pending_tasks, GetTaskName(task), "\n");
}
}
const int64_t tasks_at_barrier =
barrier->tasks_at_barrier.size() - pending_task_count;
std::string error_message = absl::StrFormat(
"Barrier timed out. Id: %s. This usually happens because a task "
"triggered the barrier too early or too slowly. Please look at the "
"task logs (both timed out and first task) to debug further.\n"
"# of tasks that reached the barrier: %d/%d.\nThe first "
"task at the barrier: %s. Some timed out task names:\n%s",
barrier_id, tasks_at_barrier, barrier->tasks_at_barrier.size(),
GetTaskName(barrier->initiating_task), pending_tasks);
if (barrier_id == shutdown_barrier_id_) {
shutdown_error = error_message;
}
const absl::Status error =
MakeCoordinationError(absl::DeadlineExceededError(error_message));
PassBarrier(barrier_id, error, barrier);
}
}
const bool has_service_to_client_connection = client_cache_ != nullptr;
if (!has_service_to_client_connection && shutdown_error) {
SendErrorPollingResponseOrStopService(
MakeCoordinationError(absl::DeadlineExceededError(absl::StrCat(
"Shutdown barrier timed out. Error: ", *shutdown_error))));
}
}
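// Background loop that wakes up roughly once per second to check for stale
// heartbeats and expired barriers until the service shuts down.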
void CoordinationServiceStandaloneImpl::CheckStaleness() {
while (true) {
{
absl::MutexLock l(&state_mu_);
check_staleness_thread_cv_.WaitWithTimeout(&state_mu_, absl::Seconds(1));
if (shutting_down_) {
return;
}
}
CheckHeartbeatTimeout();
CheckBarrierTimeout();
}
}
void CoordinationServiceStandaloneImpl::StartCheckStaleness() {
check_staleness_thread_.reset(env_.StartThread(
{}, kHealthCheckThread,
absl::bind_front(&CoordinationServiceStandaloneImpl::CheckStaleness,
this)));
}
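// Shuts the service down: cancels pending GetKeyValue() callbacks, fails all
// ongoing barriers, clears task state, and answers any outstanding
// error-polling requests with a Cancelled status.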
void CoordinationServiceStandaloneImpl::Stop(bool shut_staleness_thread) {
{
absl::MutexLock l(&kv_mu_);
for (const auto& [key, get_kv_callbacks] : get_cb_) {
for (const auto& get_kv_callback : get_kv_callbacks) {
get_kv_callback(absl::CancelledError(
absl::StrCat("Coordination service is shutting down. Cancelling "
"GetKeyValue() for key: ",
key)));
}
}
get_cb_.clear();
}
{
absl::MutexLock l(&state_mu_);
shutting_down_ = true;
check_staleness_thread_cv_.SignalAll();
for (auto& [barrier_id, barrier] : barriers_) {
if (!barrier.passed) {
absl::Status error =
MakeCoordinationError(absl::AbortedError(absl::StrCat(
"Barrier failed because service is shutting down. Barrier_id: ",
barrier_id)));
PassBarrier(barrier_id, error, &barrier);
}
}
barriers_.clear();
cluster_state_.clear();
}
if (IsClientPollingForError()) {
SendErrorPollingResponse(
absl::CancelledError("Coordination service is shutting down. "
"Cancelling PollForErrorAsync()"));
}
if (shut_staleness_thread) {
check_staleness_thread_.reset();
}
}
bool CoordinationServiceStandaloneImpl::ServiceHasStopped() const {
return shutting_down_;
}
void CoordinationServiceStandaloneImpl::LogConnectStatusLocked() const {
const int num_tasks = cluster_state_.size();
int pending_tasks = 0;
std::vector<std::string> task_names;
for (const auto& [task_name, task_state] : cluster_state_) {
if (task_state->GetState() != CoordinatedTaskState::TASKSTATE_CONNECTED) {
pending_tasks++;
if (task_names.size() < kPendingStragglerLogLimit) {
task_names.push_back(task_name);
}
}
}
LOG(INFO) << "Waiting for " << pending_tasks << "/" << num_tasks
<< " tasks to connect.";
if (!task_names.empty()) {
LOG(INFO) << "Example stragglers:\n" << absl::StrJoin(task_names, "\n");
}
}
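// Connects a task to the service. Re-registration with the same incarnation
// is idempotent; a disconnected task (or, if configured, a restarted task in
// an Unavailable error state) may reconnect, while a different incarnation or
// an errored task is rejected with an Aborted error that is also propagated.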
absl::Status CoordinationServiceStandaloneImpl::RegisterTask(
const CoordinatedTask& task, uint64_t incarnation) {
const std::string task_name = GetTaskName(task);
absl::Status error;
std::string error_message;
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(absl::StrCat(
"Coordination service has stopped. RegisterTask() from task: ",
task_name,
" failed. This usually implies an earlier error that caused "
"coordination service to shut down before the workers disconnect "
"gracefully. Check the task leader's logs for an earlier error to "
"debug the root cause.")));
}
if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Unexpected task registered with task_name=", task_name)));
}
auto* task_cluster_state = cluster_state_[task_name].get();
const auto task_state = task_cluster_state->GetState();
const auto task_status = task_cluster_state->GetStatus();
if (task_state == CoordinatedTaskState::TASKSTATE_DISCONNECTED ||
(allow_new_incarnation_to_reconnect_ &&
(absl::IsUnavailable(task_status) &&
task_status.GetPayload(CoordinationErrorPayloadKey())))) {
task_cluster_state->SetConnected(incarnation);
LOG(INFO) << task_name
<< " has connected to coordination service. Incarnation: "
<< incarnation;
LogConnectStatusLocked();
return absl::OkStatus();
} else if (task_state == CoordinatedTaskState::TASKSTATE_CONNECTED) {
if (task_cluster_state->GetTaskIncarnation() == incarnation) {
task_cluster_state->SetConnected(incarnation);
LOG(INFO) << task_name
<< " has connected to coordination service with the same "
<< "incarnation again: " << incarnation;
LogConnectStatusLocked();
return absl::OkStatus();
} else {
error_message =
absl::StrCat(task_name,
" unexpectedly tried to connect with a different "
"incarnation. It has likely restarted.");
}
} else {
error_message =
absl::StrCat(task_name,
" unexpectedly tried to connect while it is already in "
"error. ResetTask() should be called before a "
"subsequent connect attempt.");
}
LOG(ERROR) << error_message;
error = MakeCoordinationError(absl::AbortedError(error_message), task);
SetTaskError(task_name, error);
}
assert(!error.ok());
PropagateError(task);
return error;
}
void CoordinationServiceStandaloneImpl::WaitForAllTasks(
const CoordinatedTask& task, const DeviceInfo& devices,
StatusCallback done) {
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. WaitForAllTasks() failed.")));
return;
}
const auto& task_state = cluster_state_.find(GetTaskName(task));
if (task_state != cluster_state_.end() &&
!task_state->second->DeviceInfoIsCollected()) {
task_state->second->CollectDeviceInfo(devices);
}
}
BarrierAsync(device_propagation_barrier_id_, kDevicePropagationTimeout, task,
{}, std::move(done));
}
void CoordinationServiceStandaloneImpl::ShutdownTaskAsync(
const CoordinatedTask& task, StatusCallback done) {
VLOG(3) << "Task " << GetTaskName(task) << " invoked ShutdownTaskAsync()";
if (shutdown_barrier_timeout_ > absl::ZeroDuration()) {
BarrierAsync(shutdown_barrier_id_, shutdown_barrier_timeout_, task, {},
done);
} else {
absl::Status status;
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
status = MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. ShutdownTaskAsync() failed."));
} else {
status = DisconnectTask(task);
}
}
done(status);
}
}
absl::Status CoordinationServiceStandaloneImpl::ResetTask(
const CoordinatedTask& task) {
absl::MutexLock l(&state_mu_);
return DisconnectTask(task);
}
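// Marks the task as disconnected and fails any barrier the task is still
// participating in.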
absl::Status CoordinationServiceStandaloneImpl::DisconnectTask(
const CoordinatedTask& task) {
const std::string task_name = GetTaskName(task);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(
absl::StrCat("Coordination service has stopped. DisconnectTask() "
"failed for task_name=",
task_name)));
} else if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Unexpected disconnect request with task_name=", task_name)));
} else if (cluster_state_[task_name]->GetState() ==
CoordinatedTaskState::TASKSTATE_DISCONNECTED) {
return MakeCoordinationError(absl::FailedPreconditionError(
absl::StrCat("The task is already disconnected: ", task_name)));
}
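  // The grace period equals the heartbeat timeout (converted from ms to us).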
cluster_state_[task_name]->Disconnect(
heartbeat_timeout_ms_ * 1000);
for (const auto& barrier_id :
cluster_state_[task_name]->GetOngoingBarriers()) {
absl::Status error = MakeCoordinationError(absl::InternalError(absl::StrCat(
"Barrier failed because a task has disconnected. Barrier Id: ",
barrier_id, ", Task: ", task_name)));
PassBarrier(barrier_id, error, &barriers_[barrier_id]);
}
LOG(INFO) << task_name << " has disconnected from coordination service.";
return absl::OkStatus();
}
const DeviceInfo& CoordinationServiceStandaloneImpl::ListClusterDevices() {
return cluster_devices_;
}
uint64_t CoordinationServiceStandaloneImpl::GetServiceIncarnation() {
return service_incarnation_;
}
absl::Status CoordinationServiceStandaloneImpl::ReportTaskError(
const CoordinatedTask& task, absl::Status error) {
const std::string task_name = GetTaskName(task);
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. ReportTaskError() failed."));
} else if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected request from task ", task_name)));
} else if (cluster_state_[task_name]->GetState() !=
CoordinatedTaskState::TASKSTATE_CONNECTED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"The task is not connected or already has an error."));
} else {
SetTaskError(task_name, error);
}
}
PropagateError(task, true);
return absl::OkStatus();
}
std::vector<CoordinatedTaskStateInfo>
CoordinationServiceStandaloneImpl::GetTaskState(
const std::vector<CoordinatedTask>& tasks) {
std::vector<CoordinatedTaskStateInfo> states_info;
for (const auto& task : tasks) {
const std::string task_name = GetTaskName(task);
auto& state_info = states_info.emplace_back();
absl::Status error;
{
absl::MutexLock l(&state_mu_);
state_info.set_state(cluster_state_[task_name]->GetState());
error = cluster_state_[task_name]->GetStatus();
}
*state_info.mutable_task() = task;
state_info.set_error_code(error.raw_code());
state_info.set_error_message(std::string(error.message()));
if (!error.ok()) {
*state_info.mutable_error_payload()->mutable_source_task() = task;
state_info.mutable_error_payload()->set_is_reported_error(false);
}
}
return states_info;
}
absl::Status CoordinationServiceStandaloneImpl::RecordHeartbeat(
const CoordinatedTask& task, uint64_t incarnation) {
const std::string task_name = GetTaskName(task);
absl::Status s = absl::OkStatus();
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(absl::StrCat(
"Coordination service has stopped. RecordHeartbeat() from task: ",
task_name,
" failed. This usually implies an earlier error that caused "
"coordination service to shut down before the workers disconnect "
"gracefully. Check the task leader's logs for an earlier error to "
"debug the root cause.")));
} else if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected heartbeat request from task: ", task_name,
". This usually implies a configuration error.")));
}
if (!cluster_state_[task_name]->GetStatus().ok()) {
return cluster_state_[task_name]->GetStatus();
} else if (cluster_state_[task_name]->IsDisconnectedBeyondGracePeriod()) {
return MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Task with task_name=", task_name,
" must be registered before sending heartbeat messages")));
}
VLOG(10) << "Record heartbeat from task: " << task_name
<< "at incarnation: " << incarnation << "at " << absl::Now();
s = cluster_state_[task_name]->RecordHeartbeat(incarnation);
}
if (!s.ok()) {
{
absl::MutexLock l(&state_mu_);
SetTaskError(task_name, s);
}
PropagateError(task);
}
return s;
}
void CoordinationServiceStandaloneImpl::ReportServiceErrorToTaskAsync(
const CoordinatedTask& destination_task, absl::Status error) {
assert(!error.ok());
if (client_cache_ == nullptr) {
LOG(ERROR) << error;
return;
}
auto request = std::make_shared<ReportErrorToTaskRequest>();
auto response = std::make_shared<ReportErrorToTaskResponse>();
request->set_error_code(error.raw_code());
request->set_error_message(std::string(error.message()));
CoordinatedTask* error_source =
request->mutable_error_payload()->mutable_source_task();
error_source->set_job_name("coordination_service");
auto call_opts = std::make_shared<CallOptions>();
call_opts->SetTimeout(kServiceToClientTimeoutMs);
const std::string task_name = GetTaskName(destination_task);
CoordinationClient* client = client_cache_->GetClient(task_name);
client->ReportErrorToTaskAsync(
call_opts.get(), request.get(), response.get(),
[request, response, task_name, call_opts](absl::Status s) {
if (!s.ok()) {
LOG(ERROR) << "Encountered another error while reporting to "
<< task_name << ": " << s;
}
});
}
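// Broadcasts the source task's error to every connected task. Recoverable
// jobs are exempt; without a service-to-client connection the error is
// instead surfaced via error polling (or the service is stopped).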
void CoordinationServiceStandaloneImpl::PropagateError(
const CoordinatedTask& source_task, bool is_reported_by_task) {
VLOG(3) << "PropagateError() from " << GetTaskName(source_task);
if (isRecoverableJob(source_task.job_name())) return;
absl::Status error;
{
absl::MutexLock l(&state_mu_);
error = cluster_state_[GetTaskName(source_task)]->GetStatus();
}
assert(!error.ok());
ReportErrorToTaskRequest request;
request.set_error_code(error.raw_code());
request.set_error_message(std::string(error.message()));
CoordinationServiceError* payload = request.mutable_error_payload();
*payload->mutable_source_task() = source_task;
payload->set_is_reported_error(is_reported_by_task);
CallOptions call_opts;
call_opts.SetTimeout(kServiceToClientTimeoutMs);
std::vector<std::shared_ptr<absl::Notification>> notifications;
std::vector<std::string_view> task_names;
{
absl::ReaderMutexLock l(&state_mu_);
task_names.reserve(cluster_state_.size());
for (const auto& pair : cluster_state_) {
task_names.emplace_back(pair.first);
}
}
for (std::string_view task : task_names) {
{
absl::MutexLock l(&state_mu_);
if (cluster_state_[task]->GetState() !=
CoordinatedTaskState::TASKSTATE_CONNECTED)
continue;
}
if (client_cache_ == nullptr) {
SendErrorPollingResponseOrStopService(error);
return;
}
CoordinationClient* client = client_cache_->GetClient(std::string(task));
auto response = std::make_shared<ReportErrorToTaskResponse>();
auto n = std::make_shared<absl::Notification>();
client->ReportErrorToTaskAsync(
&call_opts, &request, response.get(),
[response, n, task](absl::Status s) {
if (!s.ok()) {
LOG(ERROR) << "Encountered another error while reporting to "
<< task << ": " << s;
}
n->Notify();
});
notifications.push_back(n);
}
for (auto& n : notifications) {
n->WaitForNotification();
}
}
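// Normalizes a key into a Unix-like path: strips leading and trailing '/'
// and collapses runs of consecutive '/' into a single separator.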
std::string NormalizeKey(std::string_view orig_key) {
std::string norm_key = std::string(orig_key);
const char* src = norm_key.c_str();
std::string::iterator dst = norm_key.begin();
while (*src) {
while (*src == '/') src++;
while (*src && *src != '/') {
*dst++ = *src++;
}
if (*src) {
*dst++ = *src++;
}
}
if (dst > norm_key.begin() && *(dst - 1) == '/') dst--;
norm_key.resize(dst - norm_key.begin());
return norm_key;
}
absl::Status CoordinationServiceStandaloneImpl::InsertKeyValue(
std::string_view key, std::string_view value) {
  return InsertKeyValue(key, value, /*allow_overwrite=*/false);
}
absl::Status CoordinationServiceStandaloneImpl::InsertKeyValue(
std::string_view key, std::string_view value, bool allow_overwrite) {
VLOG(3) << "InsertKeyValue(): " << key << ": " << value
<< " allow_overwrite: " << allow_overwrite;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
if (!allow_overwrite && kv_store_.find(norm_key) != kv_store_.end()) {
return MakeCoordinationError(absl::AlreadyExistsError(
absl::StrCat("Config key ", key, " already exists.")));
}
kv_store_.insert_or_assign(norm_key, value);
auto iter = get_cb_.find(norm_key);
if (iter != get_cb_.end()) {
for (const auto& cb : iter->second) {
cb(value);
}
get_cb_.erase(iter);
}
return absl::OkStatus();
}
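// Returns the stored value immediately if the key exists; otherwise the
// callback is queued and fired by a later InsertKeyValue() for the same key
// (or cancelled when the service stops).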
void CoordinationServiceStandaloneImpl::GetKeyValueAsync(
std::string_view key, StatusOrValueCallback done) {
VLOG(3) << "GetKeyValue(): " << key;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
const auto& iter = kv_store_.find(norm_key);
if (iter != kv_store_.end()) {
done(iter->second);
return;
}
auto cb_iter = get_cb_.find(norm_key);
if (cb_iter == get_cb_.end()) {
cb_iter =
get_cb_.emplace(norm_key, std::vector<StatusOrValueCallback>()).first;
}
cb_iter->second.emplace_back(std::move(done));
}
absl::StatusOr<std::string> CoordinationServiceStandaloneImpl::TryGetKeyValue(
std::string_view key) {
VLOG(3) << "TryGetKeyValue(): " << key;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
const auto& iter = kv_store_.find(norm_key);
if (iter == kv_store_.end()) {
return absl::NotFoundError(absl::StrCat("Config key ", key, " not found."));
}
return iter->second;
}
std::vector<KeyValueEntry> CoordinationServiceStandaloneImpl::GetKeyValueDir(
std::string_view directory_key) {
VLOG(3) << "TryGetKeyValueDir(): " << directory_key;
std::vector<KeyValueEntry> kvs_in_directory;
const std::string norm_key = NormalizeKey(directory_key);
const std::string dir = absl::StrCat(norm_key, "/");
absl::MutexLock l(&kv_mu_);
auto begin = kv_store_.lower_bound(dir);
std::map<std::string, std::string>::iterator it;
for (it = begin; it != kv_store_.end(); ++it) {
if (std::mismatch(dir.begin(), dir.end(), it->first.begin()).first !=
dir.end()) {
break;
}
KeyValueEntry kv;
kv.set_key(it->first);
kv.set_value(it->second);
kvs_in_directory.push_back(kv);
}
return kvs_in_directory;
}
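// Deletes the exact key as well as every key under it when the key is used
// as a directory prefix.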
absl::Status CoordinationServiceStandaloneImpl::DeleteKeyValue(
std::string_view key) {
VLOG(3) << "DeleteKeyValue(): " << key;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
const std::string dir = absl::StrCat(norm_key, "/");
auto begin = kv_store_.lower_bound(dir);
std::map<std::string, std::string>::iterator end;
for (end = begin; end != kv_store_.end(); end++) {
if (std::mismatch(dir.begin(), dir.end(), end->first.begin()).first !=
dir.end())
break;
}
kv_store_.erase(begin, end);
auto iter = kv_store_.find(norm_key);
if (iter != kv_store_.end()) {
kv_store_.erase(iter);
}
return absl::OkStatus();
}
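// Records the error on the task's state and fails every barrier the task is
// currently participating in. Callers hold state_mu_.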
void CoordinationServiceStandaloneImpl::SetTaskError(std::string_view task_name,
absl::Status error) {
cluster_state_[task_name]->SetError(error);
for (const auto& barrier_id :
cluster_state_[task_name]->GetOngoingBarriers()) {
absl::Status barrier_error =
MakeCoordinationError(absl::InternalError(absl::StrCat(
"Barrier failed beacuse a task is in error. Barrier Id: ",
barrier_id, ", Task: ", task_name, "Error: ", error.message())));
PassBarrier(barrier_id, barrier_error, &barriers_[barrier_id]);
}
LOG(ERROR) << task_name
<< " has been set to ERROR in coordination service: " << error;
}
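// Registers a long-poll from `task`; the callback fires once a cluster error
// occurs (or immediately if one has already been recorded). Only valid when
// there is no service-to-client connection.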
void CoordinationServiceStandaloneImpl::PollForErrorAsync(
const CoordinatedTask& task, StatusCallback done) {
const std::string task_name = GetTaskName(task);
VLOG(3) << "Task " << task_name << " invoked PollForErrorAsync().";
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"PollForError requested after coordination service has shut down.")));
return;
}
if (client_cache_ != nullptr) {
done(MakeCoordinationError(
absl::InternalError("Should not use error polling from service when "
"there is service to client connection.")));
return;
}
client_polling_for_error_ = true;
if (!cluster_state_.contains(task_name)) {
done(MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected task (", task_name,
") that is not in the cluster polling for errors."))));
return;
}
if (cluster_state_[task_name]->IsDisconnectedBeyondGracePeriod()) {
done(MakeCoordinationError(absl::FailedPreconditionError(
absl::StrCat("Task (", task_name,
") that has not been registered or has disconnected "
"polling for errors."))));
return;
}
if (cluster_state_[task_name]->GetState() ==
CoordinatedTaskState::TASKSTATE_ERROR) {
done(MakeCoordinationError(absl::FailedPreconditionError(absl::StrCat(
"Task (", task_name,
") that is already in error state polling for errors. Current error: ",
cluster_state_[task_name]->GetStatus().ToString()))));
return;
}
if (error_polling_state_.Responded()) {
done(error_polling_state_.GetError());
return;
}
error_polling_state_.AddTask(task, std::move(done));
}
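// Rejects a barrier call from a task that is not in the participating-task
// list; the barrier is failed immediately so other participants observe the
// error. Returns true if the arguments are valid.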
bool CoordinationServiceStandaloneImpl::ValidateBarrierArgs(
std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) {
const std::string source_task_name = GetTaskName(task);
  bool among_participating_tasks =
      std::find_if(participating_tasks.begin(), participating_tasks.end(),
                   [&](const CoordinatedTask& participating_task) {
                     return GetTaskName(participating_task) ==
                            source_task_name;
                   }) != participating_tasks.end();
  if (!participating_tasks.empty() && !among_participating_tasks) {
    absl::Status error = MakeCoordinationError(absl::InvalidArgumentError(
        absl::StrCat("A non-participating task (", GetTaskName(task),
                     ") called the barrier: ", barrier_id)));
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"Barrier requested after coordination service has shut down.")));
return false;
}
auto pair = barriers_.try_emplace(barrier_id);
auto it = pair.first;
auto* barrier = &it->second;
PassBarrier(barrier_id, error, barrier);
}
done(error);
return false;
}
return true;
}
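// Sets up a new barrier: records the initiating task, resolves the set of
// participants (the whole cluster when none are given), computes the
// deadline, and registers the barrier with each participant. Returns false
// (after failing the barrier) if a participant is unknown or already errored.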
bool CoordinationServiceStandaloneImpl::InitializeBarrier(
BarrierState* barrier, std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) {
barrier->passed = false;
barrier->initiating_task = task;
if (participating_tasks.empty()) {
for (const auto& task_state : cluster_state_) {
std::string_view task_name = task_state.first;
barrier->tasks_at_barrier[GetTaskFromName(task_name)] = false;
}
} else {
for (const auto& task : participating_tasks) {
const std::string task_name = GetTaskName(task);
if (!cluster_state_.contains(task_name)) {
absl::Status error = MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected task (", task_name,
") that is not in the cluster called the barrier. "
"Barrier Id: ",
barrier_id)));
PassBarrier(barrier_id, error, barrier);
done(error);
return false;
}
barrier->tasks_at_barrier[task] = false;
}
}
barrier->num_pending_tasks = barrier->tasks_at_barrier.size();
for (const auto& pending_task : barrier->tasks_at_barrier) {
const std::string task_name = GetTaskName(pending_task.first);
if (cluster_state_[task_name]->GetState() ==
CoordinatedTaskState::TASKSTATE_ERROR) {
absl::Status error = MakeCoordinationError(absl::InternalError(
absl::StrCat("Task (", task_name,
") is already in error before the barrier "
"was called. Barrier Id: ",
barrier_id)));
PassBarrier(barrier_id, error, barrier);
done(error);
return false;
}
}
barrier->deadline_in_micros =
Env::Default()->NowMicros() + (timeout / absl::Microseconds(1));
ongoing_barriers_.emplace(barrier_id);
const size_t num_ongoing_barriers = ongoing_barriers_.size();
if (num_ongoing_barriers > kOngoingBarriersSoftLimit) {
LOG(WARNING) << "There is a high number of ongoing barriers in "
"coordination service: "
<< num_ongoing_barriers;
}
for (const auto& pending_task : barrier->tasks_at_barrier) {
const CoordinatedTask& task = pending_task.first;
cluster_state_[GetTaskName(task)]->JoinBarrier(barrier_id);
}
return true;
}
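// Blocks (asynchronously) until all participating tasks reach the barrier,
// the timeout expires, or a participant fails. A barrier that already passed
// returns its recorded result; the shutdown barrier additionally disconnects
// the calling task.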
void CoordinationServiceStandaloneImpl::BarrierAsync(
std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) {
VLOG(3) << "Task " << GetTaskName(task) << " invoked BarrierAsync("
<< barrier_id << ").";
if (!ValidateBarrierArgs(barrier_id, timeout, task, participating_tasks,
done)) {
return;
}
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"Barrier requested after coordination service has shut down.")));
return;
}
auto pair = barriers_.try_emplace(barrier_id);
auto it = pair.first;
bool inserted = pair.second;
auto* barrier = &it->second;
if (inserted) {
if (!InitializeBarrier(barrier, barrier_id, timeout, task,
participating_tasks, done)) {
return;
}
}
if (barrier->passed) {
if (barrier_id == shutdown_barrier_id_) {
absl::Status s = DisconnectTask(task);
if (!s.ok()) {
done(s);
return;
}
}
done(barrier->result);
return;
}
barrier->done_callbacks.push_back(done);
if (!ValidateTaskArgs(participating_tasks, barrier->tasks_at_barrier,
cluster_state_.size())) {
absl::Status error =
MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Conflicting tasks specified for the same barrier: ", barrier_id)));
PassBarrier(barrier_id, error, barrier);
return;
}
if (!barrier->tasks_at_barrier[task]) {
barrier->tasks_at_barrier[task] = true;
--barrier->num_pending_tasks;
if (barrier->num_pending_tasks == 0) {
PassBarrier(barrier_id, absl::OkStatus(), barrier);
return;
}
}
}
absl::Status CoordinationServiceStandaloneImpl::CancelBarrier(
std::string_view barrier_id, const CoordinatedTask& task) {
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. CancelBarrier() failed."));
}
auto [it, inserted] = barriers_.try_emplace(barrier_id);
auto* barrier = &it->second;
if (inserted) {
LOG(WARNING) << "Barrier (" << barrier_id
<< ") is cancelled before being created by task: "
<< GetTaskName(task);
}
if (barrier->passed) {
return MakeCoordinationError(absl::FailedPreconditionError(absl::StrCat(
"Barrier (", barrier_id, ") has already been passed with status code: ",
barrier->result.code())));
}
absl::Status cancelled = MakeCoordinationError(absl::CancelledError(
absl::StrCat("Barrier (", barrier_id,
") is cancelled by task: ", GetTaskName(task))));
PassBarrier(barrier_id, cancelled, barrier);
VLOG(3) << "Barrier (" << barrier_id << ") is cancelled.";
return absl::OkStatus();
}
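// Completes the barrier with `result`: updates per-task bookkeeping, runs
// barrier-specific hooks (device aggregation, shutdown handling), and invokes
// all registered callbacks.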
void CoordinationServiceStandaloneImpl::PassBarrier(std::string_view barrier_id,
absl::Status result,
BarrierState* barrier) {
barrier->passed = true;
barrier->result = result;
VLOG(3) << "Barrier(" << barrier_id << ") has passed with status: " << result;
if (barrier_id == device_propagation_barrier_id_) {
AggregateClusterDevices();
}
for (const auto& task_at_barrier : barrier->tasks_at_barrier) {
const CoordinatedTask& task = task_at_barrier.first;
cluster_state_[GetTaskName(task)]->ExitBarrier(barrier_id);
}
if (barrier_id == shutdown_barrier_id_) {
CompleteShutdownAfterBarrier(result, barrier);
}
barrier->tasks_at_barrier.clear();
ongoing_barriers_.erase(barrier_id);
for (const auto& callback : barrier->done_callbacks) {
callback(result);
}
barrier->done_callbacks.clear();
}
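// Responds to every outstanding error-polling request with `error` (at most
// once) and logs any task that never started polling.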
void CoordinationServiceStandaloneImpl::SendErrorPollingResponse(
const absl::Status& error) {
CHECK(IsClientPollingForError())
<< "`SendErrorPollingResponse` should only be called after agents poll "
"errors from the service.";
{
absl::MutexLock l(&state_mu_);
if (error_polling_state_.Responded()) {
return;
}
}
if (!absl::IsCancelled(error)) {
VLOG(2) << "An error is encountered. Sending the error as a response to "
"all error polling requests: "
<< error;
}
std::vector<std::string> missing_tasks;
{
absl::MutexLock l(&state_mu_);
missing_tasks.reserve(cluster_state_.size());
for (const auto& [task_name, task_state] : cluster_state_) {
if (!error_polling_state_.IsTaskPolling(task_name)) {
missing_tasks.push_back(task_name);
}
}
error_polling_state_.SetError(error);
}
if (!missing_tasks.empty()) {
LOG(ERROR) << absl::StrFormat(
"The following %d tasks in the cluster has not sent request to poll "
"for error. Error will not be propagated to these tasks: %s",
missing_tasks.size(), absl::StrJoin(missing_tasks, ","));
}
}
bool CoordinationServiceStandaloneImpl::ValidateTaskArgs(
const std::vector<CoordinatedTask>& tasks_args,
const absl::flat_hash_map<CoordinatedTask, bool, CoordinatedTaskHash,
CoordinatedTaskEqual>& tasks_at_barrier,
int64_t cluster_size) {
if (tasks_args.empty()) {
return tasks_at_barrier.size() == cluster_size;
} else if (tasks_at_barrier.size() != tasks_args.size()) {
return false;
} else {
for (const auto& task : tasks_args) {
if (!tasks_at_barrier.contains(task)) {
return false;
}
}
}
return true;
}
void CoordinationServiceStandaloneImpl::AggregateClusterDevices() {
assert(cluster_devices_.device_size() == 0);
std::vector<CoordinatedTask> ordered_tasks;
ordered_tasks.reserve(cluster_state_.size());
for (const auto& task : cluster_state_) {
ordered_tasks.push_back(GetTaskFromName(task.first));
}
std::sort(ordered_tasks.begin(), ordered_tasks.end(),
[](const CoordinatedTask& task1, const CoordinatedTask& task2) {
if (task1.job_name() != task2.job_name()) {
return task1.job_name() < task2.job_name();
}
return task1.task_id() < task2.task_id();
});
for (const auto& task : ordered_tasks) {
cluster_devices_.MergeFrom(
cluster_state_[GetTaskName(task)]->GetDeviceInfo());
}
if (post_aggregate_device_fn_ != nullptr) {
cluster_devices_ = post_aggregate_device_fn_(cluster_devices_);
}
}
void CoordinationServiceStandaloneImpl::CompleteShutdownAfterBarrier(
absl::Status result, BarrierState* barrier) {
if (result.ok()) {
LOG(INFO) << "Shutdown barrier in coordination service has passed.";
} else {
LOG(ERROR) << "Shutdown barrier in coordination service has failed:\n"
<< result
<< "\nThis suggests that the workers are out of sync. Either "
"at least one worker is too fast in its execution / "
"crashed early or too slow / hanging. Check the logs for "
"an earlier error to identify the root cause.";
}
  absl::Status shutdown_error = MakeCoordinationError(absl::InternalError(
      absl::StrCat("Shutdown barrier has failed, but this task is not at the "
                   "barrier yet.\nBarrier result: '",
                   barrier->result.message(), "'")));
for (const auto& [task, at_barrier] : barrier->tasks_at_barrier) {
if (at_barrier) {
absl::Status disconnect_status = DisconnectTask(task);
if (!disconnect_status.ok()) {
LOG(ERROR) << disconnect_status;
}
} else {
ReportServiceErrorToTaskAsync(task, shutdown_error);
}
}
}
}
std::unique_ptr<CoordinationServiceInterface> EnableCoordinationService(
Env* env, const CoordinationServiceConfig& config,
std::unique_ptr<CoordinationClientCache> cache) {
return std::make_unique<CoordinationServiceStandaloneImpl>(env, config,
std::move(cache));
}
bool CoordinationServiceStandaloneImpl::isRecoverableJob(
const std::string_view task_name) const {
return recoverable_jobs_.find(task_name) != recoverable_jobs_.end();
}
bool CoordinationServiceStandaloneImpl::SendErrorPollingResponseOrStopService(
const absl::Status& error) {
CHECK(!error.ok()) << "SendErrorPollingResponseOrStopService called with OK "
"status. Should always return an error.";
assert(client_cache_ == nullptr);
if (IsClientPollingForError()) {
LOG(ERROR)
<< "Use error polling to propagate the following error to all tasks: "
<< error;
SendErrorPollingResponse(error);
return false;
}
LOG(ERROR) << "Stopping coordination service as there is no "
"service-to-client connection, but we encountered an error: "
<< error;
Stop(false);
return true;
}
bool CoordinationServiceStandaloneImpl::IsClientPollingForError() const {
return client_polling_for_error_;
}
REGISTER_COORDINATION_SERVICE("standalone", EnableCoordinationService);
} | #include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include "xla/tsl/distributed_runtime/coordination/test_device.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
using ::testing::Each;
using ::testing::EqualsProto;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
using ::testing::status::StatusIs;
using tensorflow::CoordinatedJob;
using tensorflow::CoordinatedTask;
using tensorflow::CoordinationServiceConfig;
using tensorflow::DeviceInfo;
using tensorflow::KeyValueEntry;
using tensorflow::TestDevice;
using tensorflow::TestDeviceList;
constexpr absl::Duration kHeartbeatTimeout = absl::Seconds(2);
constexpr absl::Duration kShutdownBarrierTimeout = absl::Milliseconds(500);
constexpr char kCoordinationServiceType[] = "standalone";
KeyValueEntry CreateKv(const std::string& key, const std::string& value) {
KeyValueEntry kv;
kv.set_key(key);
kv.set_value(value);
return kv;
}
CoordinationServiceConfig GetCoordinationServiceConfig(int num_tasks) {
CoordinationServiceConfig config;
config.set_service_type(kCoordinationServiceType);
CoordinatedJob* job = config.mutable_coordinated_job_list()->Add();
job->set_name("worker");
job->set_num_tasks(num_tasks);
return config;
}
class TestCoordinationClient : public CoordinationClient {
public:
TestCoordinationClient() = default;
absl::Status GetStatus() {
absl::MutexLock l(&mu_);
return status_;
}
void RegisterTaskAsync(CallOptions* opts, const RegisterTaskRequest* request,
RegisterTaskResponse* response,
StatusCallback done) override {
done(absl::OkStatus());
}
void ReportErrorToTaskAsync(CallOptions* call_opts,
const ReportErrorToTaskRequest* request,
ReportErrorToTaskResponse* response,
StatusCallback done) override {
absl::MutexLock l(&mu_);
status_ = absl::Status(static_cast<absl::StatusCode>(request->error_code()),
request->error_message());
done(absl::OkStatus());
}
#define UNIMPLEMENTED(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override{done(absl::UnimplementedError(#method "Async")); \
}
UNIMPLEMENTED(WaitForAllTasks);
UNIMPLEMENTED(ResetTask);
UNIMPLEMENTED(ReportErrorToService);
UNIMPLEMENTED(GetTaskState);
UNIMPLEMENTED(InsertKeyValue);
UNIMPLEMENTED(TryGetKeyValue);
UNIMPLEMENTED(GetKeyValueDir);
UNIMPLEMENTED(DeleteKeyValue);
UNIMPLEMENTED(Barrier);
UNIMPLEMENTED(CancelBarrier);
#undef UNIMPLEMENTED
#define UNIMPLEMENTED_WITH_CALL_OPTS(method) \
void method##Async(CallOptions* call_opts, const method##Request* request, \
method##Response* response, StatusCallback done) \
override{done(absl::UnimplementedError(#method "Async")); \
}
UNIMPLEMENTED_WITH_CALL_OPTS(GetKeyValue);
UNIMPLEMENTED_WITH_CALL_OPTS(Heartbeat);
UNIMPLEMENTED_WITH_CALL_OPTS(ShutdownTask);
UNIMPLEMENTED_WITH_CALL_OPTS(PollForError);
#undef UNIMPLEMENTED_WITH_CALL_OPTS
private:
absl::Mutex mu_;
absl::Status status_ ABSL_GUARDED_BY(mu_);
};
class TestCoordinationClientCache : public CoordinationClientCache {
public:
void AddTask(const std::string& target, CoordinationClient* client) {
clients_.emplace(target, client);
}
CoordinationClient* GetClient(const string& target) override {
auto it = clients_.find(target);
if (it == clients_.end()) return nullptr;
return it->second;
}
std::unique_ptr<CoordinationClient> GetOwnedClient(
const string& target) override {
LOG(ERROR) << "GetOwnedClient is not supported.";
return nullptr;
}
private:
std::unordered_map<std::string, CoordinationClient*> clients_;
};
class CoordinationBarrierTest : public ::testing::Test {
protected:
CoordinationBarrierTest() {
const int num_tasks = 3;
auto client_cache = std::make_unique<TestCoordinationClientCache>();
for (int i = 0; i < num_tasks; ++i) {
CoordinatedTask task;
task.set_job_name("worker");
task.set_task_id(i);
auto client = std::make_unique<TestCoordinationClient>();
client_cache->AddTask(absl::StrCat("/job:worker/replica:0/task:", i),
client.get());
tasks_.push_back(task);
clients_.push_back(std::move(client));
}
CoordinationServiceConfig config = GetCoordinationServiceConfig(num_tasks);
coord_service_ = CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
for (int i = 0; i < num_tasks; ++i) {
absl::Status s =
coord_service_->RegisterTask(tasks_[i], 0);
if (!s.ok()) {
LOG(FATAL) << "RegisterTask() failed in CoordinationBarrierTest(): "
<< s;
}
}
}
CoordinationServiceInterface* GetCoordinationService() {
return coord_service_.get();
}
CoordinatedTask GetTask(int i) { return tasks_[i]; }
std::string GetTaskName(const CoordinatedTask& task) {
return absl::StrCat("/job:", task.job_name(), "/replica:", 0,
"/task:", task.task_id());
}
std::vector<TestCoordinationClient*> GetClients() {
std::vector<TestCoordinationClient*> clients;
for (const auto& client : clients_) {
clients.push_back(client.get());
}
return clients;
}
private:
std::unique_ptr<CoordinationServiceInterface> coord_service_;
std::vector<CoordinatedTask> tasks_;
std::vector<std::unique_ptr<TestCoordinationClient>> clients_;
};
class CoordinateTwoTasksTest : public ::testing::Test {
protected:
CoordinateTwoTasksTest() {
task_0_.set_job_name("worker");
task_0_.set_task_id(0);
task_1_.set_job_name("worker");
task_1_.set_task_id(1);
}
void EnableCoordinationService(
bool has_service_to_client_connection = true,
bool enable_shutdown_barrier = false,
bool set_worker_job_recoverable = false,
bool allow_new_incarnation_to_reconnect = false) {
CoordinationServiceConfig config =
GetCoordinationServiceConfig(2);
auto client_cache = std::make_unique<TestCoordinationClientCache>();
if (has_service_to_client_connection) {
client_cache->AddTask("/job:worker/replica:0/task:0", &client_0_);
client_cache->AddTask("/job:worker/replica:0/task:1", &client_1_);
} else {
client_cache = nullptr;
}
config.set_heartbeat_timeout_in_ms(kHeartbeatTimeout /
absl::Milliseconds(1));
if (set_worker_job_recoverable) {
config.mutable_recoverable_jobs()->Add("worker");
}
if (enable_shutdown_barrier) {
config.set_shutdown_barrier_timeout_in_ms(kShutdownBarrierTimeout /
absl::Milliseconds(1));
}
if (allow_new_incarnation_to_reconnect) {
config.set_allow_new_incarnation_to_reconnect(true);
}
coord_service_ = CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
}
CoordinatedTask task_0_;
const uint64_t incarnation_0_ = random::New64();
const uint64_t incarnation_0_new_ = random::New64();
TestCoordinationClient client_0_;
CoordinatedTask task_1_;
const uint64_t incarnation_1_ = random::New64();
const uint64_t incarnation_1_new_ = random::New64();
TestCoordinationClient client_1_;
std::unique_ptr<CoordinationServiceInterface> coord_service_;
};
TestDevice CreateTestDevice(absl::string_view name, int local_id = 0) {
TestDevice device;
device.set_name(name);
device.set_local_id(local_id);
return device;
}
TEST_F(CoordinateTwoTasksTest, TestStandaloneService) {
EnableCoordinationService();
CoordinatedTask task_2;
task_2.set_job_name("worker");
task_2.set_task_id(2);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Notification wait_for_all;
coord_service_->WaitForAllTasks(task_0_, {}, [&](absl::Status s) {
ASSERT_OK(s);
wait_for_all.Notify();
});
ASSERT_FALSE(wait_for_all.HasBeenNotified());
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
coord_service_->WaitForAllTasks(task_1_, {},
[&](absl::Status s) { ASSERT_OK(s); });
wait_for_all.WaitForNotification();
ASSERT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RecordHeartbeat(task_1_, incarnation_1_));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_2, 0),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, 0),
StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, 0),
StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(client_0_.GetStatus(), StatusIs(absl::StatusCode::kAborted));
}
TEST(CoordinationServiceTest, TestCoordinatedJobs) {
CoordinatedTask chief;
chief.set_job_name("chief");
chief.set_task_id(0);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
CoordinatedTask evaluator;
evaluator.set_job_name("evaluator");
evaluator.set_task_id(0);
CoordinationServiceConfig config;
config.set_service_type(kCoordinationServiceType);
CoordinatedJob* chief_job = config.mutable_coordinated_job_list()->Add();
chief_job->set_name("chief");
chief_job->set_num_tasks(1);
CoordinatedJob* worker_job = config.mutable_coordinated_job_list()->Add();
worker_job->set_name("worker");
worker_job->set_num_tasks(2);
auto client_cache = std::make_unique<TestCoordinationClientCache>();
TestCoordinationClient ci;
client_cache->AddTask("/job:chief/replica:0/task:0", &ci);
TestCoordinationClient wi0;
client_cache->AddTask("/job:worker/replica:0/task:0", &wi0);
TestCoordinationClient wi1;
client_cache->AddTask("/job:worker/replica:0/task:1", &wi1);
TestCoordinationClient ei;
client_cache->AddTask("/job:evaluator/replica:0/task:0", &ei);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::Notification register_chief;
ASSERT_OK(coord_service->RegisterTask(chief, 0));
coord_service->WaitForAllTasks(chief, {}, [&](absl::Status s) {
ASSERT_OK(s);
register_chief.Notify();
});
absl::Notification register_task0;
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
coord_service->WaitForAllTasks(task_0, {}, [&](absl::Status s) {
ASSERT_OK(s);
register_task0.Notify();
});
absl::Notification register_task1;
ASSERT_OK(coord_service->RegisterTask(task_1, 0));
coord_service->WaitForAllTasks(task_1, {}, [&](absl::Status s) {
ASSERT_OK(s);
register_task1.Notify();
});
register_chief.WaitForNotification();
register_task0.WaitForNotification();
register_task1.WaitForNotification();
absl::Status status =
coord_service->RegisterTask(evaluator, 0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(CoordinationServiceTest, RegisterTask_AlreadyConnected_Succeeds) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config,
nullptr);
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
const absl::Status status =
coord_service->RegisterTask(task_0, 0);
TF_EXPECT_OK(status) << status;
}
TEST(CoordinationServiceTest,
RegisterTask_AlreadyConnectedDifferentIncarnation_Fails) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config,
nullptr);
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
const absl::Status status =
coord_service->RegisterTask(task_0, 1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAborted));
}
TEST(CoordinationServiceTest, RegisterTask_AlreadyInError_Fails) {
CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config,
nullptr);
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
ASSERT_OK(coord_service->ReportTaskError(task_0,
absl::InternalError("test_error")));
const absl::Status status =
coord_service->RegisterTask(task_0, 0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAborted));
}
TEST_F(CoordinateTwoTasksTest, TestTaskHeartbeatTimeout) {
EnableCoordinationService();
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kUnavailable));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, incarnation_1_),
StatusIs(absl::StatusCode::kUnavailable));
}
TEST_F(CoordinateTwoTasksTest,
ErrorPollingRequestsGotCancelledErrorUponServiceShutdown) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
for (const CoordinatedTask& task : {task_0_, task_1_}) {
coord_service_->PollForErrorAsync(
task, [&](const absl::Status& status) { statuses.push_back(status); });
}
EXPECT_EQ(statuses.size(), 0);
coord_service_.reset();
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kCancelled)));
}
TEST_F(CoordinateTwoTasksTest,
HeartbeatTimeoutWithoutServerToClientConnection) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, incarnation_1_),
StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest,
HeartbeatTimeoutErrorCanPropagateThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0, n1;
absl::Status s0, s1;
coord_service_->PollForErrorAsync(task_0_, [&](const absl::Status& status) {
s0 = status;
n0.Notify();
});
coord_service_->PollForErrorAsync(task_1_, [&](const absl::Status& status) {
s1 = status;
n1.Notify();
});
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
n0.WaitForNotification();
n1.WaitForNotification();
EXPECT_THAT(s0, StatusIs(absl::StatusCode::kUnavailable));
EXPECT_THAT(s1, StatusIs(absl::StatusCode::kUnavailable));
}
TEST_F(CoordinateTwoTasksTest,
HeartbeatTimeoutErrorFromOneTaskCanPropagateThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status s0, s1;
absl::Notification n0, n1;
coord_service_->PollForErrorAsync(task_0_, [&](const absl::Status& status) {
s0 = status;
n0.Notify();
});
coord_service_->PollForErrorAsync(task_1_, [&](const absl::Status& status) {
s1 = status;
n1.Notify();
});
const int64_t sleeping_time =
absl::ToInt64Microseconds(0.9 * kHeartbeatTimeout);
Env::Default()->SleepForMicroseconds(sleeping_time);
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(sleeping_time);
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(sleeping_time);
n0.WaitForNotification();
n1.WaitForNotification();
EXPECT_THAT(s0,
StatusIs(absl::StatusCode::kUnavailable, HasSubstr("task:1")));
EXPECT_THAT(s1,
StatusIs(absl::StatusCode::kUnavailable, HasSubstr("task:1")));
}
TEST_F(CoordinateTwoTasksTest, ReportedErrorCanPropagateThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
for (const CoordinatedTask& task : {task_0_, task_1_}) {
coord_service_->PollForErrorAsync(
task, [&](const absl::Status& status) { statuses.push_back(status); });
}
ASSERT_OK(coord_service_->ReportTaskError(task_1_,
absl::InternalError("test_error")));
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kInternal)));
}
TEST_F(CoordinateTwoTasksTest, TestTaskRestart) {
EnableCoordinationService();
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status s =
coord_service_->RegisterTask(task_1_, random::New64());
EXPECT_THAT(s, StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(client_0_.GetStatus(), StatusIs(absl::StatusCode::kAborted));
}
TEST_F(CoordinateTwoTasksTest, InsertKeyValue_Duplicate_Fail) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("key0", "original_value"));
EXPECT_THAT(coord_service_->InsertKeyValue("key0", "never_added"),
StatusIs(absl::StatusCode::kAlreadyExists));
auto result = coord_service_->TryGetKeyValue("key0");
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value(), "original_value");
}
TEST_F(CoordinateTwoTasksTest, InsertKeyValue_Duplicate_Overwrite) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("key0", "original_value"));
TF_EXPECT_OK(coord_service_->InsertKeyValue("key0", "overwritten_value",
                                              /*allow_overwrite=*/true));
auto result = coord_service_->TryGetKeyValue("key0");
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value(), "overwritten_value");
}
TEST_F(CoordinateTwoTasksTest, TestSetGetValues) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("key0", "value0"));
ASSERT_OK(coord_service_->InsertKeyValue("/path", "value"));
ASSERT_OK(coord_service_->InsertKeyValue("/path/to/key1", "value1"));
ASSERT_OK(coord_service_->InsertKeyValue("path/to
absl::Notification n1;
absl::StatusOr<std::string_view> ret;
coord_service_->GetKeyValueAsync(
"key0", [&](const absl::StatusOr<std::string_view>& status_or_value) {
ret = status_or_value;
n1.Notify();
});
n1.WaitForNotification();
ASSERT_OK(ret.status());
EXPECT_EQ(ret.value(), "value0");
absl::Notification n2;
coord_service_->GetKeyValueAsync(
"path
[&](const absl::StatusOr<std::string_view>& status_or_value) {
ret = status_or_value;
n2.Notify();
});
n2.WaitForNotification();
EXPECT_EQ(ret.value(), "value1");
ASSERT_OK(coord_service_->DeleteKeyValue("key0"));
absl::Notification n3;
coord_service_->GetKeyValueAsync(
"key0", [&](const absl::StatusOr<std::string_view>& status_or_value) {
ret = status_or_value;
n3.Notify();
});
EXPECT_FALSE(n3.HasBeenNotified());
ASSERT_OK(coord_service_->InsertKeyValue("key0", "value0_new"));
n3.WaitForNotification();
EXPECT_EQ(ret.value(), "value0_new");
ASSERT_OK(coord_service_->DeleteKeyValue("/path"));
auto n4 = std::make_shared<absl::Notification>();
coord_service_->GetKeyValueAsync(
"/path/to/key1",
[n4](const absl::StatusOr<std::string_view>& status_or_value) {
n4->Notify();
});
EXPECT_FALSE(n4->HasBeenNotified());
}
TEST(CoordinationServiceTest, TryGetKeyValue) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::StatusOr<std::string> result =
coord_service->TryGetKeyValue("test_key");
EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kNotFound));
ASSERT_OK(coord_service->InsertKeyValue("test_key", "test_value"));
result = coord_service->TryGetKeyValue("test_key");
EXPECT_EQ(result.value(), "test_value");
ASSERT_OK(coord_service->DeleteKeyValue("test_key"));
result = coord_service->TryGetKeyValue("test_key");
EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_SingleValueInDirectory) {
EnableCoordinationService();
KeyValueEntry kv = CreateKv("dir/path", "value0");
ASSERT_OK(coord_service_->InsertKeyValue(kv.key(), kv.value()));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, UnorderedElementsAre(EqualsProto(kv)));
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_MultipleValuesInDirectory) {
EnableCoordinationService();
KeyValueEntry kv = CreateKv("dir/path", "value0");
KeyValueEntry kv2 = CreateKv("dir/path2", "value1");
KeyValueEntry kv_sub = CreateKv("dir/sub_dir/path", "value_sub");
ASSERT_OK(coord_service_->InsertKeyValue(kv.key(), kv.value()));
ASSERT_OK(coord_service_->InsertKeyValue(kv2.key(), kv2.value()));
ASSERT_OK(coord_service_->InsertKeyValue(kv_sub.key(), kv_sub.value()));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, UnorderedElementsAre(EqualsProto(kv), EqualsProto(kv2),
EqualsProto(kv_sub)));
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_Empty_ReturnsEmptyList) {
EnableCoordinationService();
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_WrongDir_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("dir0/path", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_WrongDirPrefix_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("wrong_dir/dir/path", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest,
GetKeyValueDir_NonDirectoryPrefix_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("dir_key", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest,
GetKeyValueDir_NonDirectoryKey_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("dir", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
}
TEST(CoordinationServiceTest, ListClusterDevices_TfDevice) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(3);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
CoordinatedTask task_2;
task_2.set_job_name("worker");
task_2.set_task_id(2);
absl::Status status = absl::OkStatus();
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::Notification n;
DeviceInfo local_devices_0;
DeviceInfo local_devices_1;
DeviceInfo local_devices_2;
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device0"));
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device1"));
local_devices_1.mutable_device()->Add()->PackFrom(
CreateTestDevice("task1_device0"));
local_devices_2.mutable_device()->Add()->PackFrom(
CreateTestDevice("task2_device0"));
DeviceInfo cluster_devices;
coord_service->WaitForAllTasks(task_0, local_devices_0,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_1, local_devices_1,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_2, local_devices_2, [&](absl::Status s) {
ASSERT_OK(s);
cluster_devices = coord_service->ListClusterDevices();
n.Notify();
});
n.WaitForNotification();
DeviceInfo expected_cluster_devices;
auto expected_devices = expected_cluster_devices.mutable_device();
expected_devices->Add(local_devices_0.device().begin(),
local_devices_0.device().end());
expected_devices->Add(local_devices_1.device().begin(),
local_devices_1.device().end());
expected_devices->Add(local_devices_2.device().begin(),
local_devices_2.device().end());
EXPECT_THAT(cluster_devices, EqualsProto(expected_cluster_devices));
}
TEST(CoordinationServiceTest, ListClusterDevices_XlaDevice) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(3);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
CoordinatedTask task_2;
task_2.set_job_name("worker");
task_2.set_task_id(2);
absl::Status status = absl::OkStatus();
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
coord_service->SetDeviceAggregationFunction(
[](const DeviceInfo& raw_global_devices) {
TestDeviceList global_device_list;
int global_id = 0;
for (const auto& device : raw_global_devices.device()) {
TestDevice local_device;
device.UnpackTo(&local_device);
local_device.set_global_id(global_id++);
*global_device_list.mutable_device()->Add() = local_device;
}
DeviceInfo global_devices;
global_devices.mutable_device()->Add()->PackFrom(global_device_list);
return global_devices;
});
absl::Notification n;
DeviceInfo local_devices_0;
DeviceInfo local_devices_1;
DeviceInfo local_devices_2;
TestDevice local_0 = CreateTestDevice("task0_device0", 0);
TestDevice local_0_1 = CreateTestDevice("task0_device1", 1);
TestDevice local_1 = CreateTestDevice("task1_device0", 0);
TestDevice local_2 = CreateTestDevice("task2_device0", 0);
local_devices_0.mutable_device()->Add()->PackFrom(local_0);
local_devices_0.mutable_device()->Add()->PackFrom(local_0_1);
local_devices_1.mutable_device()->Add()->PackFrom(local_1);
local_devices_2.mutable_device()->Add()->PackFrom(local_2);
DeviceInfo cluster_devices;
coord_service->WaitForAllTasks(task_1, local_devices_1,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_0, local_devices_0,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_2, local_devices_2, [&](absl::Status s) {
ASSERT_OK(s);
cluster_devices = coord_service->ListClusterDevices();
n.Notify();
});
n.WaitForNotification();
DeviceInfo expected_cluster_devices;
TestDeviceList global_device_list;
local_0.set_global_id(0);
local_0_1.set_global_id(1);
local_1.set_global_id(2);
local_2.set_global_id(3);
*global_device_list.add_device() = local_0;
*global_device_list.add_device() = local_0_1;
*global_device_list.add_device() = local_1;
*global_device_list.add_device() = local_2;
expected_cluster_devices.mutable_device()->Add()->PackFrom(
global_device_list);
EXPECT_THAT(cluster_devices, EqualsProto(expected_cluster_devices));
}
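// A task may call WaitForAllTasks() repeatedly (e.g. after a retry); the
// service must deduplicate so that its devices appear only once.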
TEST(CoordinationServiceTest, ListClusterDevices_DevicesAreNotAddedTwice) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(2);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
absl::Status status = absl::OkStatus();
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::Notification n;
DeviceInfo local_devices_0;
DeviceInfo local_devices_1;
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device0"));
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device1"));
local_devices_1.mutable_device()->Add()->PackFrom(
CreateTestDevice("task1_device0"));
DeviceInfo cluster_devices;
coord_service->WaitForAllTasks(task_0, local_devices_0,
[](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_0, local_devices_0,
[](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_1, local_devices_1,
[coord_service = coord_service.get(),
&cluster_devices, &n](absl::Status s) {
ASSERT_OK(s);
cluster_devices =
coord_service->ListClusterDevices();
n.Notify();
});
n.WaitForNotification();
DeviceInfo expected_cluster_devices;
auto expected_devices = expected_cluster_devices.mutable_device();
expected_devices->Add(local_devices_0.device().begin(),
local_devices_0.device().end());
expected_devices->Add(local_devices_1.device().begin(),
local_devices_1.device().end());
EXPECT_THAT(cluster_devices, EqualsProto(expected_cluster_devices));
}
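// Barrier tests. In BarrierAsync(), an empty participating-task list ({})
// means every task in the cluster must reach the barrier. A usage sketch
// (the id, timeout, and argument annotation are illustrative):
//   service->BarrierAsync("my_barrier", absl::Seconds(5), my_task,
//                         /*participating_tasks=*/{}, done_callback);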
TEST_F(CoordinationBarrierTest, Barrier) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Notification n_0;
absl::Notification n_1;
absl::Notification n_2;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{}, [&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
EXPECT_FALSE(n_0.HasBeenNotified());
EXPECT_FALSE(n_1.HasBeenNotified());
EXPECT_FALSE(n_2.HasBeenNotified());
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{}, [&barrier_status_2, &n_2](absl::Status s) {
barrier_status_2 = s;
n_2.Notify();
});
EXPECT_TRUE(n_0.HasBeenNotified());
EXPECT_TRUE(n_1.HasBeenNotified());
EXPECT_TRUE(n_2.HasBeenNotified());
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
}
TEST_F(CoordinationBarrierTest, BarrierWithSubsetOfTasks) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Notification n_0;
absl::Notification n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(0), GetTask(1)},
[&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
EXPECT_TRUE(n_0.HasBeenNotified());
EXPECT_TRUE(n_1.HasBeenNotified());
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
}
TEST_F(CoordinationBarrierTest, BarrierWithMismatchedTasks) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0](absl::Status s) { barrier_status_0 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(1), GetTask(2)},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(barrier_status_1, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierByNonParticipatingTask) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Notification n_0;
absl::Notification n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0](absl::Status s) { barrier_status_0 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{GetTask(0), GetTask(1)},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(barrier_status_1, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierByNonParticipatingTaskThreeTasks) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Notification n_0;
absl::Notification n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(0), GetTask(1)},
[&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
n_0.WaitForNotification();
n_1.WaitForNotification();
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{GetTask(0), GetTask(1)},
[&barrier_status_2](absl::Status s) { barrier_status_2 = s; });
EXPECT_THAT(barrier_status_2, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierByNonClusterTask) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Notification n_0;
CoordinatedTask unspecified_task;
unspecified_task.set_job_name("task_from_another_cluster");
unspecified_task.set_task_id(2);
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), unspecified_task},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
n_0.WaitForNotification();
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInvalidArgument));
}
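// On timeout, every waiting task gets the same DEADLINE_EXCEEDED status. The
// assertions below pin down the diagnostic message: it names task 1 (the
// barrier's first caller) and task 2 (which never arrived) but not task 0,
// and it reports the arrival count "2/3".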
TEST_F(CoordinationBarrierTest, BarrierTimeout) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(1);
absl::Status barrier_status_0, barrier_status_1;
absl::Notification n_0, n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{}, [&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
n_0.WaitForNotification();
n_1.WaitForNotification();
EXPECT_EQ(barrier_status_0, barrier_status_1);
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kDeadlineExceeded));
EXPECT_FALSE(
absl::StrContains(barrier_status_0.message(), GetTaskName(GetTask(0))));
EXPECT_TRUE(
absl::StrContains(barrier_status_0.message(),
GetTaskName(GetTask(1))));
EXPECT_TRUE(absl::StrContains(barrier_status_0.message(),
GetTaskName(GetTask(2))));
EXPECT_TRUE(absl::StrContains(
barrier_status_0.message(),
"2/3"));
}
TEST_F(CoordinationBarrierTest, BarrierReturnsPreviousError) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(1);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Notification n_0;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
ASSERT_OK(GetCoordinationService()->ReportTaskError(
GetTask(0), absl::InternalError("test_error")));
n_0.WaitForNotification();
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(barrier_status_1, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinationBarrierTest, BarrierCancelled) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{},
[&barrier_status](absl::Status s) { barrier_status = s; });
absl::Status cancelled_status =
GetCoordinationService()->CancelBarrier(barrier_id, GetTask(0));
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kCancelled));
TF_EXPECT_OK(cancelled_status);
}
TEST_F(CoordinationBarrierTest, CancelNonExistentBarrier_FutureBarrierFails) {
const std::string barrier_id = "cancelled_barrier_id";
absl::Duration timeout = absl::Seconds(1);
absl::Status barrier_status;
ASSERT_OK(GetCoordinationService()->CancelBarrier(barrier_id, GetTask(0)));
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{},
[&barrier_status](absl::Status s) { barrier_status = s; });
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kCancelled));
}
TEST_F(CoordinationBarrierTest, CancelAfterBarrierHasPassed) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{},
[&barrier_status_0](absl::Status s) { barrier_status_0 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{},
[&barrier_status_2](absl::Status s) { barrier_status_2 = s; });
absl::Status cancelled_status =
GetCoordinationService()->CancelBarrier(barrier_id, GetTask(0));
EXPECT_THAT(cancelled_status,
StatusIs(absl::StatusCode::kFailedPrecondition));
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
}
TEST_F(CoordinationBarrierTest, PassedBarrierReturnsImmediately) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Status barrier_status_repeat;
absl::Notification n0;
absl::Notification n1;
absl::Notification n2;
absl::Notification n_repeat;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n0](absl::Status s) {
barrier_status_0 = s;
n0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{}, [&barrier_status_1, &n1](absl::Status s) {
barrier_status_1 = s;
n1.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{}, [&barrier_status_2, &n2](absl::Status s) {
barrier_status_2 = s;
n2.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status_repeat, &n_repeat](absl::Status s) {
barrier_status_repeat = s;
n_repeat.Notify();
});
EXPECT_TRUE(n0.HasBeenNotified());
EXPECT_TRUE(n1.HasBeenNotified());
EXPECT_TRUE(n2.HasBeenNotified());
EXPECT_TRUE(n_repeat.HasBeenNotified());
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
TF_EXPECT_OK(barrier_status_repeat);
}
TEST_F(CoordinationBarrierTest, BarrierFailsIfTaskIsAlreadyInError) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
ASSERT_OK(GetCoordinationService()->ReportTaskError(
GetTask(0), absl::InternalError("test_error")));
absl::Status barrier_status;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status](absl::Status s) { barrier_status = s; });
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinationBarrierTest, BarrierFailsUponTaskError) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Notification n0;
absl::Status barrier_status;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status, &n0](absl::Status s) {
barrier_status = s;
n0.Notify();
});
ASSERT_OK(GetCoordinationService()->ReportTaskError(
GetTask(0), absl::InternalError("test_error")));
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinationBarrierTest,
BarrierStillBlocksIfSameTaskCallsOngoingBarrierRepeatedly) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Notification n_0;
absl::Notification n_1;
absl::Notification n_2;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
EXPECT_FALSE(n_0.HasBeenNotified());
EXPECT_FALSE(n_1.HasBeenNotified());
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(0), GetTask(1)},
[&barrier_status_2, &n_2](absl::Status s) {
barrier_status_2 = s;
n_2.Notify();
});
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
}
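// The CoordinateTwoTasksTest cases below configure the service via
// EnableCoordinationService(...). The /*param=*/ annotations added to its
// boolean arguments are inferred from the test names and scenarios (the
// fixture is defined earlier in this file), so treat them as best-effort
// labels rather than authoritative parameter names.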
TEST_F(CoordinateTwoTasksTest, ResetAndRegisterAgain) {
EnableCoordinationService();
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
}
TEST_F(CoordinateTwoTasksTest, Reset_HeartbeatsAreAcceptedForAGracePeriod) {
EnableCoordinationService();
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(3 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinateTwoTasksTest, Reset_FailsOngoingBarrier) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Status barrier_status;
absl::Notification barrier_n;
coord_service_->BarrierAsync("ongoing_barrier", absl::InfiniteDuration(),
task_0_,
{},
[&barrier_status, &barrier_n](absl::Status s) {
barrier_status = s;
barrier_n.Notify();
});
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
EXPECT_TRUE(barrier_n.HasBeenNotified());
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, Shutdown_HeartbeatsAreAcceptedForAGracePeriod) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Notification n;
coord_service_->ShutdownTaskAsync(task_0_, [&n](absl::Status s) {
TF_EXPECT_OK(s);
n.Notify();
});
n.WaitForNotification();
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(3 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinateTwoTasksTest, Shutdown_FailsOngoingBarrier) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Status barrier_status;
absl::Notification barrier_n;
coord_service_->BarrierAsync("ongoing_barrier", absl::InfiniteDuration(),
task_0_,
{},
[&barrier_status, &barrier_n](absl::Status s) {
barrier_status = s;
barrier_n.Notify();
});
absl::Notification shutdown_n;
coord_service_->ShutdownTaskAsync(task_0_, [&shutdown_n](absl::Status s) {
TF_EXPECT_OK(s);
shutdown_n.Notify();
});
shutdown_n.WaitForNotification();
EXPECT_TRUE(barrier_n.HasBeenNotified());
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, ShutdownWithBarrier_BarrierSucceeds) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status barrier_status;
absl::Status barrier_status_2;
coord_service_->ShutdownTaskAsync(
task_0_, [&barrier_status](absl::Status s) { barrier_status = s; });
coord_service_->ShutdownTaskAsync(
task_1_, [&barrier_status_2](absl::Status s) { barrier_status_2 = s; });
TF_EXPECT_OK(barrier_status);
TF_EXPECT_OK(barrier_status_2);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
}
TEST_F(CoordinateTwoTasksTest,
ShutdownWithBarrier_BarrierFails_TaskDisconnectsOtherTaskIsAlerted) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status barrier_status;
absl::Notification n;
coord_service_->ShutdownTaskAsync(task_0_,
[&n, &barrier_status](absl::Status s) {
barrier_status = s;
n.Notify();
});
n.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kDeadlineExceeded));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Status other_task_status = client_1_.GetStatus();
EXPECT_THAT(other_task_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest,
ShutdownWithBarrier_BarrierFailsWithoutClientConnection_ServiceStops) {
  EnableCoordinationService(/*has_service_to_client_connection=*/false,
                            /*enable_shutdown_barrier=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status barrier_status;
absl::Notification n;
coord_service_->ShutdownTaskAsync(task_0_,
[&n, &barrier_status](absl::Status s) {
barrier_status = s;
n.Notify();
});
n.WaitForNotification();
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(absl::Seconds(1)));
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kDeadlineExceeded));
absl::Status s = coord_service_->RecordHeartbeat(task_1_, incarnation_1_);
EXPECT_THAT(s, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, BarrierFailsIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0;
absl::Status barrier_status;
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
{}, [&](absl::Status s) {
barrier_status = s;
n0.Notify();
});
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
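// Error-polling tests: PollForErrorAsync() parks a long-poll callback at the
// service that completes only with a non-OK status (a task error, service
// shutdown, or cancellation), never with OK.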
TEST_F(CoordinateTwoTasksTest, BarrierFailsAfterErrorPollingResponse) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0, n1;
absl::Status s0, s1;
coord_service_->PollForErrorAsync(task_0_, [&](const absl::Status& status) {
s0 = status;
n0.Notify();
});
coord_service_->PollForErrorAsync(task_1_, [&](const absl::Status& status) {
s1 = status;
n1.Notify();
});
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
n0.WaitForNotification();
n1.WaitForNotification();
EXPECT_THAT(s0, StatusIs(absl::StatusCode::kUnavailable));
EXPECT_THAT(s1, StatusIs(absl::StatusCode::kUnavailable));
absl::Notification n_barrier;
absl::Status barrier_status;
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
{}, [&](absl::Status s) {
barrier_status = s;
n_barrier.Notify();
});
n_barrier.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, BarrierWithSubsetFailsIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0;
absl::Status barrier_status;
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
{task_0_},
[&](absl::Status s) {
barrier_status = s;
n0.Notify();
});
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest,
BarrierWithNonParticipatingTaskFailsIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0;
absl::Status barrier_status;
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
{task_1_},
[&](absl::Status s) {
barrier_status = s;
n0.Notify();
});
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, UnrecoverableTaskPropagatesError) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(client_1_.GetStatus(), StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, RecoverableTaskWillNotPropagateError) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
TF_EXPECT_OK(client_1_.GetStatus());
}
TEST_F(CoordinateTwoTasksTest,
RecoverableTaskReportErrorResetAndRegisterAgain) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
TF_EXPECT_OK(client_1_.GetStatus());
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_new_));
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_new_));
TF_EXPECT_OK(client_1_.GetStatus());
}
TEST_F(CoordinateTwoTasksTest, UnavailableTaskCanReconnect) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/false,
                            /*allow_new_incarnation_to_reconnect=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->ReportTaskError(
task_0_, MakeCoordinationError(absl::UnavailableError("test_error"))));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_new_));
}
TEST_F(CoordinateTwoTasksTest,
DoNotAllowPollForErrorIfHasServiceToClientConnection) {
EnableCoordinationService(true);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
for (const CoordinatedTask& task : {task_0_, task_1_}) {
coord_service_->PollForErrorAsync(
task, [&](const absl::Status& status) { statuses.push_back(status); });
}
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kInternal)));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfNotInCluster) {
EnableCoordinationService(false);
CoordinatedTask task_not_in_cluster;
absl::Status s;
coord_service_->PollForErrorAsync(
task_not_in_cluster, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not in the cluster")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfTaskNotRegistered) {
EnableCoordinationService(false);
absl::Status s;
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("has not been registered")));
}
TEST_F(CoordinateTwoTasksTest,
AllowPollForErrorWithinGracePeriodIfTaskHasShutDown) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
coord_service_->ShutdownTaskAsync(task_0_,
[&](const absl::Status& status) {});
coord_service_->ShutdownTaskAsync(task_1_,
[&](const absl::Status& status) {});
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
coord_service_.reset();
EXPECT_THAT(s, StatusIs(absl::StatusCode::kCancelled));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfTaskHasShutDown) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
coord_service_->ShutdownTaskAsync(task_0_,
[&](const absl::Status& status) {});
coord_service_->ShutdownTaskAsync(task_1_,
[&](const absl::Status& status) {});
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("has disconnected")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorAfterReset) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->ResetTask(task_0_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("has disconnected")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorWhenInErrorState) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("test_error")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
absl::Status s;
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kInternal,
HasSubstr("service has shut down")));
}
TEST_F(CoordinateTwoTasksTest,
CanPropagateTaskRegistrationErrorThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status s0;
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s0 = status; });
ASSERT_THAT(coord_service_->RegisterTask(task_1_, incarnation_0_),
StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(s0, StatusIs(absl::StatusCode::kAborted));
}
TEST_F(CoordinateTwoTasksTest, LatePollingTaskCanGetError) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { statuses.push_back(status); });
ASSERT_OK(coord_service_->ReportTaskError(
task_0_, absl::FailedPreconditionError("test_error_from_task_0")));
coord_service_->PollForErrorAsync(
task_1_, [&](const absl::Status& status) { statuses.push_back(status); });
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("test_error_from_task_0"))));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c610c4bf-ddc0-4b3d-946d-435e6dbf2fa1 | cpp | tensorflow/tensorflow | coordination_service_agent | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent_test.cc | #include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include "xla/tsl/framework/cancellation.h"
#include "xla/tsl/lib/monitoring/gauge.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status.h"
#include "tsl/platform/thread_annotations.h"
namespace tsl {
using tensorflow::CoordinatedTask;
using tensorflow::CoordinatedTaskState;
using tensorflow::CoordinatedTaskStateInfo;
using tensorflow::CoordinationServiceConfig;
using tensorflow::DeviceInfo;
using tensorflow::KeyValueEntry;
namespace {
auto* enabled_usage_metric =
monitoring::Gauge<bool, 0>::New("/coordination_service/agent/enabled",
"Tracks usage of coordination service.");
constexpr absl::Duration kDefaultClusterRegisterTimeout = absl::Hours(1);
constexpr absl::Duration kDefaultHeartbeatTimeout = absl::Seconds(10);
constexpr absl::Duration kDefaultShutdownTimeout = absl::Seconds(10);
constexpr char kHeartbeatThread[] = "CoordinationServiceHeartbeatLoop";
constexpr char kErrorPollingThread[] = "CoordinationServiceErrorPolling";
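// Concrete agent implementation. Task-state transitions are guarded by
// state_mu_; after Connect(), a dedicated thread sends periodic heartbeats
// and, if configured, a second thread long-polls the service for errors.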
class CoordinationServiceAgentImpl : public CoordinationServiceAgent {
public:
CoordinationServiceAgentImpl() = default;
~CoordinationServiceAgentImpl() override {
absl::Status s = ShutdownInternal();
VLOG(3) << "Coordination agent dtor failed with status: " << s;
}
absl::Status Initialize(Env* env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) override;
absl::Status Initialize(Env* env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) override;
bool IsInitialized() override;
bool IsConnected() override;
bool IsError() override;
absl::Status Connect() override;
absl::Status WaitForAllTasks(const DeviceInfo& local_devices) override;
const DeviceInfo& GetClusterDeviceInfo() override;
absl::StatusOr<CoordinatedTask> GetOwnTask() override;
absl::StatusOr<std::vector<CoordinatedTaskStateInfo>> GetTaskState(
const std::vector<CoordinatedTask>& task) override;
absl::Status ReportError(const absl::Status& error) override;
absl::Status Shutdown() override;
absl::Status Reset() override;
absl::StatusOr<std::string> GetKeyValue(std::string_view key) override;
absl::StatusOr<std::string> GetKeyValue(std::string_view key,
absl::Duration timeout) override;
std::shared_ptr<CallOptions> GetKeyValueAsync(
std::string_view key, StatusOrValueCallback done) override;
absl::StatusOr<std::string> TryGetKeyValue(std::string_view key) override;
absl::StatusOr<std::vector<KeyValueEntry>> GetKeyValueDir(
std::string_view key) override;
void GetKeyValueDirAsync(std::string_view key,
StatusOrValueDirCallback done) override;
absl::Status InsertKeyValue(std::string_view key,
std::string_view value) override;
absl::Status InsertKeyValue(std::string_view key, std::string_view value,
bool allow_overwrite) override;
absl::Status DeleteKeyValue(std::string_view key) override;
absl::Status UpdateKeyValue(std::string_view key,
std::string_view value) override;
absl::Status StartWatchKey(std::string_view key,
ChangedKeyValuesCallback on_change) override;
absl::Status StopWatchKey(std::string_view key) override;
absl::Status WaitAtBarrier(
std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks) override;
void WaitAtBarrierAsync(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks,
StatusCallback done) override;
absl::Status CancelBarrier(std::string_view barrier_id) override;
void CancelBarrierAsync(std::string_view barrier_id,
StatusCallback done) override;
absl::StatusOr<Env*> GetEnv() override;
protected:
void SetError(const absl::Status& error) override;
absl::Status ActivateWatch(
std::string_view key, const std::map<std::string, std::string>&) override;
absl::Status ValidateRunningAgent(bool allow_disconnected = false);
void StopHeartbeat();
private:
absl::Status ShutdownInternal();
void StartSendingHeartbeats();
absl::Status PollForError();
std::shared_ptr<CallOptions> PollForErrorAsync(StatusCallback done);
void StartPollingForError();
void StopErrorPolling();
void ResetCancellationManager();
Env* env_ = nullptr;
const uint64_t incarnation_id_ = random::New64();
CoordinatedTask task_;
CoordinationServiceConfig configs_;
StatusCallback error_fn_;
mutable absl::Mutex state_mu_;
CoordinatedTaskState state_ TF_GUARDED_BY(state_mu_) =
CoordinatedTaskState::TASKSTATE_UNINITIALIZED;
absl::Status status_ TF_GUARDED_BY(state_mu_) = absl::OkStatus();
absl::flat_hash_set<std::string> used_barrier_ids_ TF_GUARDED_BY(state_mu_);
uint64_t leader_incarnation_ = 0;
DeviceInfo cluster_devices_;
absl::Mutex heartbeat_thread_shutdown_mu_;
absl::CondVar heartbeat_thread_cv_;
bool shutting_down_ TF_GUARDED_BY(heartbeat_thread_shutdown_mu_) = false;
std::unique_ptr<Thread> heartbeat_thread_;
std::unique_ptr<Thread> error_polling_thread_;
CancellationManager cancellation_manager_;
std::unique_ptr<CancellationManager> error_polling_cancellation_manager_ =
std::make_unique<CancellationManager>();
std::unique_ptr<CoordinationClient> leader_client_;
CoordinationServiceAgentImpl(const CoordinationServiceAgentImpl&) = delete;
void operator=(const CoordinationServiceAgentImpl&) = delete;
};
absl::Status CoordinationServiceAgentImpl::Initialize(
Env* env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) {
CoordinatedTask task;
task.set_job_name(std::string(job_name));
task.set_task_id(task_id);
return Initialize(env, task, configs, std::move(leader_client), error_fn);
}
absl::Status CoordinationServiceAgentImpl::Initialize(
Env* env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn) {
enabled_usage_metric->GetCell()->Set(true);
absl::MutexLock l(&state_mu_);
if (state_ != CoordinatedTaskState::TASKSTATE_UNINITIALIZED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent has already been initialized."));
}
env_ = env;
task_ = task;
configs_ = configs;
if (configs_.service_leader().empty()) {
return MakeCoordinationError(absl::InvalidArgumentError(
"CoordinationServiceAgent must be initialized with a valid leader."));
}
leader_client_ = std::move(leader_client);
if (leader_client_ == nullptr) {
return MakeCoordinationError(absl::InvalidArgumentError(
"CoordinationServiceAgent must have a valid leader client."));
}
error_fn_ = error_fn;
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
return absl::OkStatus();
}
bool CoordinationServiceAgentImpl::IsInitialized() {
absl::MutexLock l(&state_mu_);
return state_ != CoordinatedTaskState::TASKSTATE_UNINITIALIZED;
}
bool CoordinationServiceAgentImpl::IsConnected() {
absl::MutexLock l(&state_mu_);
return state_ == CoordinatedTaskState::TASKSTATE_CONNECTED;
}
bool CoordinationServiceAgentImpl::IsError() {
absl::MutexLock l(&state_mu_);
return state_ == CoordinatedTaskState::TASKSTATE_ERROR;
}
void CoordinationServiceAgentImpl::StopHeartbeat() {
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
shutting_down_ = true;
heartbeat_thread_cv_.SignalAll();
}
heartbeat_thread_ = nullptr;
}
void CoordinationServiceAgentImpl::StopErrorPolling() {
error_polling_cancellation_manager_->StartCancel();
error_polling_thread_ = nullptr;
}
void CoordinationServiceAgentImpl::ResetCancellationManager() {
error_polling_cancellation_manager_ = std::make_unique<CancellationManager>();
}
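// Registers this task with the service leader, retrying retryable failures
// until the cluster-register deadline. The retry sleep below is uniform in
// [0, 2^min(14, attempt)) milliseconds, i.e. jittered exponential backoff
// capped at roughly 16.4 seconds per attempt.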
absl::Status CoordinationServiceAgentImpl::Connect() {
VLOG(3) << "Agent has started trying to Connect().";
{
absl::MutexLock l(&state_mu_);
if (state_ != CoordinatedTaskState::TASKSTATE_DISCONNECTED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent is not in DISCONNECTED state."));
}
}
absl::Status connect_status =
absl::UnknownError("Connection not attempted yet.");
RegisterTaskRequest request;
*request.mutable_source_task() = task_;
request.set_incarnation(incarnation_id_);
RegisterTaskResponse response;
const int64_t register_timeout =
configs_.cluster_register_timeout_in_ms() > 0
? configs_.cluster_register_timeout_in_ms()
: absl::ToInt64Milliseconds(kDefaultClusterRegisterTimeout);
const absl::Time deadline =
absl::Now() + absl::Milliseconds(register_timeout);
int attempt = 0;
std::default_random_engine generator;
std::uniform_real_distribution<double> distribution(0.0, 1.0);
do {
++attempt;
CallOptions call_opts;
call_opts.SetTimeout(absl::ToInt64Milliseconds(deadline - absl::Now()));
absl::Notification n;
leader_client_->RegisterTaskAsync(
&call_opts, &request, &response, [&](absl::Status s) {
if (s.ok()) {
leader_incarnation_ = response.leader_incarnation();
{
absl::MutexLock l(&state_mu_);
state_ = CoordinatedTaskState::TASKSTATE_CONNECTED;
}
}
connect_status = s;
n.Notify();
});
n.WaitForNotification();
if (!connect_status.ok()) {
const int backoff = 1 << std::min(14, attempt);
absl::SleepFor(absl::Milliseconds(backoff * distribution(generator)));
}
} while (!connect_status.ok() && absl::Now() < deadline &&
(connect_status.GetPayload(CoordinationErrorPayloadKey()) ==
std::nullopt ||
absl::IsAborted(connect_status) ||
absl::IsInternal(connect_status)));
if (!connect_status.ok()) {
SetError(connect_status);
return connect_status;
}
LOG(INFO) << "Coordination agent has successfully connected.";
heartbeat_thread_.reset(env_->StartThread(
ThreadOptions(), kHeartbeatThread,
absl::bind_front(&CoordinationServiceAgentImpl::StartSendingHeartbeats,
this)));
if (configs_.poll_for_error_from_service_at_startup()) {
error_polling_thread_.reset(env_->StartThread(
ThreadOptions(), kErrorPollingThread,
absl::bind_front(&CoordinationServiceAgentImpl::StartPollingForError,
this)));
}
return absl::OkStatus();
}
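// Heartbeat loop: the interval is half the configured heartbeat timeout, so
// the service should see at least two heartbeats per timeout window even if
// one RPC is delayed. Runs until shutting_down_ is set by StopHeartbeat().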
void CoordinationServiceAgentImpl::StartSendingHeartbeats() {
HeartbeatRequest request;
*request.mutable_source_task() = task_;
request.set_incarnation(incarnation_id_);
HeartbeatResponse response;
const int64_t heartbeat_interval_ms =
configs_.heartbeat_timeout_in_ms() > 0
? configs_.heartbeat_timeout_in_ms() / 2
: absl::ToInt64Milliseconds(kDefaultHeartbeatTimeout) / 2;
CallOptions call_opts;
call_opts.SetTimeout(heartbeat_interval_ms);
while (true) {
absl::Status status;
absl::Notification n;
VLOG(10) << "HeartbeatRequest: " << request.DebugString();
leader_client_->HeartbeatAsync(&call_opts, &request, &response,
[&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(10) << "HeartbeatResponse: " << status;
if (!status.ok()) {
absl::SleepFor(absl::Seconds(1));
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
if (shutting_down_) {
return;
}
}
SetError(status);
} else if (response.leader_incarnation() != leader_incarnation_) {
SetError(MakeCoordinationError(
absl::AbortedError("Leader incarnation ID mismatch: the "
"coordination leader has restarted.")));
}
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
heartbeat_thread_cv_.WaitWithTimeout(
&heartbeat_thread_shutdown_mu_,
absl::Milliseconds(heartbeat_interval_ms));
if (shutting_down_) {
return;
}
}
}
}
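// Blocks on PollForError() until the service responds. Cancellation means
// the agent or service is shutting down and is swallowed; any other error is
// propagated through SetError().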
void CoordinationServiceAgentImpl::StartPollingForError() {
LOG(INFO) << "Polling for error from coordination service. This thread will "
"run until an error is encountered or the agent is shutdown.";
absl::Status status = PollForError();
CHECK(!status.ok()) << "PollForError returned OK status. Should "
"always return an error.";
if (absl::IsCancelled(status)) {
LOG(INFO) << "Cancelling error polling because the service or the agent is "
"shutting down.";
return;
}
LOG(ERROR) << "An error is returned from coordination service (this can be "
"an error from this or another task).";
SetError(status);
}
absl::Status CoordinationServiceAgentImpl::PollForError() {
absl::Status status = absl::OkStatus();
absl::Notification n;
PollForErrorAsync([&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
CHECK(!status.ok())
<< "PollForError returned OK status. Should always return an error.";
return status;
}
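// Issues the long-poll RPC. A cancellation callback is registered up front
// so StopErrorPolling() can abort the in-flight call; if the manager was
// already cancelled, the RPC is skipped and CANCELLED is returned directly.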
std::shared_ptr<CallOptions> CoordinationServiceAgentImpl::PollForErrorAsync(
StatusCallback done) {
auto call_opts = std::make_shared<CallOptions>();
absl::Status agent_running_status =
      ValidateRunningAgent(/*allow_disconnected=*/true);
if (!agent_running_status.ok()) {
done(agent_running_status);
return call_opts;
}
auto request = std::make_shared<PollForErrorRequest>();
auto response = std::make_shared<PollForErrorResponse>();
*request->mutable_source_task() = task_;
VLOG(3) << "PollForErrorRequest: " << request->DebugString();
const CancellationToken token =
error_polling_cancellation_manager_->get_cancellation_token();
const bool already_cancelled =
!error_polling_cancellation_manager_->RegisterCallback(
token, [call_opts]() { call_opts->StartCancel(); });
if (already_cancelled) {
done(absl::CancelledError("PollForErrorAsync() was cancelled."));
return call_opts;
}
leader_client_->PollForErrorAsync(
call_opts.get(), request.get(), response.get(),
[call_opts, request, response, done = std::move(done),
&cm = error_polling_cancellation_manager_,
token](const absl::Status& s) {
cm->TryDeregisterCallback(token);
done(s);
});
return call_opts;
}
absl::Status CoordinationServiceAgentImpl::WaitForAllTasks(
const DeviceInfo& local_devices) {
absl::Status agent_running_status = ValidateRunningAgent();
if (!agent_running_status.ok()) {
return agent_running_status;
}
WaitForAllTasksRequest request;
*request.mutable_source_task() = task_;
*request.mutable_device_info() = local_devices;
VLOG(3) << "WaitForAllTasksRequest: " << request.DebugString();
WaitForAllTasksResponse response;
absl::Status status;
absl::Notification n;
leader_client_->WaitForAllTasksAsync(&request, &response,
[&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
if (!status.ok()) {
VLOG(3) << "WaitForAllTasksResponse: " << status;
SetError(status);
return status;
}
VLOG(3) << "WaitForAllTasksResponse: " << response.DebugString();
cluster_devices_ = response.device_info();
return absl::OkStatus();
}
const DeviceInfo& CoordinationServiceAgentImpl::GetClusterDeviceInfo() {
return cluster_devices_;
}
absl::StatusOr<CoordinatedTask> CoordinationServiceAgentImpl::GetOwnTask() {
if (!IsInitialized()) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent has not been initialized; we do not "
"know the associated task yet."));
}
return task_;
}
absl::StatusOr<std::vector<CoordinatedTaskStateInfo>>
CoordinationServiceAgentImpl::GetTaskState(
const std::vector<CoordinatedTask>& tasks) {
GetTaskStateRequest request;
*request.mutable_source_task() = {tasks.begin(), tasks.end()};
GetTaskStateResponse response;
absl::Notification n;
absl::StatusOr<std::vector<CoordinatedTaskStateInfo>> result;
leader_client_->GetTaskStateAsync(
&request, &response, [&](const absl::Status& s) {
if (s.ok()) {
result = std::vector<CoordinatedTaskStateInfo>(
std::make_move_iterator(response.task_state().begin()),
std::make_move_iterator(response.task_state().end()));
} else {
result = s;
}
n.Notify();
});
n.WaitForNotification();
return result;
}
absl::Status CoordinationServiceAgentImpl::ReportError(
const absl::Status& error) {
{
absl::MutexLock l(&state_mu_);
if (state_ == CoordinatedTaskState::TASKSTATE_UNINITIALIZED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent must be initialized first before "
"reporting error."));
} else if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent is already in error state."));
}
}
  SetError(MakeCoordinationError(error, task_,
                                 /*is_reported_error=*/true));  // arg name inferred
LOG(INFO) << "Reporting error to coordination service: " << error;
ReportErrorToServiceRequest request;
request.set_error_code(error.raw_code());
request.set_error_message(std::string(error.message()));
*request.mutable_error_origin() = task_;
VLOG(5) << "ReportErrorToServiceRequest: " << request.DebugString();
ReportErrorToServiceResponse response;
absl::Notification n;
leader_client_->ReportErrorToServiceAsync(
&request, &response, [&](absl::Status s) {
VLOG(5) << "ReportErrorToServiceResponse: " << s;
if (!s.ok()) {
LOG(ERROR)
<< "Encountered another error when reporting error to "
"coordination service: "
<< s
<< "\nThis is usually caused by an earlier error during "
"execution. Check the logs (this task or the leader) for "
"an earlier error to debug further.";
}
n.Notify();
});
n.WaitForNotification();
return absl::OkStatus();
}
absl::Status CoordinationServiceAgentImpl::Shutdown() {
return ShutdownInternal();
}
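// Unless configured for destruction without shutdown, a still-connected task
// first performs a best-effort ShutdownTask RPC bounded by the shutdown
// timeout. Local teardown (heartbeat and error-polling threads, cancellation
// manager) proceeds even if that RPC or a prior error makes the result
// non-OK.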
absl::Status CoordinationServiceAgentImpl::ShutdownInternal() {
absl::Status status = absl::OkStatus();
bool is_connected = false;
{
absl::MutexLock l(&state_mu_);
is_connected = state_ == CoordinatedTaskState::TASKSTATE_CONNECTED;
}
if (!configs_.agent_destruction_without_shutdown() && is_connected) {
LOG(INFO) << "Coordination agent has initiated Shutdown().";
ShutdownTaskRequest request;
*request.mutable_source_task() = task_;
ShutdownTaskResponse response;
CallOptions call_opts;
const int64_t shutdown_timeout =
configs_.shutdown_barrier_timeout_in_ms() > 0
? configs_.shutdown_barrier_timeout_in_ms()
: absl::ToInt64Milliseconds(kDefaultShutdownTimeout);
call_opts.SetTimeout(shutdown_timeout);
absl::Notification n;
leader_client_->ShutdownTaskAsync(&call_opts, &request, &response,
[&status, &n](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
if (status.ok()) {
LOG(INFO) << "Coordination agent has successfully shut down.";
} else {
LOG(ERROR)
<< "Failed to disconnect from coordination service with status: "
<< TrimCoordinationErrorMessage(status)
<< "\nProceeding with agent shutdown anyway. This is usually caused "
"by an earlier error during execution. Check the logs (this task "
"or the leader) for an earlier error to debug further.";
}
}
StopHeartbeat();
StopErrorPolling();
{
absl::MutexLock l(&state_mu_);
if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) {
const std::string status_message = absl::StrCat(
"Shutdown() was called while coordination agent is in error state, "
"implying that distributed execution failed. Note: agent will "
"still shutdown anyway. Agent status: ",
status_.ToString(),
"\nThis is usually caused by an earlier error during execution. "
"Check the logs (this task or the leader) for an earlier error to "
"debug further.");
status =
MakeCoordinationError(absl::FailedPreconditionError(status_message));
LOG(ERROR) << status_message;
}
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
}
cancellation_manager_.StartCancel();
return status;
}
absl::Status CoordinationServiceAgentImpl::Reset() {
{
absl::MutexLock l(&state_mu_);
if (state_ != CoordinatedTaskState::TASKSTATE_ERROR) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Reset() failed: coordination service agent is not in ERROR state."));
}
}
ResetTaskRequest request;
*request.mutable_source_task() = task_;
VLOG(3) << "ResetTaskRequest: " << request.DebugString();
ResetTaskResponse response;
absl::Status status;
absl::Notification n;
leader_client_->ResetTaskAsync(&request, &response,
[&status, &n](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(3) << "ResetTaskResponse: " << status;
if (!status.ok()) {
return status;
}
StopHeartbeat();
StopErrorPolling();
ResetCancellationManager();
{
absl::MutexLock l(&state_mu_);
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
}
{
absl::MutexLock l(&heartbeat_thread_shutdown_mu_);
shutting_down_ = false;
}
LOG(INFO) << "Coordination agent has been reset.";
return status;
}
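// Blocking key-value lookup built on GetKeyValueAsync(), with an optional
// client-side timeout. Usage sketch (the key string is illustrative):
//   absl::StatusOr<std::string> v =
//       agent->GetKeyValue("config/ckpt_path", absl::Seconds(10));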
absl::StatusOr<std::string> CoordinationServiceAgentImpl::GetKeyValue(
std::string_view key) {
return GetKeyValue(key, absl::InfiniteDuration());
}
absl::StatusOr<std::string> CoordinationServiceAgentImpl::GetKeyValue(
std::string_view key, absl::Duration timeout) {
auto n = std::make_shared<absl::Notification>();
auto result = std::make_shared<absl::StatusOr<std::string>>();
GetKeyValueAsync(
key, [n, result](const absl::StatusOr<std::string>& status_or_value) {
*result = status_or_value;
n->Notify();
});
bool call_completed_before_timeout =
n->WaitForNotificationWithTimeout(timeout);
if (!call_completed_before_timeout) {
VLOG(3) << "GetKeyValue(" << key << ") timed out after " << timeout;
return MakeCoordinationError(absl::DeadlineExceededError(absl::Substitute(
"GetKeyValue() timed out with key: $0 and duration: $1", key,
absl::FormatDuration(timeout))));
}
return *result;
}
std::shared_ptr<CallOptions> CoordinationServiceAgentImpl::GetKeyValueAsync(
std::string_view key, StatusOrValueCallback done) {
auto request = std::make_shared<GetKeyValueRequest>();
request->set_key(key.data(), key.size());
VLOG(3) << "GetKeyValueRequest: " << request->DebugString();
auto response = std::make_shared<GetKeyValueResponse>();
auto call_opts = std::make_shared<CallOptions>();
const CancellationToken token =
cancellation_manager_.get_cancellation_token();
const bool already_cancelled = !cancellation_manager_.RegisterCallback(
token, [call_opts]() { call_opts->StartCancel(); });
if (already_cancelled) {
done(absl::CancelledError("GetKeyValueAsync() was cancelled."));
return call_opts;
}
leader_client_->GetKeyValueAsync(
call_opts.get(), request.get(), response.get(),
[call_opts, request, response, done = std::move(done),
&cm = cancellation_manager_, token](const absl::Status& s) {
cm.TryDeregisterCallback(token);
if (!s.ok()) {
done(s);
VLOG(3) << "GetKeyValueResponse: " << s;
} else {
done(response->kv().value());
VLOG(3) << "GetKeyValueResponse: " << response->DebugString();
}
});
return call_opts;
}
absl::StatusOr<std::string> CoordinationServiceAgentImpl::TryGetKeyValue(
std::string_view key) {
absl::Notification n;
absl::StatusOr<std::string> result;
TryGetKeyValueRequest request;
request.set_key(key.data(), key.size());
VLOG(3) << "TryGetKeyValueRequest: " << request.DebugString();
TryGetKeyValueResponse response;
leader_client_->TryGetKeyValueAsync(
&request, &response, [&](const absl::Status& s) {
if (s.ok()) {
result = response.kv().value();
VLOG(3) << "TryGetKeyValueResponse: " << result.value();
} else {
result = s;
VLOG(3) << "TryGetKeyValueResponse: " << s;
}
n.Notify();
});
n.WaitForNotification();
return result;
}
absl::StatusOr<std::vector<KeyValueEntry>>
CoordinationServiceAgentImpl::GetKeyValueDir(std::string_view key) {
absl::Notification n;
absl::StatusOr<std::vector<KeyValueEntry>> result;
GetKeyValueDirAsync(
key, [&n, &result](
absl::StatusOr<std::vector<KeyValueEntry>> status_or_value) {
result = std::move(status_or_value);
n.Notify();
});
n.WaitForNotification();
return result;
}
void CoordinationServiceAgentImpl::GetKeyValueDirAsync(
std::string_view key, StatusOrValueDirCallback done) {
auto request = std::make_shared<GetKeyValueDirRequest>();
request->set_directory_key(key.data(), key.size());
VLOG(3) << "GetKeyValueDirRequest: " << request->DebugString();
auto response = std::make_shared<GetKeyValueDirResponse>();
leader_client_->GetKeyValueDirAsync(
request.get(), response.get(),
[request, response, done = std::move(done)](const absl::Status& s) {
if (!s.ok()) {
done(s);
VLOG(3) << "GetKeyValueDirResponse: " << s;
} else {
VLOG(3) << "GetKeyValueDirResponse: " << response->DebugString();
std::vector<KeyValueEntry> kv_in_directory = {
std::make_move_iterator(response->kv().begin()),
std::make_move_iterator(response->kv().end())};
done(kv_in_directory);
}
});
}
absl::Status CoordinationServiceAgentImpl::InsertKeyValue(
std::string_view key, std::string_view value) {
  return InsertKeyValue(key, value, /*allow_overwrite=*/false);
}
absl::Status CoordinationServiceAgentImpl::InsertKeyValue(
std::string_view key, std::string_view value, bool allow_overwrite) {
InsertKeyValueRequest request;
request.mutable_kv()->set_key(key.data(), key.size());
request.mutable_kv()->set_value(value.data(), value.size());
request.set_allow_overwrite(allow_overwrite);
VLOG(3) << "InsertKeyValueRequest: " << request.DebugString();
InsertKeyValueResponse response;
absl::Status status;
absl::Notification n;
leader_client_->InsertKeyValueAsync(&request, &response, [&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(3) << "InsertKeyValueResponse: " << status;
return status;
}
absl::Status CoordinationServiceAgentImpl::DeleteKeyValue(
std::string_view key) {
DeleteKeyValueRequest request;
request.set_key(key.data(), key.size());
request.set_is_directory(true);
VLOG(3) << "DeleteKeyValueRequest: " << request.DebugString();
DeleteKeyValueResponse response;
absl::Status status;
absl::Notification n;
leader_client_->DeleteKeyValueAsync(&request, &response, [&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
VLOG(3) << "DeleteKeyValueResponse " << status;
return absl::OkStatus();
}
absl::Status CoordinationServiceAgentImpl::UpdateKeyValue(
std::string_view key, std::string_view value) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::UpdateKeyValue is not implemented."));
}
absl::Status CoordinationServiceAgentImpl::StartWatchKey(
std::string_view key,
CoordinationServiceAgentImpl::ChangedKeyValuesCallback on_change) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::StartWatchKey is not implemented."));
}
absl::Status CoordinationServiceAgentImpl::StopWatchKey(std::string_view key) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::StopWatchKey is not implemented."));
}
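// Moves the agent into a terminal ERROR state at most once and surfaces the
// trimmed error to the user-provided error callback.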
void CoordinationServiceAgentImpl::SetError(const absl::Status& error) {
assert(!error.ok());
absl::MutexLock l(&state_mu_);
if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) return;
absl::Status trimmed_error = TrimCoordinationErrorMessage(error);
LOG(ERROR) << "Coordination agent is set to ERROR: " << trimmed_error;
state_ = CoordinatedTaskState::TASKSTATE_ERROR;
status_ = trimmed_error;
error_fn_(trimmed_error);
}
absl::Status CoordinationServiceAgentImpl::ActivateWatch(
std::string_view key, const std::map<std::string, std::string>& kvs) {
return MakeCoordinationError(absl::UnimplementedError(
"CoordinationServiceAgent::ActivateWatch is not implemented."));
}
absl::Status CoordinationServiceAgentImpl::WaitAtBarrier(
std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks) {
absl::Status status;
absl::Notification n;
WaitAtBarrierAsync(barrier_id, timeout, tasks, [&](absl::Status s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
void CoordinationServiceAgentImpl::WaitAtBarrierAsync(
std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks, StatusCallback done) {
absl::Status agent_running_status =
      ValidateRunningAgent(/*allow_disconnected=*/true);
if (!agent_running_status.ok()) {
done(agent_running_status);
return;
}
{
absl::MutexLock l(&state_mu_);
auto [it, inserted] = used_barrier_ids_.insert(std::string(barrier_id));
if (!inserted) {
done(absl::FailedPreconditionError(absl::StrCat(
"WaitAtBarrier() should not be called with the same id more than "
"once. Barrier id: ",
barrier_id)));
return;
}
}
auto request = std::make_shared<BarrierRequest>();
auto response = std::make_shared<BarrierResponse>();
request->set_barrier_id(std::string(barrier_id));
request->set_barrier_timeout_in_ms(timeout / absl::Milliseconds(1));
*request->mutable_source_task() = task_;
*request->mutable_tasks() = {tasks.begin(), tasks.end()};
VLOG(3) << "WaitAtBarrierRequest: " << request->DebugString();
leader_client_->BarrierAsync(
request.get(), response.get(),
[request, response, done = std::move(done)](const absl::Status& s) {
auto status = TrimCoordinationErrorMessage(s);
done(status);
VLOG(3) << "WaitAtBarrierResponse: " << status;
});
}
absl::Status CoordinationServiceAgentImpl::CancelBarrier(
std::string_view barrier_id) {
absl::Status status;
absl::Notification n;
CancelBarrierAsync(barrier_id, [&](const absl::Status& s) {
status = s;
n.Notify();
});
n.WaitForNotification();
return status;
}
void CoordinationServiceAgentImpl::CancelBarrierAsync(
std::string_view barrier_id, StatusCallback done) {
absl::Status agent_running_status =
      ValidateRunningAgent(/*allow_disconnected=*/true);
if (!agent_running_status.ok()) {
done(agent_running_status);
return;
}
auto request = std::make_shared<CancelBarrierRequest>();
auto response = std::make_shared<CancelBarrierResponse>();
request->set_barrier_id(std::string(barrier_id));
*request->mutable_source_task() = task_;
VLOG(3) << "CancelBarrierRequest: " << request->DebugString();
leader_client_->CancelBarrierAsync(
request.get(), response.get(),
[request, response, done = std::move(done)](const absl::Status& s) {
done(s);
VLOG(3) << "CancelBarrierResponse: " << s;
});
}
absl::Status CoordinationServiceAgentImpl::ValidateRunningAgent(
bool allow_disconnected) {
absl::MutexLock l(&state_mu_);
switch (state_) {
case CoordinatedTaskState::TASKSTATE_CONNECTED:
return absl::OkStatus();
case CoordinatedTaskState::TASKSTATE_UNINITIALIZED:
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent must be in CONNECTED state. It is currently UNINITIALIZED."));
case CoordinatedTaskState::TASKSTATE_DISCONNECTED:
if (allow_disconnected) return absl::OkStatus();
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent must be in CONNECTED state. It is currently DISCONNECTED."));
case CoordinatedTaskState::TASKSTATE_ERROR:
return MakeCoordinationError(absl::FailedPreconditionError(
"Agent must be in CONNECTED state. It is currently in ERROR."));
default:
return MakeCoordinationError(absl::FailedPreconditionError(absl::StrCat(
"Agent is not in CONNECTED state. Current state: ", state_)));
}
}
absl::StatusOr<Env*> CoordinationServiceAgentImpl::GetEnv() {
if (!IsInitialized()) {
return MakeCoordinationError(absl::FailedPreconditionError(
"Coordination service agent has not been initialized."));
}
if (env_ == nullptr) {
return MakeCoordinationError(
absl::FailedPreconditionError("Coordination service agent was not "
"initialized with a valid Env* object."));
}
return env_;
}
}
std::unique_ptr<CoordinationServiceAgent> CreateCoordinationServiceAgent() {
return std::make_unique<CoordinationServiceAgentImpl>();
}
} | #include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedTask;
using tensorflow::CoordinationServiceConfig;
using tensorflow::KeyValueEntry;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::SetArgPointee;
using ::testing::UnorderedPointwise;
using ::testing::WithArgs;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.DebugString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p,
::testing::MatchResultListener*) const {
return p.DebugString() == expected_;
}
void DescribeTo(std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
MATCHER(KvEq, "simple KeyValueEntry matcher") {
const KeyValueEntry& kv0 = std::get<0>(arg);
const KeyValueEntry& kv1 = std::get<1>(arg);
return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}
KeyValueEntry CreateKv(const std::string& key, const std::string& value) {
KeyValueEntry kv;
kv.set_key(key);
kv.set_value(value);
return kv;
}
class TestCoordinationClient : public CoordinationClient {
public:
TestCoordinationClient() = default;
MOCK_METHOD(void, GetKeyValueAsync,
(CallOptions * call_opts, const GetKeyValueRequest*,
GetKeyValueResponse*, StatusCallback),
(override));
MOCK_METHOD(void, TryGetKeyValueAsync,
(const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, GetKeyValueDirAsync,
(const GetKeyValueDirRequest*, GetKeyValueDirResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, InsertKeyValueAsync,
(const InsertKeyValueRequest*, InsertKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, DeleteKeyValueAsync,
(const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, RegisterTaskAsync,
(CallOptions*, const RegisterTaskRequest*, RegisterTaskResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, ShutdownTaskAsync,
(CallOptions*, const ShutdownTaskRequest*, ShutdownTaskResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, ResetTaskAsync,
(const ResetTaskRequest*, ResetTaskResponse*, StatusCallback),
(override));
MOCK_METHOD(void, ReportErrorToServiceAsync,
(const ReportErrorToServiceRequest*,
ReportErrorToServiceResponse*, StatusCallback),
(override));
MOCK_METHOD(void, BarrierAsync,
(const BarrierRequest*, BarrierResponse*, StatusCallback),
(override));
MOCK_METHOD(void, GetTaskStateAsync,
(const GetTaskStateRequest*, GetTaskStateResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, HeartbeatAsync,
(CallOptions*, const HeartbeatRequest*, HeartbeatResponse*,
StatusCallback),
(override));
MOCK_METHOD(void, PollForErrorAsync,
(CallOptions * call_opts, const PollForErrorRequest*,
PollForErrorResponse*, StatusCallback),
(override));
#define UNIMPLEMENTED(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override { \
done(absl::UnimplementedError(#method "Async")); \
}
UNIMPLEMENTED(WaitForAllTasks);
UNIMPLEMENTED(CancelBarrier);
#undef UNIMPLEMENTED
void ReportErrorToTaskAsync(CallOptions* call_opts,
const ReportErrorToTaskRequest* request,
ReportErrorToTaskResponse* response,
StatusCallback done) override {
done(absl::UnimplementedError("ReportErrorToTaskAsync"));
}
};
class CoordinationServiceAgentTest : public ::testing::Test {
public:
void SetUp() override {
ON_CALL(*client_, RegisterTaskAsync(_, _, _, _))
.WillByDefault(InvokeArgument<3>(absl::OkStatus()));
ON_CALL(*client_, HeartbeatAsync(_, _, _, _))
.WillByDefault(InvokeArgument<3>(absl::OkStatus()));
ON_CALL(*client_, ShutdownTaskAsync(_, _, _, _))
.WillByDefault(InvokeArgument<3>(absl::OkStatus()));
ON_CALL(*client_, ReportErrorToServiceAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
ON_CALL(*client_, ResetTaskAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
ON_CALL(*client_, BarrierAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
ON_CALL(*client_, GetTaskStateAsync(_, _, _))
.WillByDefault(InvokeArgument<2>(absl::OkStatus()));
}
void InitializeAgent(CoordinationServiceConfig config = {}) {
config.set_service_leader("test_leader");
TF_ASSERT_OK(agent_->Initialize(
Env::Default(), "test_job",
        /*task_id=*/0, config, std::move(client_),
[](absl::Status s) {
LOG(ERROR) << "Coordination agent is set to error: " << s;
}));
}
TestCoordinationClient* GetClient() {
CHECK(client_ != nullptr)
<< "GetClient() was called after InitializeAgent()";
return client_.get();
}
protected:
std::unique_ptr<CoordinationServiceAgent> agent_ =
CreateCoordinationServiceAgent();
std::unique_ptr<TestCoordinationClient> client_ =
std::make_unique<TestCoordinationClient>();
};
TEST_F(CoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
GetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
const std::string& test_key = "test_key";
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<3>([&](StatusCallback done) {
owned_done = done;
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
EXPECT_TRUE(absl::IsDeadlineExceeded(result.status()));
owned_done(absl::CancelledError("error"));
}
TEST_F(CoordinationServiceAgentTest,
GetKeyValue_DelayedResponse_TimeoutWithoutMemoryError) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
auto client = std::make_unique<TestCoordinationClient>();
GetKeyValueResponse* owned_response;
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<2, 3>(
[&](GetKeyValueResponse* response, StatusCallback done) {
owned_response = response;
owned_done = done;
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
EXPECT_TRUE(absl::IsDeadlineExceeded(result.status()));
auto kv = owned_response->mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
owned_done(absl::OkStatus());
}
TEST_F(CoordinationServiceAgentTest,
GetKeyValue_DelayedResponseBeforeTimeout_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
auto client = std::make_unique<TestCoordinationClient>();
std::unique_ptr<Thread> async_thread;
GetKeyValueResponse* owned_response;
StatusCallback owned_done;
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(WithArgs<2, 3>(
[&](GetKeyValueResponse* response, StatusCallback done) {
owned_response = response;
owned_done = done;
async_thread = absl::WrapUnique(Env::Default()->StartThread(
ThreadOptions(), "async_thread", [&]() {
absl::SleepFor(absl::Seconds(5));
auto kv = owned_response->mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
owned_done(absl::OkStatus());
}));
}));
InitializeAgent();
auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, CancelGetKeyValue_Success) {
const std::string test_key = "test_key";
ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
.WillByDefault(
WithArgs<0, 3>([](CallOptions* call_opts, StatusCallback done) {
call_opts->SetCancelCallback([callback = std::move(done)]() {
callback(absl::CancelledError("RPC call cancelled."));
});
}));
InitializeAgent();
absl::Status status;
std::shared_ptr<CallOptions> get_kv_call_opts = agent_->GetKeyValueAsync(
test_key, [&status](const absl::StatusOr<std::string>& result) {
status = result.status();
});
get_kv_call_opts->StartCancel();
EXPECT_TRUE(absl::IsCancelled(status)) << status;
get_kv_call_opts->ClearCancelCallback();
}
TEST_F(CoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
const std::string& test_key = "test_key";
const std::string& test_value = "test_value";
TryGetKeyValueResponse mocked_response;
auto kv = mocked_response.mutable_kv();
kv->set_key(test_key);
kv->set_value(test_value);
ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
.WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
InvokeArgument<2>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->TryGetKeyValue(test_key);
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, test_value);
}
TEST_F(CoordinationServiceAgentTest, GetKeyValueDir_Simple_Success) {
const std::string test_key = "test_key_dir";
std::vector<KeyValueEntry> test_values;
test_values.push_back(CreateKv("test_key_dir/task_0", "0"));
test_values.push_back(CreateKv("test_key_dir/task_1", "1"));
GetKeyValueDirResponse mocked_response;
mocked_response.set_directory_key(test_key);
*mocked_response.mutable_kv() = {test_values.begin(), test_values.end()};
ON_CALL(*GetClient(), GetKeyValueDirAsync(_, _, _))
.WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
InvokeArgument<2>(absl::OkStatus())));
InitializeAgent();
auto result = agent_->GetKeyValueDir(test_key);
TF_ASSERT_OK(result.status());
EXPECT_THAT(*result, UnorderedPointwise(KvEq(), test_values));
}
TEST_F(CoordinationServiceAgentTest, ShutdownInErrorShouldReturnError) {
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(agent_->ReportError(absl::InternalError("Test Error.")));
absl::Status s = agent_->Shutdown();
EXPECT_TRUE(absl::IsFailedPrecondition(s));
}
TEST_F(CoordinationServiceAgentTest, Reset_ConnectedButNotInError_Fail) {
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
auto status = agent_->Reset();
EXPECT_TRUE(absl::IsFailedPrecondition(status));
}
TEST_F(CoordinationServiceAgentTest, ConnectAfterResetError) {
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(agent_->ReportError(absl::InternalError("Test Error.")));
TF_ASSERT_OK(agent_->Reset());
TF_EXPECT_OK(agent_->Connect());
}
TEST_F(CoordinationServiceAgentTest, ConnectAfterReset_WithErrorPolling) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::UnavailableError("Test Error."))))
.WillOnce(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::InternalError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_TRUE(agent_->IsError());
TF_ASSERT_OK(agent_->Reset());
TF_EXPECT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
EXPECT_TRUE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest, CancelledPollForErrorRequest) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::CancelledError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_FALSE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest, InvalidPollForErrorRequest) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(
DoAll(SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::InvalidArgumentError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_TRUE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest,
PollForErrorRequestWithFailedPrecondition) {
PollForErrorResponse mocked_response;
EXPECT_CALL(*GetClient(), PollForErrorAsync(_, _, _, _))
.WillOnce(DoAll(
SetArgPointee<2>(mocked_response),
InvokeArgument<3>(absl::FailedPreconditionError("Test Error."))));
CoordinationServiceConfig config;
config.set_poll_for_error_from_service_at_startup(true);
InitializeAgent(config);
TF_ASSERT_OK(agent_->Connect());
absl::SleepFor(absl::Seconds(2));
ASSERT_TRUE(agent_->IsError());
}
TEST_F(CoordinationServiceAgentTest, ResetCanBeRetried) {
EXPECT_CALL(*GetClient(), ResetTaskAsync(_, _, _))
.WillOnce(InvokeArgument<2>(absl::InternalError("Reset error")))
.WillOnce(InvokeArgument<2>(absl::OkStatus()));
InitializeAgent();
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(agent_->ReportError(absl::InternalError("Test Error.")));
absl::Status reset_status = agent_->Reset();
EXPECT_TRUE(absl::IsInternal(reset_status));
TF_ASSERT_OK(agent_->Reset());
TF_EXPECT_OK(agent_->Connect());
}
TEST_F(CoordinationServiceAgentTest, GetOwnTask) {
InitializeAgent();
auto result = agent_->GetOwnTask();
TF_ASSERT_OK(result.status());
CoordinatedTask actual_task = *result;
CoordinatedTask expected_task;
expected_task.set_job_name("test_job");
expected_task.set_task_id(0);
EXPECT_EQ(actual_task.job_name(), expected_task.job_name());
EXPECT_EQ(actual_task.task_id(), expected_task.task_id());
}
TEST_F(CoordinationServiceAgentTest, GetOwnTask_Uninitialized) {
auto result = agent_->GetOwnTask();
EXPECT_TRUE(absl::IsFailedPrecondition(result.status()));
}
TEST_F(CoordinationServiceAgentTest, WaitAtBarrier_SameIdUsedTwice_Fails) {
InitializeAgent();
const std::string barrier_id = "only_use_once";
TF_ASSERT_OK(agent_->Connect());
TF_ASSERT_OK(
agent_->WaitAtBarrier(barrier_id, absl::Seconds(1), {}));
auto result =
agent_->WaitAtBarrier(barrier_id, absl::Seconds(1), {});
EXPECT_TRUE(absl::IsFailedPrecondition(result));
}
TEST_F(CoordinationServiceAgentTest, GetEnv_SucceedsAfterInit) {
EXPECT_TRUE(absl::IsFailedPrecondition(agent_->GetEnv().status()));
InitializeAgent();
absl::StatusOr<Env*> result = agent_->GetEnv();
TF_ASSERT_OK(result.status());
EXPECT_EQ(*result, Env::Default());
}
TEST_F(CoordinationServiceAgentTest, Connect_AbortedErrorShouldBeRetried) {
EXPECT_CALL(*GetClient(), RegisterTaskAsync(_, _, _, _))
.WillOnce(
InvokeArgument<3>(absl::AbortedError("DuplicateTaskRegistration")))
.WillOnce(
InvokeArgument<3>(absl::AbortedError("DuplicateTaskRegistration")))
.WillOnce(InvokeArgument<3>(absl::OkStatus()));
InitializeAgent();
TF_EXPECT_OK(agent_->Connect());
}
TEST_F(CoordinationServiceAgentTest, Connect_AbortedErrorShouldFailEventually) {
EXPECT_CALL(*GetClient(), RegisterTaskAsync(_, _, _, _))
.WillRepeatedly(
InvokeArgument<3>(absl::AbortedError("DuplicateTaskRegistration")));
CoordinationServiceConfig config;
config.set_cluster_register_timeout_in_ms(
absl::ToInt64Milliseconds(absl::Seconds(3)));
InitializeAgent(config);
absl::Status s = agent_->Connect();
EXPECT_TRUE(absl::IsAborted(s));
}
TEST_F(CoordinationServiceAgentTest, Connect_InternalErrorShouldBeRetried) {
EXPECT_CALL(*GetClient(), RegisterTaskAsync(_, _, _, _))
.WillOnce(InvokeArgument<3>(
absl::InternalError("Coordination service is not enabled.")))
.WillOnce(InvokeArgument<3>(
absl::InternalError("Coordination service is not enabled.")))
.WillOnce(InvokeArgument<3>(absl::OkStatus()));
InitializeAgent();
TF_EXPECT_OK(agent_->Connect());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_agent_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8323463a-09b1-420b-baaf-d77521ca8e27 | cpp | tensorflow/tensorflow | preemption_sync_manager | third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager.cc | third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager_test.cc | #include "xla/tsl/distributed_runtime/preemption/preemption_sync_manager.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include "xla/tsl/lib/monitoring/gauge.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedTask;
using tensorflow::KeyValueEntry;
constexpr int64_t kPreemptionSyncUnsetCounter = -1;
constexpr char kPreemptionNoticeKey[] = "RECEIVED_PREEMPTION_NOTICE";
constexpr char kPreemptionCounterDirKey[] = "PREEMPTION_CURRENT_COUNTER/";
constexpr char kPreemptionBarrier[] = "PREEMPTION_SYNC_BARRIER";
constexpr absl::Duration kPreemptionBarrierTimeout = absl::Minutes(3);
auto* sync_usage_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/reached_sync_point_usage",
"Records if preempt sync manager's ReachSyncPoint() was called at least "
"once.");
auto* notified_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/notified",
"Records receipt of preemption notification.");
auto* set_sync_point_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/set_sync_point",
"Records that sync point is set.");
auto* reached_sync_point_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/reached_sync_point",
"Records that sync point is reached.");
constexpr absl::Duration kProtocolDuration = absl::Minutes(15);
class PreemptionSyncManagerImpl : public PreemptionSyncManager {
public:
PreemptionSyncManagerImpl() = default;
~PreemptionSyncManagerImpl() override {
shutdown_.Notify();
}
absl::Status Initialize(CoordinationServiceAgent* agent) override;
absl::Status Initialize(CoordinationServiceAgent* agent,
const std::string& preemption_notifier_type) override;
absl::Status Initialize(
CoordinationServiceAgent* agent,
std::unique_ptr<PreemptionNotifier> notifier) override;
bool ReachedSyncPoint(int step_counter) override;
private:
void ComputeSyncCallCounter(absl::Time death_time);
void CancelPreemptionBarrier();
absl::Mutex mu_;
int64_t call_counter_ ABSL_GUARDED_BY(mu_) = 0;
int64_t preemption_sync_counter_ ABSL_GUARDED_BY(mu_) =
kPreemptionSyncUnsetCounter;
std::string current_call_counter_key_;
Env* env_;
CoordinationServiceAgent* agent_;
absl::Notification shutdown_;
std::unique_ptr<Thread> sync_protocol_thread_;
std::unique_ptr<PreemptionNotifier> preemption_notifier_;
std::shared_ptr<CallOptions> call_opts_;
};
absl::Status PreemptionSyncManagerImpl::Initialize(
CoordinationServiceAgent* agent) {
return Initialize(agent, "sigterm");
}
absl::Status PreemptionSyncManagerImpl::Initialize(
CoordinationServiceAgent* agent,
const std::string& preemption_notifier_type) {
TF_ASSIGN_OR_RETURN(Env * env, agent->GetEnv());
return Initialize(agent, PreemptionNotifier::CreatePreemptionNotifier(
preemption_notifier_type, env));
}
absl::Status PreemptionSyncManagerImpl::Initialize(
CoordinationServiceAgent* agent,
std::unique_ptr<PreemptionNotifier> notifier) {
TF_ASSIGN_OR_RETURN(Env * env, agent->GetEnv());
env_ = env;
agent_ = agent;
preemption_notifier_ = std::move(notifier);
TF_ASSIGN_OR_RETURN(CoordinatedTask own_task, agent->GetOwnTask());
const std::string task_name =
absl::StrCat("/job:", own_task.job_name(), "/task:", own_task.task_id());
current_call_counter_key_ = absl::StrCat(kPreemptionCounterDirKey, task_name);
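  // Two halves of the protocol: the local notifier publishes this task's death
  // time under kPreemptionNoticeKey, while the key-value watch below lets
  // every task (including this one) react to whichever notice arrives first.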
preemption_notifier_->WillBePreemptedAtAsync(
[agent = agent_, task_name](absl::StatusOr<absl::Time> death_time) {
if (!death_time.ok()) {
if (absl::IsCancelled(death_time.status())) {
LOG(INFO) << "Preemption sync protocol cancelled by notifier: "
<< death_time.status()
<< ". This is expected during program shutdown.";
} else {
LOG(ERROR) << "Error from preemption notifier: "
<< death_time.status();
}
return;
}
notified_metric->GetCell()->Set(true);
const absl::Status s = agent->InsertKeyValue(
kPreemptionNoticeKey, absl::FormatTime(*death_time));
LOG(INFO) << "Notified coordination service that this task will "
"be preempted at "
<< *death_time << ". absl::Status: " << s;
});
call_opts_ = agent_->GetKeyValueAsync(
kPreemptionNoticeKey,
[this, agent = agent_](absl::StatusOr<std::string> status_or_death_time) {
if (absl::IsCancelled(status_or_death_time.status())) {
LOG(INFO) << "Cancelled call to retrieve preemption notice. This is "
"expected upon program shutdown.";
return;
} else if (!status_or_death_time.ok()) {
LOG(WARNING)
<< "Failed to retrieve preemption notice from "
"coordination service: "
<< status_or_death_time.status()
<< ". This is only expected if one of the tasks is unhealthy."
" Check the logs for the actual root cause.";
agent->CancelBarrierAsync(
kPreemptionBarrier, [](const absl::Status& status) {
if (!status.ok()) {
LOG(ERROR)
<< "Failed to cancel preemption barrier: " << status;
}
});
return;
}
std::string err;
absl::Time death_time;
if (absl::ParseTime(absl::RFC3339_full, *status_or_death_time,
&death_time, &err)) {
LOG(INFO) << "Received preemption notice with death_time "
<< death_time;
} else {
LOG(ERROR) << "Unable to parse preemption notice's death time: "
<< err;
CancelPreemptionBarrier();
return;
}
sync_protocol_thread_ = absl::WrapUnique(env_->StartThread(
{}, "PreemptionSyncManager_SyncProtocol",
std::bind(&PreemptionSyncManagerImpl::ComputeSyncCallCounter, this,
death_time)));
});
return absl::OkStatus();
}
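// Runs on a dedicated thread once a preemption notice is received:
// 1. sleep until roughly kProtocolDuration before the reported death time,
// 2. publish this task's latest call counter under its counter key,
// 3. wait at a barrier so that every healthy task has published,
// 4. set the shared sync point to max(counter) + 1.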
void PreemptionSyncManagerImpl::ComputeSyncCallCounter(absl::Time death_time) {
const absl::Duration remaining_time = death_time - absl::Now();
if (remaining_time > kProtocolDuration) {
LOG(INFO) << "Will begin preemption sync protocol in " << remaining_time;
const absl::Duration sleep_time = remaining_time - kProtocolDuration;
if (shutdown_.WaitForNotificationWithTimeout(sleep_time)) {
LOG(WARNING)
<< "Shutdown is triggered before preemption sync protocol has begun.";
CancelPreemptionBarrier();
return;
}
}
absl::MutexLock l(&mu_);
const absl::Status notified_status = agent_->InsertKeyValue(
current_call_counter_key_, std::to_string(call_counter_));
if (!notified_status.ok()) {
LOG(ERROR) << "Preemption sync failed - could not inform service of "
"current call counter: "
<< notified_status;
CancelPreemptionBarrier();
return;
}
const absl::Status barrier_status =
agent_->WaitAtBarrier(kPreemptionBarrier, kPreemptionBarrierTimeout, {});
if (!barrier_status.ok()) {
LOG(ERROR) << "Preemption sync barrier failed: " << barrier_status;
return;
}
absl::StatusOr<std::vector<KeyValueEntry>> all_counters =
agent_->GetKeyValueDir(kPreemptionCounterDirKey);
if (!all_counters.ok()) {
LOG(ERROR) << "Preemption sync failed - unable to retrieve call counters: "
<< all_counters.status();
return;
}
int64_t max_counter = kPreemptionSyncUnsetCounter;
for (const auto& kv : *all_counters) {
int64_t call_counter;
if (!absl::SimpleAtoi(kv.value(), &call_counter)) {
LOG(ERROR) << "Preemption sync failed - failed to parse preemption call "
"counter: "
<< kv.DebugString();
return;
}
max_counter = std::max(max_counter, call_counter);
}
if (max_counter == kPreemptionSyncUnsetCounter) {
LOG(ERROR) << "Preemption sync failed - no call counters found.";
return;
}
preemption_sync_counter_ = max_counter + 1;
LOG(INFO) << "Preemption sync counter is set: " << preemption_sync_counter_;
set_sync_point_metric->GetCell()->Set(true);
}
void PreemptionSyncManagerImpl::CancelPreemptionBarrier() {
agent_->CancelBarrierAsync(
kPreemptionBarrier, [](const absl::Status& status) {
if (!status.ok()) {
LOG(ERROR) << "Failed to cancel preemption barrier: " << status;
}
});
}
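// Returns true iff the caller's step counter has reached the sync point agreed
// on by ComputeSyncCallCounter; the sync point stays unset (and this returns
// false) until a preemption notice has been processed.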
bool PreemptionSyncManagerImpl::ReachedSyncPoint(int step_counter) {
sync_usage_metric->GetCell()->Set(true);
absl::MutexLock l(&mu_);
call_counter_ = step_counter;
VLOG(3) << "Current call counter: " << call_counter_
<< ", Preemption sync point: " << preemption_sync_counter_;
const bool reached_sync_point = preemption_sync_counter_ == call_counter_;
if (reached_sync_point) {
reached_sync_point_metric->GetCell()->Set(true);
}
return reached_sync_point;
}
}
std::unique_ptr<PreemptionSyncManager> CreatePreemptionSyncManager() {
return std::make_unique<PreemptionSyncManagerImpl>();
}
} | #include "xla/tsl/distributed_runtime/preemption/preemption_sync_manager.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/support/channel_arguments.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include "xla/tsl/distributed_runtime/rpc/async_service_interface.h"
#include "xla/tsl/distributed_runtime/rpc/coordination/grpc_coordination_client.h"
#include "xla/tsl/distributed_runtime/rpc/coordination/grpc_coordination_service_impl.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedJob;
using tensorflow::CoordinatedTask;
using tensorflow::CoordinationServiceConfig;
constexpr char kJobName[] = "test_worker";
class FakePreemptionNotifier : public PreemptionNotifier {
public:
FakePreemptionNotifier() : PreemptionNotifier(nullptr) {}
~FakePreemptionNotifier() override {
NotifyRegisteredListeners(
absl::CancelledError("~FakePreemptionNotifier() was called."));
}
void AnnounceDeath(absl::Time death_time) {
LOG(WARNING) << "Received preemption notice with death time: "
<< death_time;
NotifyRegisteredListeners(death_time);
}
};
class PreemptionSyncManagerTest : public ::testing::Test {
protected:
PreemptionSyncManagerTest() {
StartCoordinationService();
InitializeAndConnectCoordinationAgents();
auto preempt_notifier = std::make_unique<FakePreemptionNotifier>();
preempt_notifier_ = preempt_notifier.get();
CHECK_OK(preempt_sync_mgr_->Initialize(coord_agent_.get(),
std::move(preempt_notifier)));
auto preempt_notifier2 = std::make_unique<FakePreemptionNotifier>();
preempt_notifier2_ = preempt_notifier2.get();
CHECK_OK(preempt_sync_mgr2_->Initialize(coord_agent2_.get(),
std::move(preempt_notifier2)));
}
~PreemptionSyncManagerTest() override {
preempt_sync_mgr_ = nullptr;
preempt_sync_mgr2_ = nullptr;
coord_agent_ = nullptr;
coord_agent2_ = nullptr;
coord_service_ = nullptr;
static_cast<tsl::GrpcCoordinationServiceImpl*>(coord_rpc_service_.get())
->SetCoordinationServiceInstance(nullptr);
grpc_server_->Shutdown();
coord_rpc_service_->Shutdown();
}
void SendPreemptionNotice(absl::Time death_time = absl::Now(),
bool to_task1 = true) {
if (to_task1) {
preempt_notifier_->AnnounceDeath(death_time);
} else {
preempt_notifier2_->AnnounceDeath(death_time);
}
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(absl::Seconds(1)));
}
void SimulateUnhealthyTaskTwo() {
CoordinatedTask task2;
task2.set_job_name(kJobName);
task2.set_task_id(1);
CHECK_OK(coord_service_->ReportTaskError(
task2, absl::InternalError("test_error")));
}
std::unique_ptr<PreemptionSyncManager> preempt_sync_mgr_ =
CreatePreemptionSyncManager();
std::unique_ptr<PreemptionSyncManager> preempt_sync_mgr2_ =
CreatePreemptionSyncManager();
protected:
void StartCoordinationService() {
::grpc::ServerBuilder builder;
coord_service_ = EnableCoordinationService();
coord_compute_pool_ = std::make_unique<thread::ThreadPool>(
Env::Default(), "CoordinationServiceRpcHandler",
        /*num_threads=*/1);
coord_rpc_service_ = std::make_unique<GrpcCoordinationServiceImpl>(
coord_compute_pool_.get(), &builder);
auto* grpc_coord_service =
static_cast<GrpcCoordinationServiceImpl*>(coord_rpc_service_.get());
grpc_coord_service->SetCoordinationServiceInstance(coord_service_.get());
grpc_server_ = builder.BuildAndStart();
coord_rpc_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "CoordinationServiceHandleRPCsLoop",
[service = coord_rpc_service_.get()]() { service->HandleRPCsLoop(); }));
}
std::unique_ptr<CoordinationServiceInterface> EnableCoordinationService() {
CoordinationServiceConfig config;
config.set_service_type("standalone");
CoordinatedJob* job = config.mutable_coordinated_job_list()->Add();
job->set_name(kJobName);
job->set_num_tasks(2);
return CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, nullptr);
}
void InitializeAndConnectCoordinationAgents() {
std::unique_ptr<CoordinationClient> coord_client =
absl::WrapUnique(NewGrpcCoordinationClient(
grpc_server_->InProcessChannel(::grpc::ChannelArguments())));
std::unique_ptr<CoordinationClient> coord_client2 =
absl::WrapUnique(NewGrpcCoordinationClient(
grpc_server_->InProcessChannel(::grpc::ChannelArguments())));
auto error_fn = [](const absl::Status& status) {
LOG(ERROR) << "Coordination service agent in error status: " << status;
};
CoordinationServiceConfig coord_config;
coord_config.set_service_leader("test_leader");
CHECK_OK(coord_agent_->Initialize(Env::Default(), kJobName,
                                      /*task_id=*/0, coord_config,
std::move(coord_client), error_fn));
CHECK_OK(coord_agent2_->Initialize(Env::Default(), kJobName,
                                      /*task_id=*/1, coord_config,
std::move(coord_client2), error_fn));
CHECK_OK(coord_agent_->Connect());
CHECK_OK(coord_agent2_->Connect());
}
std::unique_ptr<CoordinationServiceInterface> coord_service_;
std::unique_ptr<::grpc::Server> grpc_server_;
std::unique_ptr<thread::ThreadPool> coord_compute_pool_;
std::unique_ptr<AsyncServiceInterface> coord_rpc_service_;
std::unique_ptr<Thread> coord_rpc_thread_;
std::unique_ptr<CoordinationServiceAgent> coord_agent_ =
CreateCoordinationServiceAgent();
FakePreemptionNotifier* preempt_notifier_;
std::unique_ptr<CoordinationServiceAgent> coord_agent2_ =
CreateCoordinationServiceAgent();
FakePreemptionNotifier* preempt_notifier2_;
};
TEST_F(PreemptionSyncManagerTest, NoPreemption_NoSyncPoint) {
int step_counter = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
TEST_F(PreemptionSyncManagerTest, Preemption_SingleSyncPoint) {
int step_counter = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
SendPreemptionNotice();
EXPECT_TRUE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
TEST_F(PreemptionSyncManagerTest, DelayedPreemption_NoSyncPointYet) {
int step_counter = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
SendPreemptionNotice(absl::Now() + absl::Hours(1));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
TEST_F(PreemptionSyncManagerTest, UnhealthyTask_NoSyncPoint) {
int step_counter = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
SimulateUnhealthyTaskTwo();
SendPreemptionNotice();
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
TEST_F(PreemptionSyncManagerTest, ShutdownTasksWithoutPreemption) {
int step_counter = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
CHECK_OK(coord_agent_->Shutdown());
CHECK_OK(coord_agent2_->Shutdown());
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
TEST_F(PreemptionSyncManagerTest, PreemptSlowTask) {
int step_counter0 = 0;
int step_counter2 = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
SendPreemptionNotice();
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_TRUE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_TRUE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
}
TEST_F(PreemptionSyncManagerTest, PreemptFastTask) {
int step_counter0 = 0;
int step_counter2 = 0;
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
SendPreemptionNotice(absl::Now(), false);
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_TRUE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
EXPECT_TRUE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4b72f2e1-377f-44c8-8739-38c4935bd9a9 | cpp | tensorflow/tensorflow | preemption_notifier | third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_notifier.cc | third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_notifier_test.cc | #include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include <atomic>
#include <csignal>
#include <functional>
#include <memory>
#include <utility>
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/statusor.h"
#if defined(PLATFORM_GOOGLE)
#include "thread/executor.h"
#include "thread/signal.h"
#endif
namespace tsl {
namespace {
constexpr absl::Duration kListenInterval = absl::Seconds(1);
constexpr absl::Time kUnsetDeathTime = absl::InfinitePast();
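// Set from the SIGTERM handler and polled by the listener thread below every
// kListenInterval.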
static std::atomic_bool sigterm_received(false);
class SigtermNotifier : public PreemptionNotifier {
public:
explicit SigtermNotifier(Env* env);
~SigtermNotifier() override {
shutdown_notification_.Notify();
}
private:
void StartListenerThread();
absl::Notification shutdown_notification_;
std::unique_ptr<Thread> preempt_listener_thread_;
};
SigtermNotifier::SigtermNotifier(Env* env) : PreemptionNotifier(env) {
sigterm_received.store(false);
StartListenerThread();
#if defined(PLATFORM_GOOGLE)
thread::signal::Token unused_token;
thread::signal::AddHandler(
SIGTERM, thread::Executor::DefaultExecutor(),
[]() { sigterm_received.store(true); },
0,
&unused_token);
#else
std::signal(SIGTERM, [](int signal) { sigterm_received.store(true); });
#endif
}
void SigtermNotifier::StartListenerThread() {
preempt_listener_thread_.reset(
GetEnv()->StartThread({}, "PreemptionNotifier_Listen", [this]() {
while (!sigterm_received.load()) {
if (shutdown_notification_.WaitForNotificationWithTimeout(
kListenInterval)) {
NotifyRegisteredListeners(
errors::Cancelled("Preemption notifier is being deleted."));
return;
}
}
const absl::Time death_time = absl::Now();
LOG(WARNING) << "SIGTERM caught at " << death_time;
NotifyRegisteredListeners(death_time);
}));
}
}
absl::StatusOr<absl::Time> PreemptionNotifier::WillBePreemptedAt() {
absl::Notification n;
absl::StatusOr<absl::Time> result;
WillBePreemptedAtAsync(
[&n, &result](absl::StatusOr<absl::Time> async_result) {
result = async_result;
n.Notify();
});
n.WaitForNotification();
return result;
}
void PreemptionNotifier::WillBePreemptedAtAsync(PreemptTimeCallback callback) {
mutex_lock l(mu_);
if (death_time_ == kUnsetDeathTime) {
callbacks_.push_back(std::move(callback));
} else {
callback(death_time_);
}
}
void PreemptionNotifier::NotifyRegisteredListeners(
absl::StatusOr<absl::Time> death_time) {
mutex_lock l(mu_);
if (death_time.ok()) {
death_time_ = death_time.value();
}
for (const auto& callback : callbacks_) {
callback(death_time);
}
callbacks_.clear();
}
REGISTER_PREEMPTION_NOTIFIER(
"sigterm", [](Env* env) -> std::unique_ptr<PreemptionNotifier> {
return std::make_unique<SigtermNotifier>(env);
});
} | #include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include <csignal>
#include <functional>
#include <memory>
#include <utility>
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#if defined(PLATFORM_GOOGLE)
#include "thread/executor.h"
#include "thread/signal.h"
#endif
namespace tsl {
namespace {
class PreemptNotifierTest : public ::testing::Test {
public:
PreemptNotifierTest() {
#if defined(PLATFORM_GOOGLE)
thread::signal::Token unused_token;
thread::signal::AddHandler(
SIGTERM, thread::Executor::DefaultExecutor(), []() {},
thread::signal::kOverrideDefault, &unused_token);
#endif
}
};
TEST_F(PreemptNotifierTest, WillBePreemptedAt) {
auto env = Env::Default();
std::unique_ptr<PreemptionNotifier> preempt_notifier =
PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
absl::Time start_time = absl::Now();
env->SchedClosureAfter(absl::ToInt64Microseconds(absl::Seconds(1)),
[]() { std::raise(SIGTERM); });
absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt();
TF_CHECK_OK(result.status());
absl::Time preempt_time = result.value();
absl::Duration time_diff = preempt_time - start_time;
EXPECT_GT(time_diff, absl::Seconds(1.0));
EXPECT_LT(time_diff, absl::Seconds(3));
}
TEST_F(PreemptNotifierTest,
WillBePreemptedAt_AlreadyPreempted_ReturnsImmediately) {
auto env = Env::Default();
std::unique_ptr<PreemptionNotifier> preempt_notifier =
PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
absl::Time start_time = absl::Now();
std::raise(SIGTERM);
env->SleepForMicroseconds(absl::ToInt64Microseconds(absl::Seconds(2)));
absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt();
TF_CHECK_OK(result.status());
absl::Time preempt_time = result.value();
absl::Duration time_diff = preempt_time - start_time;
EXPECT_GT(time_diff, absl::ZeroDuration());
EXPECT_LT(time_diff, absl::Seconds(2));
}
TEST_F(PreemptNotifierTest, WillBePreemptedAtAsync_SameResultForAllCallbacks) {
auto env = Env::Default();
std::unique_ptr<PreemptionNotifier> preempt_notifier =
PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
env->SchedClosureAfter(absl::ToInt64Microseconds(absl::Seconds(1)),
[]() { std::raise(SIGTERM); });
absl::StatusOr<absl::Time> preempt_time;
absl::StatusOr<absl::Time> preempt_time_2;
absl::Notification n;
absl::Notification n_2;
preempt_notifier->WillBePreemptedAtAsync(
[&preempt_time, &n](absl::StatusOr<absl::Time> result) {
preempt_time = result;
n.Notify();
});
preempt_notifier->WillBePreemptedAtAsync(
[&preempt_time_2, &n_2](absl::StatusOr<absl::Time> result) {
preempt_time_2 = result;
n_2.Notify();
});
n.WaitForNotification();
n_2.WaitForNotification();
TF_CHECK_OK(preempt_time.status());
TF_CHECK_OK(preempt_time_2.status());
EXPECT_EQ(preempt_time.value(), preempt_time_2.value());
}
TEST_F(PreemptNotifierTest, Reset_TwoDifferentPreemptTimesRecorded) {
auto env = Env::Default();
std::unique_ptr<PreemptionNotifier> preempt_notifier =
PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
std::raise(SIGTERM);
absl::StatusOr<absl::Time> result = preempt_notifier->WillBePreemptedAt();
TF_CHECK_OK(result.status());
absl::Time preempt_time = result.value();
preempt_notifier =
PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
std::raise(SIGTERM);
absl::Time preempt_time_2 = preempt_notifier->WillBePreemptedAt().value();
EXPECT_NE(preempt_time, preempt_time_2);
}
TEST_F(PreemptNotifierTest, DestructorCancelsPendingCalls) {
auto env = Env::Default();
std::unique_ptr<PreemptionNotifier> preempt_notifier =
PreemptionNotifier::CreatePreemptionNotifier("sigterm", env);
absl::StatusOr<absl::Time> result;
absl::Notification n;
preempt_notifier->WillBePreemptedAtAsync(
[&result, &n](absl::StatusOr<absl::Time> status_or_time) {
result = status_or_time;
n.Notify();
});
preempt_notifier = nullptr;
n.WaitForNotification();
EXPECT_TRUE(errors::IsCancelled(result.status()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_notifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_notifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
323ebe3f-b5fc-466c-a6df-f963cbafa3a8 | cpp | tensorflow/tensorflow | grpc_channel | third_party/xla/xla/tsl/distributed_runtime/rpc/grpc_channel.cc | third_party/xla/xla/tsl/distributed_runtime/rpc/grpc_channel_test.cc | #include "xla/tsl/distributed_runtime/rpc/grpc_channel.h"
#include <cstdlib>
#include <limits>
#include <map>
#include <string>
#include <unordered_map>
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "grpcpp/create_channel.h"
#include "xla/tsl/distributed_runtime/rpc/grpc_channel_common.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/protobuf/rpc_options.pb.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
string MakeAddress(const string& job, int replica, int task) {
return strings::StrCat("/job:", job, "/replica:", replica, "/task:", task);
}
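// Accepts either a "/bns/..." path or a host-port pair; for the latter, the
// host part must not contain '/' and the text after the last ':' must parse
// as an unsigned port number.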
absl::Status ValidateHostPortPair(const string& host_port) {
string bns_prefix = "/bns/";
if (host_port.substr(0, bns_prefix.length()) == bns_prefix) {
return absl::OkStatus();
}
uint32 port;
auto colon_index = host_port.find_last_of(':');
if (!strings::safe_strtou32(host_port.substr(colon_index + 1), &port) ||
host_port.substr(0, colon_index).find('/') != string::npos) {
return errors::InvalidArgument("Could not interpret \"", host_port,
"\" as a host-port pair.");
}
return absl::OkStatus();
}
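// Parses TF_GRPC_DEFAULT_OPTIONS, a comma-separated list of name=value pairs
// (e.g. TF_GRPC_DEFAULT_OPTIONS=grpc.keepalive_time_ms=30000). Double-quoted
// values are C-unescaped and set as string arguments; anything else must
// parse as an int64.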
::grpc::ChannelArguments* CreateDefaultChannelArguments() {
::grpc::ChannelArguments* args = new ::grpc::ChannelArguments();
const char* env = std::getenv("TF_GRPC_DEFAULT_OPTIONS");
if (env != nullptr) {
for (auto& grpc_option : absl::StrSplit(env, ',')) {
std::vector<string> name_value = absl::StrSplit(grpc_option, '=');
if (name_value.size() != 2) {
LOG(ERROR) << "Invalid GRPC options format: " << grpc_option;
continue;
}
VLOG(3) << "Setting GRPC default for '" << name_value[0] << "' to '"
<< name_value[1] << "'";
if (name_value[1].size() >= 2 && name_value[1][0] == '"') {
string ue_value = name_value[1].substr(1, name_value[1].size() - 2);
string value;
string error;
if (!absl::CUnescape(ue_value, &value, &error)) {
LOG(ERROR) << "Failed to parse escaped string for " << grpc_option
<< ": " << error;
} else {
args->SetString(name_value[0], value);
}
} else {
int64_t value;
if (strings::safe_strto64(name_value[1], &value)) {
args->SetInt(name_value[0], value);
} else {
LOG(ERROR) << "Invalid integer value: " << grpc_option;
}
}
}
}
return args;
}
const ::grpc::ChannelArguments* GetDefaultChannelArguments() {
static const ::grpc::ChannelArguments* args = CreateDefaultChannelArguments();
return args;
}
}
::grpc::ChannelArguments GetChannelArguments(const RPCOptions* rpc_options) {
::grpc::ChannelArguments args = *GetDefaultChannelArguments();
args.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 1000);
if (rpc_options != nullptr) {
if (rpc_options->compression_algorithm() == "deflate") {
args.SetCompressionAlgorithm(GRPC_COMPRESS_DEFLATE);
args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL,
rpc_options->compression_level());
VLOG(5) << "Setting GRPC compression : algo='"
<< rpc_options->compression_algorithm()
<< "' level=" << rpc_options->compression_level();
} else if (rpc_options->compression_algorithm() == "gzip") {
args.SetCompressionAlgorithm(GRPC_COMPRESS_GZIP);
args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_LEVEL,
rpc_options->compression_level());
VLOG(5) << "Setting GRPC compression : algo='"
<< rpc_options->compression_algorithm()
<< "' level=" << rpc_options->compression_level();
} else if (!rpc_options->compression_algorithm().empty()) {
LOG(ERROR) << "Invalid compression algorithm: "
<< rpc_options->compression_algorithm();
}
if (rpc_options->disable_session_connection_sharing()) {
VLOG(5) << "Disabling TCP connection sharing";
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
}
}
return args;
}
absl::Status NewHostPortGrpcChannel(const string& target,
const RPCOptions* rpc_options,
SharedGrpcChannelPtr* channel_pointer) {
TF_RETURN_IF_ERROR(ValidateHostPortPair(target));
::grpc::ChannelArguments args = GetChannelArguments(rpc_options);
  *channel_pointer = ::grpc::CreateCustomChannel(
      "dns:///" + target, ::grpc::InsecureChannelCredentials(), args);
return absl::OkStatus();
}
ChannelCreationFunction ConvertToChannelCreationFunction(
const std::function<absl::Status(string, const RPCOptions*,
SharedGrpcChannelPtr*)>&
new_channel_func_ptr) {
return [new_channel_func_ptr](const string& target) -> SharedGrpcChannelPtr {
SharedGrpcChannelPtr channel_ptr;
    if (new_channel_func_ptr(target, /*rpc_options=*/nullptr, &channel_ptr)
.ok()) {
return channel_ptr;
} else {
return nullptr;
}
};
}
absl::Status GrpcChannelSpec::AddHostPortsJob(
const string& job_id, const std::map<int, string>& host_ports) {
if (!job_ids_.insert(job_id).second) {
return errors::InvalidArgument(
"Duplicate job ID in cluster specification: ", job_id);
}
for (const auto& id_host_port : host_ports) {
TF_RETURN_IF_ERROR(ValidateHostPortPair(id_host_port.second));
}
host_ports_jobs_.emplace_back(job_id, host_ports);
return absl::OkStatus();
}
namespace {
using CachingGrpcChannelCache = GenericCachingChannelCache<GrpcChannelCache>;
class MultiGrpcChannelCache : public CachingGrpcChannelCache {
public:
explicit MultiGrpcChannelCache(const std::vector<GrpcChannelCache*>& caches,
int num_channels_per_target)
: CachingGrpcChannelCache(num_channels_per_target), caches_(caches) {}
~MultiGrpcChannelCache() override {
for (GrpcChannelCache* cache : caches_) {
delete cache;
}
}
void ListWorkers(std::vector<string>* workers) override {
for (GrpcChannelCache* cache : caches_) {
cache->ListWorkers(workers);
}
}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) override {
for (GrpcChannelCache* cache : caches_) {
cache->ListWorkersInJob(job_name, workers);
}
}
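  // Remembers which sub-cache resolved each target so later lookups skip the
  // linear scan over caches_.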
string TranslateTask(const string& target) override {
mutex_lock l(mu_);
GrpcChannelCache* cache = gtl::FindPtrOrNull(target_caches_, target);
if (cache == nullptr) {
for (GrpcChannelCache* c : caches_) {
string r = c->TranslateTask(target);
if (!r.empty()) {
target_caches_.insert({target, c});
cache = c;
break;
}
}
}
CHECK(cache) << "Could not find GrpcChannelCache holding channel for "
<< target;
return cache->TranslateTask(target);
}
protected:
SharedGrpcChannelPtr FindChannelOnce(const string& target) override {
for (GrpcChannelCache* cache : caches_) {
SharedGrpcChannelPtr ch(cache->FindWorkerChannel(target));
if (ch) {
mutex_lock l(mu_);
target_caches_.insert({target, cache});
return ch;
}
}
return nullptr;
}
private:
const std::vector<GrpcChannelCache*> caches_;
mutex mu_;
std::unordered_map<string, GrpcChannelCache*> target_caches_
TF_GUARDED_BY(mu_);
};
class SparseGrpcChannelCache : public CachingGrpcChannelCache {
public:
SparseGrpcChannelCache(const string& job_id,
const std::map<int, string>& host_ports,
ChannelCreationFunction channel_func,
int num_channels_per_target)
: CachingGrpcChannelCache(num_channels_per_target),
job_id_(job_id),
host_ports_(host_ports),
channel_func_(std::move(channel_func)) {
VLOG(2) << "Initialize GrpcChannelCache for job " << ToString();
}
~SparseGrpcChannelCache() override {}
void ListWorkers(std::vector<string>* workers) override {
workers->reserve(workers->size() + host_ports_.size());
for (const auto& id_host_port : host_ports_) {
std::vector<std::string> replicas =
absl::StrSplit(id_host_port.second, ',', absl::SkipEmpty());
for (int replica = 0; replica < replicas.size(); ++replica) {
workers->emplace_back(
MakeAddress(job_id_, replica, id_host_port.first));
}
}
}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) override {
if (job_name == job_id_) {
ListWorkers(workers);
}
}
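  // Maps "/job:<job_id>/replica:R/task:T" to the R-th entry of the
  // comma-separated host:port list registered for task T, falling back to
  // replica 0 when R is out of range.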
string TranslateTask(const string& target) override {
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(target, &parsed)) {
LOG(WARNING) << "Invalid target: " << target;
return "";
}
if (!parsed.has_job || parsed.job != job_id_) {
return "";
}
int32_t task = parsed.has_task ? parsed.task : -1;
auto iter = host_ports_.find(task);
if (iter == host_ports_.end()) {
LOG(WARNING) << "Task " << task << " was not defined in sparse job "
<< job_id_ << ": " << target;
return "";
}
std::vector<std::string> host_ports =
absl::StrSplit(iter->second, ',', absl::SkipEmpty());
if (host_ports.size() > parsed.replica) {
return host_ports[parsed.replica];
}
LOG(WARNING) << "Requested out-of-range replica, defaulting to 0: "
<< target;
return host_ports[0];
}
protected:
SharedGrpcChannelPtr FindChannelOnce(const string& target) override {
const string host_port = TranslateTask(target);
if (host_port.empty()) {
return nullptr;
}
auto chan_ptr = channel_func_(host_port);
VLOG(5) << "Channel created for: job: " << job_id_
<< " host_port: " << host_port << " target : " << target
<< " Ptr: " << chan_ptr.get();
return chan_ptr;
}
private:
string ToString() {
std::vector<string> task_strings;
task_strings.reserve(host_ports_.size());
for (const auto& id_host_port : host_ports_) {
task_strings.emplace_back(
strings::StrCat(id_host_port.first, " -> ", id_host_port.second));
}
return strings::StrCat(job_id_, " -> {", absl::StrJoin(task_strings, ", "),
"}");
}
const string job_id_;
const std::map<int, string> host_ports_;
const ChannelCreationFunction channel_func_;
SparseGrpcChannelCache(const SparseGrpcChannelCache&) = delete;
void operator=(const SparseGrpcChannelCache&) = delete;
};
}
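// Builds one SparseGrpcChannelCache per job in `spec`, wrapping them in a
// MultiGrpcChannelCache when the spec contains more than one job.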
GrpcChannelCache* NewGrpcChannelCache(const GrpcChannelSpec& spec,
ChannelCreationFunction channel_func,
const RPCOptions& options) {
const int num_jobs = spec.host_ports_jobs().size();
if (!num_jobs) {
LOG(ERROR) << "Empty channel spec.";
return nullptr;
}
std::vector<GrpcChannelCache*> caches;
caches.reserve(num_jobs);
for (auto& job : spec.host_ports_jobs()) {
VLOG(2) << "Creating Grpc Channel Cache for: " << job.job_id;
caches.push_back(
new SparseGrpcChannelCache(job.job_id, job.host_ports, channel_func,
options.num_channels_per_target()));
}
return caches.size() == 1 ? caches[0]
: new MultiGrpcChannelCache(
caches, options.num_channels_per_target());
}
} | #include "xla/tsl/distributed_runtime/rpc/grpc_channel.h"
#include <string>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/rpc_options.pb.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/test.h"
namespace tsl {
#define IsSameAddrSp DeviceNameUtils::IsSameAddressSpace
TEST(GrpcChannelTest, IsSameAddressSpace) {
EXPECT_TRUE(IsSameAddrSp("/job:mnist/replica:10/task:10/cpu:0",
"/job:mnist/replica:10/task:10/cpu:1"));
EXPECT_TRUE(IsSameAddrSp("/job:mnist/replica:10/task:10/cpu:0",
"/job:mnist/replica:10/task:10/device:GPU:2"));
EXPECT_TRUE(IsSameAddrSp("/job:mnist/replica:10/task:10",
"/job:mnist/replica:10/task:10/device:GPU:2"));
EXPECT_TRUE(IsSameAddrSp("/job:mnist/replica:10/task:10/cpu:1",
"/job:mnist/replica:10/task:10"));
EXPECT_FALSE(IsSameAddrSp("/job:mnist/replica:10/task:9/cpu:0",
"/job:mnist/replica:10/task:10/cpu:0"));
EXPECT_FALSE(IsSameAddrSp("/job:mnist/replica:9/task:10/cpu:0",
"/job:mnist/replica:10/task:10/cpu:0"));
EXPECT_FALSE(IsSameAddrSp("/job:MNIST/replica:10/task:10/cpu:0",
"/job:mnist/replica:10/task:10/cpu:0"));
EXPECT_FALSE(IsSameAddrSp("random_invalid_target", "random_invalid_target"));
EXPECT_FALSE(IsSameAddrSp("/job:/replica:10/task:10/cpu:0",
"/job:/replica:10/task:10/cpu:1"));
EXPECT_FALSE(IsSameAddrSp("/job:mnist/replica:xx/task:10/cpu:0",
"/job:mnist/replica:xx/task:10/cpu:1"));
EXPECT_FALSE(IsSameAddrSp("/job:mnist/replica:10/task:yy/cpu:0",
"/job:mnist/replica:10/task:yy/cpu:1"));
}
TEST(GrpcChannelTest, HostPorts) {
GrpcChannelSpec spec;
TF_ASSERT_OK(spec.AddHostPortsJob("mnist", {{0, "a:1"},
{1, "b:2"},
{2, "c:3"},
{3, "d:4"},
{4, "e:5"},
{5, "f:6"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
std::unique_ptr<GrpcChannelCache> cc(
NewGrpcChannelCache(spec, channel_func, tensorflow::RPCOptions()));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:other/replica:0/task:0"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:mnist/replica:0/task:6"));
{
auto a_1_1 = cc->FindWorkerChannel("/job:mnist/replica:0/task:0");
auto a_1_2 = cc->FindWorkerChannel("/job:mnist/replica:0/task:0");
auto d_4_1 = cc->FindWorkerChannel("/job:mnist/replica:0/task:3");
auto d_4_2 = cc->FindWorkerChannel("/job:mnist/replica:0/task:3");
auto e_5_1 = cc->FindWorkerChannel("/job:mnist/replica:0/task:4");
auto e_5_2 = cc->FindWorkerChannel("/job:mnist/replica:0/task:4");
EXPECT_EQ(a_1_1.get(), a_1_2.get());
EXPECT_EQ(d_4_1.get(), d_4_2.get());
EXPECT_EQ(e_5_1.get(), e_5_2.get());
EXPECT_NE(a_1_1.get(), d_4_2.get());
EXPECT_NE(a_1_1.get(), e_5_2.get());
EXPECT_NE(d_4_1.get(), e_5_2.get());
}
{
std::vector<string> workers;
cc->ListWorkers(&workers);
EXPECT_EQ(
std::vector<string>(
{"/job:mnist/replica:0/task:0", "/job:mnist/replica:0/task:1",
"/job:mnist/replica:0/task:2", "/job:mnist/replica:0/task:3",
"/job:mnist/replica:0/task:4", "/job:mnist/replica:0/task:5"}),
workers);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("mnist", &workers);
EXPECT_EQ(
std::vector<string>(
{"/job:mnist/replica:0/task:0", "/job:mnist/replica:0/task:1",
"/job:mnist/replica:0/task:2", "/job:mnist/replica:0/task:3",
"/job:mnist/replica:0/task:4", "/job:mnist/replica:0/task:5"}),
workers);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("other", &workers);
EXPECT_TRUE(workers.empty());
}
}
TEST(GrpcChannelTest, HostPortsMultiChannelPerTarget) {
GrpcChannelSpec spec;
TF_EXPECT_OK(
spec.AddHostPortsJob("mnist", {{0, "a:1"}, {1, "b:2"}, {2, "c:3"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
tensorflow::RPCOptions rpc_options;
rpc_options.set_num_channels_per_target(4);
std::unique_ptr<GrpcChannelCache> cc(
NewGrpcChannelCache(spec, channel_func, rpc_options));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:other/replica:0/task:0"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:mnist/replica:0/task:3"));
{
std::vector<SharedGrpcChannelPtr> a_1_channels, b_2_channels, c_3_channels;
for (int i = 0; i < 10; i++) {
a_1_channels.push_back(
cc->FindWorkerChannel("/job:mnist/replica:0/task:0"));
b_2_channels.push_back(
cc->FindWorkerChannel("/job:mnist/replica:0/task:1"));
c_3_channels.push_back(
cc->FindWorkerChannel("/job:mnist/replica:0/task:2"));
}
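    // With num_channels_per_target set to 4, lookups cycle through four
    // distinct channels per target: request i and request i + 4 must return
    // the same channel, while requests less than 4 apart must differ.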
for (int i = 0; i < 6; i++) {
EXPECT_EQ(a_1_channels[i].get(), a_1_channels[i + 4].get());
EXPECT_EQ(b_2_channels[i].get(), b_2_channels[i + 4].get());
EXPECT_EQ(c_3_channels[i].get(), c_3_channels[i + 4].get());
}
for (int i = 0; i < 6; i++) {
for (int j = 1; j < 4; j++) {
EXPECT_NE(a_1_channels[i].get(), a_1_channels[i + j].get());
EXPECT_NE(b_2_channels[i].get(), b_2_channels[i + j].get());
EXPECT_NE(c_3_channels[i].get(), c_3_channels[i + j].get());
}
}
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
EXPECT_NE(a_1_channels[i].get(), b_2_channels[j].get());
EXPECT_NE(a_1_channels[i].get(), c_3_channels[j].get());
EXPECT_NE(b_2_channels[i].get(), c_3_channels[j].get());
}
}
}
{
std::vector<string> workers;
cc->ListWorkers(&workers);
EXPECT_EQ(std::vector<string>({"/job:mnist/replica:0/task:0",
"/job:mnist/replica:0/task:1",
"/job:mnist/replica:0/task:2"}),
workers);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("mnist", &workers);
EXPECT_EQ(std::vector<string>({"/job:mnist/replica:0/task:0",
"/job:mnist/replica:0/task:1",
"/job:mnist/replica:0/task:2"}),
workers);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("other", &workers);
EXPECT_TRUE(workers.empty());
}
}
TEST(GrpcChannelTest, HostPortsMultiGrpcMultiChannelPerTarget) {
GrpcChannelSpec spec;
TF_EXPECT_OK(
spec.AddHostPortsJob("mnist", {{0, "a:1"}, {1, "b:2"}, {2, "c:3"}}));
TF_EXPECT_OK(
spec.AddHostPortsJob("mnist2", {{0, "a:1"}, {1, "b:2"}, {2, "c:3"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
tensorflow::RPCOptions rpc_options;
rpc_options.set_num_channels_per_target(4);
std::unique_ptr<GrpcChannelCache> cc(
NewGrpcChannelCache(spec, channel_func, rpc_options));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:other/replica:0/task:0"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:mnist/replica:0/task:3"));
EXPECT_NE(nullptr, cc->FindWorkerChannel("/job:mnist2/replica:0/task:0"));
{
std::vector<SharedGrpcChannelPtr> a_1_channels, b_2_channels, c_3_channels;
for (int i = 0; i < 10; i++) {
a_1_channels.push_back(
cc->FindWorkerChannel("/job:mnist/replica:0/task:0"));
b_2_channels.push_back(
cc->FindWorkerChannel("/job:mnist/replica:0/task:1"));
c_3_channels.push_back(
cc->FindWorkerChannel("/job:mnist2/replica:0/task:0"));
}
for (int i = 0; i < 6; i++) {
EXPECT_EQ(a_1_channels[i].get(), a_1_channels[i + 4].get());
EXPECT_EQ(b_2_channels[i].get(), b_2_channels[i + 4].get());
EXPECT_EQ(c_3_channels[i].get(), c_3_channels[i + 4].get());
}
for (int i = 0; i < 6; i++) {
for (int j = 1; j < 4; j++) {
EXPECT_NE(a_1_channels[i].get(), a_1_channels[i + j].get());
EXPECT_NE(b_2_channels[i].get(), b_2_channels[i + j].get());
EXPECT_NE(c_3_channels[i].get(), c_3_channels[i + j].get());
}
}
for (int i = 0; i < 6; i++) {
for (int j = 0; j < 6; j++) {
EXPECT_NE(a_1_channels[i].get(), b_2_channels[j].get());
EXPECT_NE(a_1_channels[i].get(), c_3_channels[j].get());
EXPECT_NE(b_2_channels[i].get(), c_3_channels[j].get());
}
}
}
{
std::vector<string> workers;
cc->ListWorkers(&workers);
EXPECT_EQ(
std::vector<string>(
{"/job:mnist/replica:0/task:0", "/job:mnist/replica:0/task:1",
"/job:mnist/replica:0/task:2", "/job:mnist2/replica:0/task:0",
"/job:mnist2/replica:0/task:1", "/job:mnist2/replica:0/task:2"}),
workers);
}
{
std::vector<string> workers, workers2;
cc->ListWorkersInJob("mnist", &workers);
EXPECT_EQ(std::vector<string>({"/job:mnist/replica:0/task:0",
"/job:mnist/replica:0/task:1",
"/job:mnist/replica:0/task:2"}),
workers);
cc->ListWorkersInJob("mnist2", &workers2);
EXPECT_EQ(std::vector<string>({"/job:mnist2/replica:0/task:0",
"/job:mnist2/replica:0/task:1",
"/job:mnist2/replica:0/task:2"}),
workers2);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("other", &workers);
EXPECT_TRUE(workers.empty());
}
}
TEST(GrpcChannelTest, SparseHostPorts) {
GrpcChannelSpec spec;
TF_EXPECT_OK(
spec.AddHostPortsJob("mnist", {{0, "a:1"}, {3, "d:4"}, {4, "e:5"}}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
std::unique_ptr<GrpcChannelCache> cc(
NewGrpcChannelCache(spec, channel_func, tensorflow::RPCOptions()));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("invalid_target"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:other/replica:0/task:0"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:mnist/replica:0/task:1"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:mnist/replica:0/task:2"));
EXPECT_EQ(nullptr, cc->FindWorkerChannel("/job:mnist/replica:0/task:5"));
{
auto a_1_1 = cc->FindWorkerChannel("/job:mnist/replica:0/task:0");
auto a_1_2 = cc->FindWorkerChannel("/job:mnist/replica:0/task:0");
LOG(WARNING) << " Getting task 3";
auto d_4_1 = cc->FindWorkerChannel("/job:mnist/replica:0/task:3");
auto d_4_2 = cc->FindWorkerChannel("/job:mnist/replica:0/task:3");
LOG(WARNING) << " Getting task 4";
auto e_5_1 = cc->FindWorkerChannel("/job:mnist/replica:0/task:4");
auto e_5_2 = cc->FindWorkerChannel("/job:mnist/replica:0/task:4");
EXPECT_EQ(a_1_1.get(), a_1_2.get());
EXPECT_EQ(d_4_1.get(), d_4_2.get());
EXPECT_EQ(e_5_1.get(), e_5_2.get());
EXPECT_NE(a_1_1.get(), d_4_2.get());
EXPECT_NE(a_1_1.get(), e_5_2.get());
EXPECT_NE(d_4_1.get(), e_5_2.get());
}
{
std::vector<string> workers;
cc->ListWorkers(&workers);
std::sort(workers.begin(), workers.end());
EXPECT_EQ(std::vector<string>({"/job:mnist/replica:0/task:0",
"/job:mnist/replica:0/task:3",
"/job:mnist/replica:0/task:4"}),
workers);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("mnist", &workers);
EXPECT_EQ(std::vector<string>({"/job:mnist/replica:0/task:0",
"/job:mnist/replica:0/task:3",
"/job:mnist/replica:0/task:4"}),
workers);
}
{
std::vector<string> workers;
cc->ListWorkersInJob("other", &workers);
EXPECT_TRUE(workers.empty());
}
}
TEST(GrpcChannelTest, NewHostPortGrpcChannelValidation) {
SharedGrpcChannelPtr mock_ptr;
EXPECT_TRUE(NewHostPortGrpcChannel("127.0.0.1:2222", nullptr,
&mock_ptr)
.ok());
EXPECT_TRUE(NewHostPortGrpcChannel("example.com:2222",
nullptr, &mock_ptr)
.ok());
EXPECT_TRUE(NewHostPortGrpcChannel("fqdn.example.com.:2222",
nullptr, &mock_ptr)
.ok());
EXPECT_TRUE(NewHostPortGrpcChannel("[2002:a9c:258e::]:2222",
nullptr, &mock_ptr)
.ok());
EXPECT_TRUE(
NewHostPortGrpcChannel("[::]:2222", nullptr, &mock_ptr)
.ok());
EXPECT_FALSE(NewHostPortGrpcChannel("example.com/abc:2222",
nullptr, &mock_ptr)
.ok());
EXPECT_FALSE(NewHostPortGrpcChannel("127.0.0.1:2222/",
nullptr, &mock_ptr)
.ok());
EXPECT_FALSE(NewHostPortGrpcChannel(
"example.com/abc:", nullptr, &mock_ptr)
.ok());
EXPECT_FALSE(
NewHostPortGrpcChannel("[::]/:2222", nullptr, &mock_ptr)
.ok());
EXPECT_FALSE(
NewHostPortGrpcChannel("[::]:2222/", nullptr, &mock_ptr)
.ok());
EXPECT_FALSE(
NewHostPortGrpcChannel("[::]:", nullptr, &mock_ptr).ok());
EXPECT_TRUE(
NewHostPortGrpcChannel("/bns/example", nullptr, &mock_ptr)
.ok());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/rpc/grpc_channel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/rpc/grpc_channel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ca4d441-4a85-4359-b35d-0b039f6559a9 | cpp | tensorflow/tensorflow | grpc_util | tensorflow/core/distributed_runtime/rpc/grpc_util.cc | tensorflow/core/data/service/grpc_util_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/distributed_runtime/tensor_coding.h"
namespace tensorflow {
bool GrpcMaybeParseTensorResponse(::grpc::ByteBuffer* src,
TensorResponse* dst) {
::tensorflow::GrpcByteSource byte_source(src);
auto s = dst->ParseFrom(&byte_source);
return s.ok();
}
} | #include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace grpc_util {
TEST(GrpcUtil, WrapInvalidArgument) {
grpc::Status s(grpc::StatusCode::INVALID_ARGUMENT, "test message");
Status wrapped = WrapError("wrapping message", s);
ASSERT_EQ(wrapped, errors::InvalidArgument("wrapping message: test message"));
}
TEST(GrpcUtil, WrapOk) {
grpc::Status s;
Status wrapped = WrapError("wrapping message", s);
ASSERT_EQ(wrapped, errors::Internal("Expected a non-ok grpc status. Wrapping "
"message: wrapping message"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1314d878-27b7-445b-b083-af17ea4b6829 | cpp | tensorflow/tensorflow | executor | tensorflow/core/common_runtime/executor.cc | tensorflow/core/common_runtime/executor_test.cc | #include "tensorflow/core/common_runtime/executor.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "tensorflow/core/activity_watcher/activity.h"
#include "tensorflow/core/common_runtime/costmodel_manager.h"
#include "tensorflow/core/common_runtime/entry.h"
#include "tensorflow/core/common_runtime/executor_factory.h"
#include "tensorflow/core/common_runtime/graph_view.h"
#include "tensorflow/core/common_runtime/immutable_executor_state.h"
#include "tensorflow/core/common_runtime/pending_counts.h"
#include "tensorflow/core/common_runtime/propagator_state.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/common_runtime/simple_propagator_state.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_segment.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/edgeset.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/gtl/manual_constructor.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/context_types.h"
#include "tensorflow/core/profiler/lib/scoped_annotation.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/managed_stack_trace.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "tsl/platform/tracing.h"
namespace tensorflow {
namespace {
static const Tensor* const kEmptyTensor = new Tensor;
namespace nodestats {
inline int64_t NowInNsec() { return EnvTime::NowNanos(); }
void SetScheduled(NodeExecStatsInterface* stats, int64_t micros) {
if (!stats) return;
stats->SetScheduled(micros * EnvTime::kMicrosToNanos);
}
void SetAllStart(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordExecutorStarted();
}
void SetOpStart(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordComputeStarted();
}
void SetOpEnd(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordComputeEnded();
}
void SetAllEnd(NodeExecStatsInterface* stats) {
if (!stats) return;
stats->RecordExecutorEnded();
}
void SetOutput(NodeExecStatsInterface* stats, int slot, const Tensor* v) {
if (!stats) return;
stats->SetOutput(slot, v);
}
void SetMemory(NodeExecStatsInterface* stats, OpKernelContext* ctx) {
if (!stats) return;
stats->SetMemory(ctx);
}
}
struct KernelTimer {
uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64 ElapsedCycles() {
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;
}
};
typedef absl::InlinedVector<TensorValue, 4UL> TensorValueVec;
typedef absl::InlinedVector<AllocatorAttributes, 4UL> AllocatorAttributeVec;
class ExecutorImpl : public Executor {
public:
explicit ExecutorImpl(const LocalExecutorParams& p) : immutable_state_(p) {}
Status Initialize(const Graph& graph) {
TF_RETURN_IF_ERROR(immutable_state_.Initialize(graph));
kernel_stats_.Initialize(immutable_state_.graph_view());
return absl::OkStatus();
}
private:
void RunAsyncInternal(const Args& args, DoneCallback done) override;
template <class PropagatorStateType>
friend class ExecutorState;
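  // Tracks, per node, whether its kernel should be treated as expensive,
  // combining the static OpKernel::IsExpensive() marker with a dynamically
  // updated estimate of the kernel's cost in CPU cycles.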
class KernelStats {
public:
KernelStats() = default;
void Initialize(const GraphView& gview) {
is_expensive_.resize(gview.num_nodes());
cost_estimates_ =
std::make_unique<std::atomic_uint_fast64_t[]>(gview.num_nodes());
for (int32_t i = 0; i < gview.num_nodes(); ++i) {
if (gview.node(i)) {
is_expensive_[i] =
gview.node(i)->kernel && gview.node(i)->kernel->IsExpensive();
cost_estimates_[i] = kInitialCostEstimateCycles;
}
}
}
bool IsExpensive(const NodeItem& node) const {
return is_expensive_[node.node_id] &&
(cost_estimates_[node.node_id].load(std::memory_order_relaxed) >
kOpIsExpensiveThresholdCycles);
}
bool HasExpensiveMarker(const NodeItem& node) const {
return is_expensive_[node.node_id];
}
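    // Folds a new timing sample into the node's cost estimate as an
    // exponential moving average:
    //   new = ((kCostDecay - 1) * prev + elapsed) / kCostDecay.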
void UpdateCostEstimate(const NodeItem& node, uint64 elapsed_cycles) {
std::atomic_uint_fast64_t& cost_estimate = cost_estimates_[node.node_id];
auto prev_estimate = cost_estimate.load(std::memory_order_relaxed);
uint64 new_estimate =
((kCostDecay - 1) * prev_estimate + elapsed_cycles) / kCostDecay;
cost_estimate.store(new_estimate, std::memory_order_relaxed);
}
private:
static constexpr uint64 kInitialCostEstimateCycles = 100 * 1000 * 1000;
static constexpr uint64 kOpIsExpensiveThresholdCycles = 8000;
static constexpr uint64 kCostDecay = 10;
std::vector<bool> is_expensive_;
std::unique_ptr<std::atomic_uint_fast64_t[]> cost_estimates_;
};
ImmutableExecutorState immutable_state_;
KernelStats kernel_stats_;
ExecutorImpl(const ExecutorImpl&) = delete;
void operator=(const ExecutorImpl&) = delete;
};
template <class PropagatorStateType>
class ExecutorState {
public:
ExecutorState(const Executor::Args& args,
const ImmutableExecutorState& immutable_state_,
ExecutorImpl::KernelStats* kernel_stats_);
~ExecutorState();
void RunAsync(Executor::DoneCallback done);
private:
typedef typename PropagatorStateType::TaggedNode TaggedNode;
typedef
typename PropagatorStateType::TaggedNodeReadyQueue TaggedNodeReadyQueue;
typedef typename PropagatorStateType::TaggedNodeSeq TaggedNodeSeq;
struct AsyncState;
void Process(const TaggedNode& node, int64_t scheduled_nsec);
void ProcessInline(TaggedNodeReadyQueue* inline_ready,
int64_t scheduled_nsec);
Status ProcessSync(const NodeItem& item, OpKernelContext::Params* params,
EntryVector* outputs, NodeExecStatsInterface* stats);
void ProcessAsync(const NodeItem& item, const OpKernelContext::Params& params,
const TaggedNode& tagged_node, Entry* first_input,
NodeExecStatsInterface* stats,
activity_watcher::ActivityId activity_id);
void ProcessNoop(NodeExecStatsInterface* stats);
void ProcessConstTensor(const NodeItem& item, EntryVector* outputs,
NodeExecStatsInterface* stats);
Status PrepareInputs(const NodeItem& item, Entry* first_input,
TensorValueVec* inputs,
AllocatorAttributeVec* input_alloc_attrs,
bool* is_input_dead);
Status ProcessOutputs(const NodeItem& item, OpKernelContext* ctx,
Entry* outputs, NodeExecStatsInterface* stats);
bool NodeDone(const Status& s, TaggedNodeSeq* ready,
NodeExecStatsInterface* stats,
TaggedNodeReadyQueue* inline_ready);
void ScheduleReady(TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready);
template <typename Closure>
void RunTask(Closure&& c, int sample_rate = 0);
void Finish();
void ScheduleFinish();
DeviceContext* device_context_ = nullptr;
const bool vlog_;
const bool log_memory_;
int64_t step_id_;
int64_t trace_id_;
int64_t start_time_usecs_ = 0;
absl::optional<absl::Time> deadline_;
static constexpr uint64 kInlineScheduleReadyThreshold = 500;
RendezvousInterface* rendezvous_;
CollectiveExecutor* collective_executor_ = nullptr;
const ConfigProto* const session_config_;
SessionState* session_state_;
string session_handle_;
const SessionMetadata* session_metadata_ = nullptr;
TensorStore* tensor_store_;
ScopedStepContainer* step_container_;
StepStatsCollectorInterface* const stats_collector_;
const tsl::tracing::EventCollector* const event_collector_;
Context context_;
checkpoint::TensorSliceReaderCacheWrapper* slice_reader_cache_;
CallFrameInterface* call_frame_;
const ImmutableExecutorState& immutable_state_;
ExecutorImpl::KernelStats* const kernel_stats_;
CancellationManager* cancellation_manager_;
tsl::CoordinationServiceAgent* coordination_service_agent_;
absl::optional<ManagedStackTrace> stack_trace_ = absl::nullopt;
std::unique_ptr<DeviceBase> user_device_;
Executor::Args::Runner runner_;
bool sync_on_finish_;
const bool run_all_kernels_inline_;
PropagatorStateType propagator_;
Executor::DoneCallback done_cb_;
std::atomic_int_fast32_t num_outstanding_ops_;
mutex num_deferred_ops_mu_;
int64_t num_deferred_ops_ TF_GUARDED_BY(num_deferred_ops_mu_) = 0;
bool finish_when_deferred_ops_done_ TF_GUARDED_BY(num_deferred_ops_mu_) =
false;
mutex mu_;
Status status_ TF_GUARDED_BY(mu_);
};
template <class PropagatorStateType>
ExecutorState<PropagatorStateType>::ExecutorState(
const Executor::Args& args, const ImmutableExecutorState& immutable_state,
ExecutorImpl::KernelStats* kernel_stats)
: vlog_(VLOG_IS_ON(1)),
log_memory_(LogMemory::IsEnabled()),
step_id_(args.step_id),
trace_id_(args.function_trace_id ? *args.function_trace_id : step_id_),
start_time_usecs_(args.start_time_usecs),
deadline_(args.deadline),
rendezvous_(args.rendezvous),
collective_executor_(args.collective_executor),
session_config_(args.session_config),
session_state_(args.session_state),
session_handle_(args.session_handle),
session_metadata_(immutable_state.params().session_metadata),
tensor_store_(args.tensor_store),
step_container_(args.step_container),
stats_collector_(args.stats_collector),
event_collector_(tsl::tracing::GetEventCollector(
tsl::tracing::EventCategory::kCompute)),
context_(ContextKind::kThread),
slice_reader_cache_(new checkpoint::TensorSliceReaderCacheWrapper),
call_frame_(args.call_frame),
immutable_state_(immutable_state),
kernel_stats_(kernel_stats),
cancellation_manager_(args.cancellation_manager),
coordination_service_agent_(args.coordination_service_agent),
stack_trace_(args.stack_trace),
runner_(args.runner),
sync_on_finish_(args.sync_on_finish),
run_all_kernels_inline_(args.run_all_kernels_inline),
propagator_(immutable_state, step_id_, vlog_),
num_outstanding_ops_(0) {
if (args.user_intra_op_threadpool != nullptr) {
Device* device = immutable_state_.params().device;
user_device_ = RenamedDevice::NewRenamedDevice(
device->name(), device, false, false, args.user_intra_op_threadpool);
}
}
template <class PropagatorStateType>
ExecutorState<PropagatorStateType>::~ExecutorState() {
if (device_context_) {
device_context_->Unref();
}
delete slice_reader_cache_;
}
template <class PropagatorStateType>
template <typename Closure>
void ExecutorState<PropagatorStateType>::RunTask(Closure&& c, int sample_rate) {
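  // The enqueue/dequeue counters are aligned to separate cache lines to
  // avoid false sharing, and the pending-queue-length metric is updated only
  // every max(16, sample_rate) enqueues to keep this hot path cheap.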
alignas(64) static std::atomic<int64_t> num_enqueue_ops{0};
alignas(64) static std::atomic<int64_t> num_dequeue_ops{0};
auto n_enqueues = num_enqueue_ops.fetch_add(1, std::memory_order_relaxed);
if (n_enqueues % std::max(16, sample_rate) == 0) {
auto n_dequeues = num_dequeue_ops.load(std::memory_order_relaxed);
metrics::UpdateGraphPendingQueueLength(n_enqueues - n_dequeues);
}
runner_([c = std::forward<Closure>(c)]() mutable {
num_dequeue_ops.fetch_add(1, std::memory_order_relaxed);
std::forward<Closure>(c)();
});
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::RunAsync(Executor::DoneCallback done) {
TaggedNodeSeq ready;
Device* device = immutable_state_.params().device;
const Status get_context_status =
device->TryGetDeviceContext(&device_context_);
if (!get_context_status.ok()) {
delete this;
done(get_context_status);
return;
}
ready.reserve(immutable_state_.root_nodes().size());
propagator_.ActivateRoots(immutable_state_.root_nodes(), &ready);
num_outstanding_ops_ = ready.size();
if (ready.empty()) {
delete this;
done(absl::OkStatus());
} else {
done_cb_ = std::move(done);
ScheduleReady(&ready, nullptr);
}
}
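// Heap-allocated state that keeps an asynchronous kernel's inputs, params,
// and OpKernelContext alive until its done callback runs, which may happen
// on another thread after Process() has returned.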
template <class PropagatorStateType>
struct ExecutorState<PropagatorStateType>::AsyncState {
AsyncState(const OpKernelContext::Params& p, const TaggedNode& _tagged_node,
const NodeItem* _item, Entry* _first_input,
NodeExecStatsInterface* _stats)
: saved_inputs(p.inputs.begin(), p.inputs.end()),
saved_input_alloc_attrs(p.input_alloc_attrs.begin(),
p.input_alloc_attrs.end()),
params(p),
tagged_node(_tagged_node),
item(_item),
first_input(_first_input),
ctx(ParamsButClearingEigenGPUDevice(¶ms), item->num_outputs),
stats(_stats) {
params.inputs = saved_inputs;
params.input_alloc_attrs = saved_input_alloc_attrs;
}
TensorValueVec saved_inputs;
AllocatorAttributeVec saved_input_alloc_attrs;
OpKernelContext::Params params;
TaggedNode tagged_node;
const NodeItem* item;
Entry* first_input;
OpKernelContext ctx;
NodeExecStatsInterface* stats;
private:
OpKernelContext::Params* ParamsButClearingEigenGPUDevice(
OpKernelContext::Params* p) {
p->eigen_gpu_device = nullptr;
return p;
}
};
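// Returns true if some tracing mechanism (the event collector, a
// ScopedAnnotation, or an active TraceMe at this kernel's trace level) might
// record this kernel's execution.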
bool MightTrace(const tsl::tracing::EventCollector* event_collector,
bool is_expensive) {
if (event_collector != nullptr) {
return true;
}
if (tsl::profiler::ScopedAnnotation::IsEnabled()) return true;
return tsl::profiler::TraceMe::Active(
tsl::profiler::GetTFTraceMeLevel(is_expensive));
}
template <class PropagatorStateType>
Status ExecutorState<PropagatorStateType>::ProcessSync(
const NodeItem& item, OpKernelContext::Params* params, EntryVector* outputs,
NodeExecStatsInterface* stats) {
Status s;
OpKernelContext ctx(params, item.num_outputs);
nodestats::SetOpStart(stats);
OpKernel* op_kernel = item.kernel;
Device* device = immutable_state_.params().device;
const bool is_expensive = kernel_stats_->IsExpensive(item);
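  // Three execution paths: a traced path when profiling may observe the
  // kernel, a timed path for kernels carrying the expensive marker (which
  // refreshes the cost estimate), and a plain path for everything else.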
if (TF_PREDICT_FALSE(MightTrace(event_collector_, is_expensive))) {
tsl::tracing::ScopedRegion region(tsl::tracing::EventCategory::kCompute,
op_kernel->name_view());
profiler::AnnotatedTraceMe activity(
[op_kernel, &ctx] {
return op_kernel->TraceString(
ctx, tsl::profiler::TfOpDetailsEnabled());
},
tsl::profiler::GetTFTraceMeLevel(is_expensive));
device->Compute(op_kernel, &ctx);
} else if (kernel_stats_->HasExpensiveMarker(item)) {
KernelTimer timer;
device->Compute(op_kernel, &ctx);
constexpr int kKernelExecutionTrackingInvocationSkipCount = 16;
if (is_expensive ||
timer.start_cycles % kKernelExecutionTrackingInvocationSkipCount == 0) {
kernel_stats_->UpdateCostEstimate(item, timer.ElapsedCycles());
}
} else {
device->Compute(op_kernel, &ctx);
}
nodestats::SetOpEnd(stats);
if (outputs->size() < item.num_outputs) outputs->resize(item.num_outputs);
s = ProcessOutputs(item, &ctx, outputs->data(), stats);
nodestats::SetMemory(stats, &ctx);
return s;
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessAsync(
const NodeItem& item, const OpKernelContext::Params& params,
const TaggedNode& tagged_node, Entry* first_input,
NodeExecStatsInterface* stats, activity_watcher::ActivityId activity_id) {
AsyncOpKernel* async_kernel = item.kernel->AsAsync();
DCHECK(async_kernel != nullptr);
AsyncState* state =
new AsyncState(params, tagged_node, &item, first_input, stats);
nodestats::SetOpStart(stats);
{
profiler::AnnotatedTraceMe activity(
[async_kernel, state] {
return async_kernel->TraceString(
state->ctx, tsl::profiler::TfOpDetailsEnabled());
},
tsl::profiler::GetTFTraceMeLevel(false));
tsl::profiler::TraceMeProducer producer(
[&] {
return tsl::profiler::TraceMeEncode(
"ExecutorState::ProcessAsync::Start",
{{"name", async_kernel->name()},
{"kernel_type", async_kernel->type_string()},
{"step_id", step_id_}});
},
tsl::profiler::ContextType::kTfExecutor);
auto done = [this, state, activity_id, ctx_id = producer.GetContextId()]() {
tsl::profiler::TraceMeConsumer consumer(
[&] {
return profiler::TraceMeEncode(
"ExecutorState::ProcessAsync::Done",
{{"name", state->item->kernel->name()},
{"kernel_type", state->item->kernel->type_string()},
{"step_id", step_id_}});
},
tsl::profiler::ContextType::kTfExecutor, ctx_id);
Device* device = immutable_state_.params().device;
NodeExecStatsInterface* stats = state->stats;
Entry* first_input = state->first_input;
nodestats::SetOpEnd(stats);
EntryVector outputs(state->item->num_outputs);
Status s =
ProcessOutputs(*state->item, &state->ctx, outputs.data(), stats);
nodestats::SetMemory(stats, &state->ctx);
if (vlog_) {
VLOG(2) << "Async kernel done: " << state->item->node_id << " step "
<< step_id_ << " "
<< SummarizeNodeDef(state->item->kernel->def())
<< (state->tagged_node.get_is_dead() ? " is dead" : "")
<< " device: " << device->name();
}
const int num_inputs = state->item->num_inputs;
for (int i = 0; i < num_inputs; ++i) {
(first_input + i)->ClearVal();
}
propagator_.MaybeMarkCompleted(state->tagged_node);
activity_watcher::ActivityEnd(activity_id);
TaggedNodeSeq ready;
if (s.ok()) {
propagator_.PropagateOutputs(state->tagged_node, &outputs, &ready);
}
outputs.clear();
const bool completed = NodeDone(s, &ready, stats, nullptr);
delete state;
if (completed) ScheduleFinish();
};
immutable_state_.params().device->ComputeAsync(async_kernel, &state->ctx,
std::move(done));
}
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessNoop(
NodeExecStatsInterface* stats) {
nodestats::SetOpStart(stats);
nodestats::SetOpEnd(stats);
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessConstTensor(
const NodeItem& item, EntryVector* outputs, NodeExecStatsInterface* stats) {
nodestats::SetOpStart(stats);
nodestats::SetOpEnd(stats);
Entry& output = (*outputs)[0];
output.state = Entry::State::HAS_CONST_TENSOR;
output.const_tensor = item.const_tensor;
output.alloc_attr = item.output_attrs()[0];
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::Process(const TaggedNode& tagged_node,
int64_t scheduled_nsec) {
tsl::profiler::TraceMe traceme("ExecutorState::Process Scheduled",
tsl::profiler::TraceMeLevel::kVerbose);
TaggedNodeReadyQueue inline_ready;
inline_ready.push_back(tagged_node);
return ProcessInline(&inline_ready, scheduled_nsec);
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ProcessInline(
TaggedNodeReadyQueue* inline_ready, int64_t scheduled_nsec) {
WithContext wc(context_);
auto ready = std::make_unique<TaggedNodeSeq>();
auto inputs = std::make_unique<TensorValueVec>();
AllocatorAttributeVec input_alloc_attrs;
auto params = std::make_unique<OpKernelContext::Params>();
params->step_id = step_id_;
Device* device = immutable_state_.params().device;
if (user_device_) {
params->device = user_device_.get();
} else {
params->device = device;
}
params->start_time_usecs = start_time_usecs_;
params->deadline = deadline_;
params->log_memory = log_memory_;
params->rendezvous = rendezvous_;
params->collective_executor = collective_executor_;
params->session_config = session_config_;
params->session_state = session_state_;
params->session_handle = session_handle_;
params->session_metadata = session_metadata_;
params->tensor_store = tensor_store_;
params->cancellation_manager = cancellation_manager_;
params->coordination_service_agent = coordination_service_agent_;
params->stack_trace = stack_trace_;
params->call_frame = call_frame_;
params->function_library = immutable_state_.params().function_library;
params->resource_manager = device->resource_manager();
params->step_container = step_container_;
params->slice_reader_cache = slice_reader_cache_;
params->runner = &runner_;
params->run_all_kernels_inline = run_all_kernels_inline_;
params->stats_collector = stats_collector_;
params->inc_num_deferred_ops_function = [this]() {
mutex_lock lock(num_deferred_ops_mu_);
num_deferred_ops_++;
};
params->dec_num_deferred_ops_function = [this]() {
bool finish_when_deferred_ops_done = false;
{
mutex_lock lock(num_deferred_ops_mu_);
num_deferred_ops_--;
if (num_deferred_ops_ == 0) {
finish_when_deferred_ops_done = finish_when_deferred_ops_done_;
}
}
if (finish_when_deferred_ops_done) Finish();
};
params->op_device_context = device_context_;
Status s;
NodeExecStatsInterface* stats = nullptr;
EntryVector outputs(1);
bool completed = false;
int64_t last_iter_num = -1;
std::unique_ptr<tsl::profiler::TraceMeConsumer> iteration_scope;
while (!inline_ready->empty()) {
TaggedNode tagged_node = inline_ready->front();
int64_t current_iter_num = tagged_node.get_iter_num();
if (current_iter_num != last_iter_num) {
iteration_scope = std::make_unique<tsl::profiler::TraceMeConsumer>(
[&] {
return profiler::TraceMeEncode(
"ExecutorState::Process",
{{"id", step_id_}, {"iter_num", tagged_node.get_iter_num()}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id_,
tsl::profiler::TraceMeLevel::kInfo);
last_iter_num = current_iter_num;
}
inline_ready->pop_front();
const NodeItem& item = tagged_node.get_node_item();
const int id = item.node_id;
propagator_.MaybeMarkStarted(tagged_node);
const activity_watcher::ActivityId activity_id =
activity_watcher::ActivityStart(
[&]() {
return std::make_unique<activity_watcher::Activity>(
"ExecutorState::Process",
activity_watcher::ActivityCategory::kMisc,
activity_watcher::Activity::Attributes{
{"node_name", item.kernel->def().name()},
{"op", item.kernel->def().op()},
{"iter_num", absl::StrCat(tagged_node.get_iter_num())},
{"step_id", absl::StrCat(params->step_id)},
{"node_id", absl::StrCat(id)},
{"device", device->name()},
{"inputs",
absl::StrJoin(item.kernel->def().input(), "; ")},
{"original_node_names",
absl::StrJoin(item.kernel->def()
.experimental_debug_info()
.original_node_names(),
"; ")},
{"original_func_names",
absl::StrJoin(item.kernel->def()
.experimental_debug_info()
.original_func_names(),
"; ")},
});
},
2);
params->track_allocations = false;
stats = nullptr;
if (stats_collector_ && !tagged_node.get_is_dead()) {
stats = stats_collector_->CreateNodeExecStats(&item.kernel->def());
params->track_allocations = stats ? stats->TrackAllocations() : false;
nodestats::SetScheduled(stats, scheduled_nsec);
nodestats::SetAllStart(stats);
}
if (vlog_) {
VLOG(1) << "Process node: " << id << " step " << params->step_id << " "
<< SummarizeNodeDef(item.kernel->def())
<< (tagged_node.get_is_dead() ? " is dead" : "")
<< " device: " << device->name();
}
Entry* first_input = propagator_.GetInputTensors(tagged_node);
bool launched_asynchronously = false;
if (tagged_node.get_is_dead() && !item.is_transfer_node) {
if (outputs.size() < item.num_outputs) outputs.resize(item.num_outputs);
} else if (TF_PREDICT_FALSE(item.is_noop)) {
ProcessNoop(stats);
} else if (item.const_tensor != nullptr && !params->track_allocations) {
ProcessConstTensor(item, &outputs, stats);
} else {
bool is_input_dead = false;
s = PrepareInputs(item, first_input, inputs.get(), &input_alloc_attrs,
&is_input_dead);
if (!s.ok()) {
const int num_inputs = item.num_inputs;
for (int i = 0; i < num_inputs; ++i) {
(first_input + i)->ClearVal();
}
propagator_.MaybeMarkCompleted(tagged_node);
activity_watcher::ActivityEnd(activity_id);
completed = NodeDone(s, ready.get(), stats, inline_ready);
continue;
}
params->op_kernel = item.kernel;
params->frame_iter = propagator_.GetFrameAndIter(tagged_node);
params->is_input_dead = is_input_dead;
params->output_attr_array = item.output_attrs();
params->forward_from_array = item.forward_from();
params->outputs_required_array = item.outputs_required.get();
params->inputs = *inputs;
params->input_alloc_attrs = input_alloc_attrs;
if (item.kernel_is_async) {
ProcessAsync(item, *params, tagged_node, first_input, stats,
activity_id);
launched_asynchronously = true;
} else {
s = ProcessSync(item, params.get(), &outputs, stats);
}
}
if (!launched_asynchronously) {
if (vlog_) {
VLOG(2) << "Synchronous kernel done: " << id << " step "
<< params->step_id << " "
<< SummarizeNodeDef(item.kernel->def())
<< (tagged_node.get_is_dead() ? " is dead: " : "")
<< " device: " << device->name();
}
const int num_inputs = item.num_inputs;
for (int i = 0; i < num_inputs; ++i) {
(first_input + i)->ClearVal();
}
propagator_.MaybeMarkCompleted(tagged_node);
activity_watcher::ActivityEnd(activity_id);
if (s.ok()) {
propagator_.PropagateOutputs(tagged_node, &outputs, ready.get());
}
const int num_outputs = item.num_outputs;
for (int i = 0; i < num_outputs; ++i) {
outputs[i].ClearVal();
}
if (stats) {
scheduled_nsec = nodestats::NowInNsec();
}
completed = NodeDone(s, ready.get(), stats, inline_ready);
}
}
if (completed) ScheduleFinish();
}
template <class PropagatorStateType>
Status ExecutorState<PropagatorStateType>::PrepareInputs(
const NodeItem& item, Entry* first_input, TensorValueVec* inputs,
AllocatorAttributeVec* input_alloc_attrs, bool* is_input_dead) {
inputs->resize(item.num_inputs);
input_alloc_attrs->resize(item.num_inputs);
*is_input_dead = false;
for (int i = 0; i < item.num_inputs; ++i) {
const bool expect_ref = TF_PREDICT_FALSE(item.is_any_input_ref_typed) &&
IsRefType(item.input_type(i));
Entry* entry = first_input + i;
(*input_alloc_attrs)[i] = entry->alloc_attr;
TensorValue* inp = &(*inputs)[i];
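      // An input entry is in one of four states: absent (a dead input, or a
      // Merge input that has not yet arrived), a plain value, an immutable
      // constant, or a ref tensor that must be dereferenced when the kernel
      // expects a value type.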
switch (entry->state) {
case Entry::State::NO_VALUE: {
inp->mutex_if_ref = nullptr;
if (item.is_merge) {
inp->tensor = nullptr;
} else {
DCHECK(item.is_transfer_node)
<< item.kernel->name() << " - input " << i;
entry->state = Entry::State::HAS_CONST_TENSOR;
entry->const_tensor = kEmptyTensor;
inp->tensor = const_cast<Tensor*>(kEmptyTensor);
*is_input_dead = true;
}
break;
}
case Entry::State::HAS_VALUE: {
if (TF_PREDICT_FALSE(expect_ref)) {
return AttachDef(
errors::InvalidArgument(i, "-th input expects a ref type"),
item.kernel->def());
}
inp->mutex_if_ref = nullptr;
inp->tensor = entry->val.get();
break;
}
case Entry::State::HAS_CONST_TENSOR: {
if (TF_PREDICT_FALSE(expect_ref)) {
return AttachDef(
errors::InvalidArgument(i, "-th input expects a ref type"),
item.kernel->def());
}
inp->mutex_if_ref = nullptr;
inp->tensor = const_cast<Tensor*>(entry->const_tensor);
break;
}
case Entry::State::HAS_REF_TENSOR: {
{
tf_shared_lock ml(*entry->ref_tensor.mu);
if (TF_PREDICT_FALSE(!entry->ref_tensor.tensor->IsInitialized() &&
!item.is_initialization_op)) {
return AttachDef(errors::FailedPrecondition(
"Attempting to use uninitialized value ",
item.kernel->requested_input(i)),
item.kernel->def());
}
}
if (expect_ref) {
inp->mutex_if_ref = entry->ref_tensor.mu;
inp->tensor = entry->ref_tensor.tensor;
} else {
{
mutex* ref_mu = entry->ref_tensor.mu;
Tensor* ref_tensor = entry->ref_tensor.tensor;
tf_shared_lock l(*ref_mu);
entry->val.Init(*ref_tensor);
}
entry->state = Entry::State::HAS_VALUE;
inp->mutex_if_ref = nullptr;
inp->tensor = entry->val.get();
if (TF_PREDICT_FALSE(item.input_type(i) != inp->tensor->dtype())) {
return AttachDef(
errors::InvalidArgument(
i, "-th input expects type ",
DataTypeString(item.input_type(i)),
" but automatically dereferenced input tensor has type ",
DataTypeString(inp->tensor->dtype())),
item.kernel->def());
}
}
break;
}
}
}
return absl::OkStatus();
}
template <class PropagatorStateType>
Status ExecutorState<PropagatorStateType>::ProcessOutputs(
const NodeItem& item, OpKernelContext* ctx, Entry* outputs,
NodeExecStatsInterface* stats) {
Status s = ctx->status();
if (!s.ok()) {
s = AttachDef(s, item.kernel->def());
if (vlog_ && VLOG_IS_ON(1)) {
LOG(WARNING) << this << " Compute status: " << s;
}
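    // Augment OOM errors with allocation details (or a hint on how to obtain
    // them), and rewrite UNAVAILABLE errors from non-communication ops so
    // they are not mistaken for network failures.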
if (s.code() == error::RESOURCE_EXHAUSTED) {
if (stats_collector_) {
string err =
stats_collector_->ReportAllocsOnResourceExhausted(s.message());
s = errors::CreateWithUpdatedMessage(s,
strings::StrCat(s.message(), err));
} else {
s = errors::CreateWithUpdatedMessage(
s,
strings::StrCat(
s.message(),
"\nHint: If you want to see a list of allocated tensors when "
"OOM happens, add report_tensor_allocations_upon_oom "
"to RunOptions for current allocation info. This isn't "
"available when running in Eager mode.\n"));
}
} else if (s.code() == error::UNAVAILABLE &&
!item.is_distributed_communication) {
s = errors::ReplaceErrorFromNonCommunicationOps(s, item.kernel->name());
}
return ADD_SOURCE_LOCATION(s);
}
for (int i = 0; i < item.num_outputs; ++i) {
const TensorValue val = ctx->release_output(i);
Entry* out = &outputs[i];
DCHECK(out->state == Entry::State::NO_VALUE);
if (val.tensor == nullptr) {
if (!(item.is_recv_or_switch ||
(item.outputs_required && !item.outputs_required[i]))) {
s.Update(errors::Internal("Missing ", i, "-th output from ",
FormatNodeDefForError(item.kernel->def())));
}
} else {
out->alloc_attr = ctx->output_alloc_attr(i);
DataType dtype = val.dtype_safe();
if (dtype == item.output_type(i)) {
if (stats && val.tensor->IsInitialized()) {
nodestats::SetOutput(stats, i, val.tensor);
}
if (val.is_ref()) {
out->state = Entry::State::HAS_REF_TENSOR;
out->ref_tensor.tensor = val.tensor;
out->ref_tensor.mu = val.mutex_if_ref;
if (log_memory_) {
Tensor to_log;
{
tf_shared_lock l(*out->ref_tensor.mu);
to_log = *out->ref_tensor.tensor;
}
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, to_log);
}
} else {
out->state = Entry::State::HAS_VALUE;
out->val.Init(std::move(*val.tensor));
if (log_memory_) {
LogMemory::RecordTensorOutput(ctx->op_kernel().name(),
ctx->step_id(), i, *out->val);
}
}
} else {
s.Update(
errors::Internal("Output ", i, " of type ", DataTypeString(dtype),
" does not match declared output type ",
DataTypeString(item.output_type(i)), " for node ",
FormatNodeDefForError(item.kernel->def())));
}
}
if (!val.is_ref()) {
delete val.tensor;
}
}
return s;
}
template <class PropagatorStateType>
bool ExecutorState<PropagatorStateType>::NodeDone(
const Status& s, TaggedNodeSeq* ready, NodeExecStatsInterface* stats,
TaggedNodeReadyQueue* inline_ready) {
if (stats) {
nodestats::SetAllEnd(stats);
DCHECK_NE(stats_collector_, nullptr);
stats->Done(immutable_state_.params().device->name());
}
if (TF_PREDICT_TRUE(s.ok())) {
const size_t ready_size = ready->size();
if (ready_size == 0) {
return num_outstanding_ops_.fetch_sub(1) == 1;
} else {
if (ready_size > 1) {
num_outstanding_ops_.fetch_add(ready_size - 1,
std::memory_order_relaxed);
}
ScheduleReady(ready, inline_ready);
return false;
}
} else {
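    // An error occurred: only the first failure is recorded in status_. If
    // cancellation is already in flight, the error is marked as derived so
    // that status aggregation prefers the root cause.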
bool abort_run = false;
Status maybe_derived_s(s);
{
mutex_lock l(mu_);
if (status_.ok()) {
abort_run = true;
if (cancellation_manager_ && cancellation_manager_->IsCancelled() &&
(errors::IsCancelled(s) || errors::IsAborted(s))) {
status_ = StatusGroup::MakeDerived(s);
maybe_derived_s = status_;
} else {
status_ = s;
}
}
}
if (abort_run) {
TRACEPRINTF("StartAbort: %s", s.ToString());
if (cancellation_manager_) {
VLOG(1) << "[" << immutable_state_.params().device->name()
<< "] Executor start aborting: " << s;
}
if (rendezvous_) {
rendezvous_->StartAbort(s);
}
if (cancellation_manager_) {
cancellation_manager_->StartCancelWithStatus(maybe_derived_s);
} else if (collective_executor_) {
collective_executor_->StartAbort(s);
}
}
return num_outstanding_ops_.fetch_sub(1) == 1;
}
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ScheduleReady(
TaggedNodeSeq* ready, TaggedNodeReadyQueue* inline_ready) {
tsl::profiler::TraceMe activity(
[&]() {
return strings::StrCat(
"ExecutorState::ScheduleReady#",
"ready_size=", (ready == nullptr ? -1 : ready->size()),
",inline_ready_size=",
(inline_ready == nullptr ? -1 : inline_ready->size()), "#");
},
tsl::profiler::GetTFTraceMeLevel(false));
DCHECK(!ready->empty());
int64_t scheduled_nsec = 0;
if (stats_collector_) {
scheduled_nsec = nodestats::NowInNsec();
}
if (run_all_kernels_inline_) {
if (inline_ready == nullptr) {
RunTask([this, ready = std::move(*ready), scheduled_nsec]() {
for (auto& tagged_node : ready) {
Process(tagged_node, scheduled_nsec);
}
});
} else {
for (auto& tagged_node : *ready) {
inline_ready->push_back(tagged_node);
}
}
} else {
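    // Without an inline queue, every ready node is dispatched to the runner.
    // With one, inexpensive and dead nodes run inline on this thread while
    // expensive nodes go to the runner; the most recent expensive node is
    // held back so this thread is not left idle if nothing was queued inline.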
const TaggedNode* curr_expensive_node = nullptr;
TaggedNodeSeq expensive_nodes;
if (inline_ready == nullptr) {
for (auto& tagged_node : *ready) {
RunTask([=]() { Process(tagged_node, scheduled_nsec); },
ready->size());
}
} else {
for (auto& tagged_node : *ready) {
const NodeItem& item = *tagged_node.node_item;
if (tagged_node.get_is_dead() || !kernel_stats_->IsExpensive(item)) {
inline_ready->push_back(tagged_node);
} else {
if (curr_expensive_node) {
expensive_nodes.push_back(*curr_expensive_node);
}
curr_expensive_node = &tagged_node;
}
}
}
if (curr_expensive_node) {
if (inline_ready->empty()) {
inline_ready->push_back(*curr_expensive_node);
} else {
expensive_nodes.push_back(*curr_expensive_node);
}
}
if (!expensive_nodes.empty()) {
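      // A small batch of expensive nodes is dispatched directly; a large one
      // is split into chunks of kInlineScheduleReadyThreshold that fan out
      // from child tasks, so this thread does not stall enqueueing them all.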
if (expensive_nodes.size() < kInlineScheduleReadyThreshold) {
for (auto& tagged_node : expensive_nodes) {
RunTask(std::bind(&ExecutorState::Process, this, tagged_node,
scheduled_nsec),
expensive_nodes.size());
}
} else {
auto it = expensive_nodes.begin();
while (it < expensive_nodes.end()) {
auto end = it;
std::advance(end, kInlineScheduleReadyThreshold);
if (end > expensive_nodes.end()) {
end = expensive_nodes.end();
}
TaggedNodeSeq ready_chunk{it, end};
RunTask(
[this, ready_chunk = std::move(ready_chunk), scheduled_nsec]() {
tsl::profiler::TraceMe activity(
[&]() {
return strings::StrCat(
"ExecutorState::ScheduleReady::"
"ChildThreadExpensiveNodes#",
"ready_chunk_size=", ready_chunk.size(), "#");
},
tsl::profiler::GetTFTraceMeLevel(false));
for (auto& tagged_node : ready_chunk) {
RunTask(std::bind(&ExecutorState::Process, this, tagged_node,
scheduled_nsec),
ready_chunk.size());
}
});
it = end;
}
}
}
}
ready->clear();
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::ScheduleFinish() {
{
mutex_lock lock(num_deferred_ops_mu_);
if (num_deferred_ops_ > 0) {
finish_when_deferred_ops_done_ = true;
return;
}
}
Finish();
}
template <class PropagatorStateType>
void ExecutorState<PropagatorStateType>::Finish() {
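  // In every completion path below, `this` is deleted before the done
  // callback is dispatched through the runner, so only locals copied out
  // beforehand may be used afterwards.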
mu_.lock();
auto status = status_;
auto done_cb = std::move(done_cb_);
auto runner = std::move(runner_);
mu_.unlock();
int64_t trace_id = trace_id_;
int64_t step_id = step_id_;
CHECK(done_cb != nullptr);
Device* device = immutable_state_.params().device;
if (vlog_ && !status.ok() && VLOG_IS_ON(1)) {
propagator_.DumpState();
}
if (!device->AllowsSyncOnCompletion()) {
status.Update(device->RefreshStatus());
if (!status.ok()) {
if (rendezvous_) {
rendezvous_->StartAbort(status);
}
if (cancellation_manager_) {
cancellation_manager_->StartCancelWithStatus(status);
} else if (collective_executor_) {
collective_executor_->StartAbort(status);
}
}
delete this;
runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {
tsl::profiler::TraceMeConsumer activity(
[&] {
return tsl::profiler::TraceMeEncode("ExecutorDoneCallback",
{{"id", step_id}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id,
tsl::profiler::TraceMeLevel::kInfo);
done_cb(status);
});
return;
}
if (sync_on_finish_ && status.ok()) {
device->Sync([this, step_id, trace_id, runner = std::move(runner),
done_cb = std::move(done_cb)](const Status& status) mutable {
delete this;
runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {
tsl::profiler::TraceMeConsumer activity(
[&] {
return tsl::profiler::TraceMeEncode("ExecutorDoneCallback",
{{"id", step_id}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id,
tsl::profiler::TraceMeLevel::kInfo);
done_cb(status);
});
});
} else {
delete this;
runner([step_id, trace_id, status, done_cb = std::move(done_cb)]() {
tsl::profiler::TraceMeConsumer activity(
[&] {
return tsl::profiler::TraceMeEncode("ExecutorDoneCallback",
{{"id", step_id}});
},
tsl::profiler::ContextType::kTfExecutor, trace_id,
tsl::profiler::TraceMeLevel::kInfo);
done_cb(status);
});
}
}
void ExecutorImpl::RunAsyncInternal(const Args& args, DoneCallback done) {
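  // Pick the cheapest propagator the graph allows: the ordered propagator
  // when deterministic op order is required, the full control-flow-aware
  // propagator when the graph needs it, and the simple one otherwise.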
if (OpOrderDeterminismRequired()) {
(new ExecutorState<OrderedPropagatorState>(args, immutable_state_,
&kernel_stats_))
->RunAsync(std::move(done));
} else if (immutable_state_.requires_control_flow_support()) {
(new ExecutorState<PropagatorState>(args, immutable_state_, &kernel_stats_))
->RunAsync(std::move(done));
} else {
(new ExecutorState<SimplePropagatorState>(args, immutable_state_,
&kernel_stats_))
->RunAsync(std::move(done));
}
}
}
Status NewLocalExecutor(const LocalExecutorParams& params, const Graph& graph,
Executor** executor) {
ExecutorImpl* impl = new ExecutorImpl(params);
const Status s = impl->Initialize(graph);
if (s.ok()) {
*executor = impl;
} else {
delete impl;
}
return s;
}
Status CreateNonCachedKernel(Device* device, FunctionLibraryRuntime* flib,
const std::shared_ptr<const NodeProperties>& props,
int graph_def_version, OpKernel** kernel) {
const auto device_type = DeviceType(device->attributes().device_type());
auto allocator = device->GetAllocator(AllocatorAttributes());
return CreateOpKernel(device_type, device, allocator, flib,
device->resource_manager(), props, graph_def_version,
kernel);
}
void DeleteNonCachedKernel(OpKernel* kernel) { delete kernel; }
namespace {
class DefaultExecutorRegistrar {
public:
DefaultExecutorRegistrar() {
Factory* factory = new Factory;
ExecutorFactory::Register("", factory);
ExecutorFactory::Register("DEFAULT", factory);
}
private:
class Factory : public ExecutorFactory {
Status NewExecutor(const LocalExecutorParams& params, const Graph& graph,
std::unique_ptr<Executor>* out_executor) override {
Executor* ret = nullptr;
      TF_RETURN_IF_ERROR(NewLocalExecutor(params, graph, &ret));
out_executor->reset(ret);
return absl::OkStatus();
}
};
};
static DefaultExecutorRegistrar registrar;
}
} | #include "tensorflow/core/common_runtime/executor.h"
#include <algorithm>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/step_stats_collector.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/local_rendezvous.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class ExecutorTest : public ::testing::Test {
protected:
ExecutorTest()
: device_(DeviceFactory::NewDevice("CPU", {},
"/job:localhost/replica:0/task:0")),
step_stats_collector_(&step_stats_) {
SessionOptions options;
thread_pool_ = ComputePool(options);
}
~ExecutorTest() override {
while (!rendez_->RefCountIsOne()) {
LOG(INFO) << "Waiting for rendezvous to release. Current refcount: "
<< rendez_->RefCount();
absl::SleepFor(absl::Milliseconds(200));
LocalRendezvous::ReleaseAbortedRendezvous();
}
CHECK(rendez_->Unref());
delete exec_;
}
void Create(std::unique_ptr<const Graph> graph) {
const int version = graph->versions().producer();
LocalExecutorParams params;
params.device = device_.get();
params.create_kernel =
[this, version](const std::shared_ptr<const NodeProperties>& props,
OpKernel** kernel) {
return CreateNonCachedKernel(device_.get(), nullptr, props, version,
kernel);
};
params.delete_kernel = [](OpKernel* kernel) {
DeleteNonCachedKernel(kernel);
};
rendez_ = NewLocalRendezvous();
delete exec_;
TF_CHECK_OK(NewLocalExecutor(params, *graph, &exec_));
runner_ = [this](std::function<void()> fn) { thread_pool_->Schedule(fn); };
}
Status Run(Rendezvous* rendez) {
Executor::Args args;
args.rendezvous = rendez;
args.stats_collector = &step_stats_collector_;
args.runner = runner_;
return exec_->Run(args);
}
thread::ThreadPool* thread_pool_ = nullptr;
std::unique_ptr<Device> device_;
Executor* exec_ = nullptr;
StepStatsCollector step_stats_collector_;
StepStats step_stats_;
Executor::Args::Runner runner_;
Rendezvous* rendez_ = nullptr;
};
Tensor V(const float val) {
Tensor tensor(DT_FLOAT, TensorShape({}));
tensor.scalar<float>()() = val;
return tensor;
}
Tensor VI(const int32_t val) {
Tensor tensor(DT_INT32, TensorShape({}));
tensor.scalar<int32>()() = val;
return tensor;
}
Tensor VB(const bool val) {
Tensor tensor(DT_BOOL, TensorShape({}));
tensor.scalar<bool>()() = val;
return tensor;
}
Tensor VD(const double val) {
Tensor tensor(DT_DOUBLE, TensorShape({}));
tensor.scalar<double>()() = val;
return tensor;
}
float V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_FLOAT);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<float>()();
}
static uint64 kIncarnation = 1;
Rendezvous::ParsedKey Key(const string& sender, const uint64 incarnation,
const string& receiver, const string& name) {
Rendezvous::ParsedKey result;
CHECK(
Rendezvous::ParseKey(Rendezvous::CreateKey(sender, incarnation, receiver,
name, FrameAndIter(0, 0)),
&result)
.ok());
return result;
}
#define ALICE "/job:j/replica:0/task:0/cpu:0"
#define BOB "/job:j/replica:0/task:0/device:GPU:0"
TEST_F(ExecutorTest, SimpleAdd) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Recv(g.get(), "b", "float", ALICE, 1, BOB);
auto tmp = test::graph::Add(g.get(), in0, in1);
test::graph::Send(g.get(), tmp, "c", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0),
false));
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "b"), args, V(1.0),
false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "c"), args, &out, &is_dead));
EXPECT_EQ(2.0, V(out));
}
TEST_F(ExecutorTest, SelfAdd) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto v = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
const int N = 10;
for (int i = 1; i <= N; ++i) {
v = test::graph::Add(g.get(), v, v);
}
test::graph::Send(g.get(), v, "b", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(
rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0), false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "b"), args, &out, &is_dead));
EXPECT_EQ(1024.0, V(out));
}
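// Fans one Recv out to N Identity nodes, then folds them together with Adds
// in random order; with an input of 1.0 the final value is exactly N.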
void BuildTree(int N, Graph* g) {
CHECK_GT(N, 1);
auto in = test::graph::Recv(g, "a", "float", ALICE, 1, BOB);
std::vector<Node*> nodes;
int i = 0;
for (; i < N; ++i) {
nodes.push_back(test::graph::Identity(g, in, 0));
}
random::PhiloxRandom philox(testing::RandomSeed(), 17);
random::SimplePhilox rnd(&philox);
while (nodes.size() > 1) {
int x = rnd.Uniform(nodes.size());
auto in0 = nodes[x];
nodes[x] = nodes.back();
nodes.resize(nodes.size() - 1);
x = rnd.Uniform(nodes.size());
auto in1 = nodes[x];
nodes[x] = test::graph::Add(g, in0, in1);
}
test::graph::Send(g, nodes.back(), "b", BOB, 1, ALICE);
}
TEST_F(ExecutorTest, RandomTree) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
BuildTree(4096, g.get());
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(
rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0), false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "b"), args, &out, &is_dead));
EXPECT_EQ(4096.0, V(out));
}
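// Initializes a variable to 1.0, then races 1024 unsynchronized Add/Assign
// pairs against the Send of its value. The result is nondeterministic but
// bounded above by 1025, which is what the test below asserts (and why the
// test is skipped under ThreadSanitizer).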
void BuildConcurrentAddAssign(Graph* g) {
auto one = test::graph::Constant(g, V(1.0));
auto var = test::graph::Var(g, DT_FLOAT, TensorShape({}));
auto init = test::graph::Assign(g, var, one);
auto out = test::graph::Send(g, var, "out", ALICE, kIncarnation, BOB);
for (int i = 0; i < 1024; ++i) {
auto add = test::graph::Add(g, var, one);
g->AddControlEdge(init, add);
auto assign = test::graph::Assign(g, var, add);
g->AddControlEdge(assign, out);
}
}
#ifndef THREAD_SANITIZER
TEST_F(ExecutorTest, ConcurrentAddAssign) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
BuildConcurrentAddAssign(g.get());
Create(std::move(g));
for (int iters = 0; iters < 16; ++iters) {
Rendezvous* rendez = NewLocalRendezvous();
TF_ASSERT_OK(Run(rendez));
Rendezvous::Args args;
Tensor out;
bool is_dead;
TF_ASSERT_OK(rendez->Recv(Key(ALICE, kIncarnation, BOB, "out"), args, &out,
&is_dead));
VLOG(1) << "Get " << V(out);
EXPECT_LE(V(out), 1025.0);
rendez->Unref();
}
}
#endif
TEST_F(ExecutorTest, SimpleSwitchLive) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Constant(g.get(), VB(false));
auto tmp = test::graph::Switch(g.get(), in0, in1);
test::graph::Send(g.get(), tmp, "c", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0),
false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "c"), args, &out, &is_dead));
EXPECT_EQ(1.0, V(out));
EXPECT_FALSE(is_dead);
}
TEST_F(ExecutorTest, SimpleSwitchDead) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Constant(g.get(), VB(true));
auto tmp = test::graph::Switch(g.get(), in0, in1);
test::graph::Send(g.get(), tmp, "c", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous::Args args;
TF_ASSERT_OK(rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"), args, V(1.0),
false));
TF_ASSERT_OK(Run(rendez_));
Tensor out = V(-1);
bool is_dead = false;
TF_ASSERT_OK(
rendez_->Recv(Key(BOB, kIncarnation, ALICE, "c"), args, &out, &is_dead));
EXPECT_TRUE(is_dead);
}
TEST_F(ExecutorTest, Abort) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Recv(g.get(), "a", "float", ALICE, 1, BOB);
auto in1 = test::graph::Recv(g.get(), "b", "float", ALICE, 1, BOB);
auto in2 = test::graph::Recv(g.get(), "c", "float", ALICE, 1, BOB);
auto in3 = test::graph::Recv(g.get(), "d", "float", ALICE, 1, BOB);
auto add0 = test::graph::Add(g.get(), in0, in1);
auto add1 = test::graph::Add(g.get(), in2, in3);
auto add2 = test::graph::Add(g.get(), add0, add1);
test::graph::Send(g.get(), add2, "e", BOB, 1, ALICE);
Create(std::move(g));
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, "a"),
Rendezvous::Args(), V(1.0), false);
rendez_->Unref();
});
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, "b"),
Rendezvous::Args(), V(1.0), false);
rendez_->Unref();
});
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
Status s = rendez_->Send(Key(ALICE, kIncarnation, BOB, "c"),
Rendezvous::Args(), V(1.0), false);
rendez_->Unref();
});
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(100 * 1000);
rendez_->StartAbort(errors::Aborted(""));
rendez_->Unref();
});
EXPECT_TRUE(errors::IsAborted(Run(rendez_)));
Tensor out = V(-1);
bool is_dead = false;
EXPECT_TRUE(errors::IsAborted(rendez_->Recv(
Key(BOB, kIncarnation, ALICE, "c"), Rendezvous::Args(), &out, &is_dead)));
}
TEST_F(ExecutorTest, RecvInvalidDtype) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto one = test::graph::Recv(g.get(), "one", "float", ALICE, 1, BOB);
auto var = test::graph::Var(g.get(), DT_FLOAT, TensorShape({1}));
auto init = test::graph::Assign(g.get(), var, one);
auto* two = test::graph::Send(g.get(), var, "two", BOB, 1, ALICE);
g->AddControlEdge(init, two);
Create(std::move(g));
Rendezvous* rendez = NewLocalRendezvous();
TF_ASSERT_OK(rendez->Send(Key(ALICE, 1, BOB, "one"), Rendezvous::Args(),
VD(1.0), false));
EXPECT_TRUE(errors::IsInternal(Run(rendez)));
Tensor output;
bool is_dead;
EXPECT_TRUE(errors::IsInternal(rendez->Recv(
Key(BOB, 1, ALICE, "two"), Rendezvous::Args(), &output, &is_dead)));
rendez->Unref();
}
TEST_F(ExecutorTest, RecvInvalidRefDtype) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto var = test::graph::InvalidRefType(g.get(), DT_FLOAT, DT_DOUBLE);
test::graph::Send(g.get(), var, "out", BOB, 1, ALICE);
Create(std::move(g));
Rendezvous* rendez = NewLocalRendezvous();
EXPECT_TRUE(errors::IsInternal(Run(rendez)));
Tensor output;
bool is_dead;
EXPECT_TRUE(errors::IsInternal(rendez->Recv(
Key(BOB, 1, ALICE, "out"), Rendezvous::Args(), &output, &is_dead)));
rendez->Unref();
}
TEST_F(ExecutorTest, NoInputTensors) {
auto g = std::make_unique<Graph>(OpRegistry::Global());
test::graph::Constant(g.get(), V(1.0));
Create(std::move(g));
TF_ASSERT_OK(Run(rendez_));
}
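// Benchmarks raw executor scheduling overhead on a randomly wired DAG of
// NoOps, roughly `width` nodes per layer and `depth` layers deep, connected
// only by control edges.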
static void BM_executor(::testing::benchmark::State& state) {
const int width = state.range(0);
const int depth = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
random::PhiloxRandom philox(1729, 17);
random::SimplePhilox rand(&philox);
uint64 cur = 0;
uint32 r = 1 + rand.Rand32() % width;
std::vector<Node*> ready_nodes;
for (int i = 0; i < r; ++i) {
ready_nodes.push_back(test::graph::NoOp(g, {}));
++cur;
}
std::random_device random_device;
std::mt19937 rng(random_device());
for (int i = 0; i < depth; ++i) {
std::shuffle(ready_nodes.begin(), ready_nodes.end(), rng);
r = 1 + rand.Rand32() % (ready_nodes.size());
std::vector<Node*> control_inputs;
for (int j = 0; j < r; ++j) {
control_inputs.push_back(ready_nodes.back());
ready_nodes.pop_back();
}
Node* n = test::graph::NoOp(g, control_inputs);
++cur;
r = 1 + rand.Rand32() % width;
for (int j = 0; j < r; ++j) {
ready_nodes.push_back(test::graph::NoOp(g, {n}));
++cur;
}
}
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, false).Run(state);
state.SetLabel(strings::StrCat("Nodes = ", cur));
state.SetItemsProcessed(cur * static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(16, 1024);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(32, 8192);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 16);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(8192, 32);
BENCHMARK(BM_executor)->UseRealTime()->ArgPair(1024, 1024);
static void BM_const_identity(::testing::benchmark::State& state) {
const int width = state.range(0);
const int outputs_per_const = state.range(1);
Graph* g = new Graph(OpRegistry::Global());
for (int i = 0; i < width; ++i) {
Tensor i_t(i);
Node* const_node = test::graph::Constant(g, i_t);
for (int j = 0; j < outputs_per_const; ++j) {
test::graph::Identity(g, const_node);
}
}
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, false).Run(state);
state.SetLabel(strings::StrCat("Nodes = ", (1 + outputs_per_const) * width));
state.SetItemsProcessed((1 + outputs_per_const) * width *
static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_const_identity)
->UseRealTime()
->ArgPair(1, 1)
->ArgPair(1, 100)
->ArgPair(100, 1)
->ArgPair(100, 100);
static void BM_FeedInputFetchOutput(::testing::benchmark::State& state) {
Graph* g = new Graph(OpRegistry::Global());
Node* x = test::graph::Recv(g, "x", "float", ALICE, 1, BOB);
Node* y = test::graph::Recv(g, "y", "float", ALICE, 1, BOB);
Node* sum = test::graph::Add(g, x, y);
Node* z = test::graph::Send(g, sum, "z", BOB, 1, ALICE);
string x_key = test::GetRendezvousKey(x);
string y_key = test::GetRendezvousKey(y);
string z_key = test::GetRendezvousKey(z);
Tensor val(DT_FLOAT, TensorShape({}));
val.scalar<float>()() = 3.14;
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, false)
.RunWithRendezvousArgs({{x_key, val}, {y_key, val}}, {z_key}, state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
}
BENCHMARK(BM_FeedInputFetchOutput);
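// Rewrites a data edge into a matched _Send/_Recv pair. The control edge
// from the producer to the _Recv preserves execution ordering once the
// direct edge is removed.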
Status ReplaceEdgeWithSendRecv(Graph* g, const Edge* edge, const string& tensor,
const string& sender,
const uint64 sender_incarnation,
const string& receiver) {
Node* send;
NodeDef send_def;
TF_CHECK_OK(NodeDefBuilder(g->NewName("n"), "_Send")
.Input(edge->src()->name(), edge->src_output(),
edge->src()->output_type(edge->src_output()))
.Attr("tensor_name", tensor)
.Attr("send_device", sender)
.Attr("send_device_incarnation",
static_cast<int64_t>(sender_incarnation))
.Attr("recv_device", receiver)
.Finalize(&send_def));
TF_ASSIGN_OR_RETURN(send, g->AddNode(send_def));
Node* recv;
NodeDef recv_def;
TF_CHECK_OK(
NodeDefBuilder(g->NewName("n"), "_Recv")
.Attr("tensor_name", tensor)
.Attr("send_device", sender)
.Attr("send_device_incarnation",
static_cast<int64_t>(sender_incarnation))
.Attr("recv_device", receiver)
.Attr("tensor_type", edge->dst()->input_type(edge->dst_input()))
.Finalize(&recv_def));
TF_ASSIGN_OR_RETURN(recv, g->AddNode(recv_def));
g->AddEdge(edge->src(), edge->src_output(), send, 0);
g->AddEdge(recv, 0, edge->dst(), edge->dst_input());
g->AddControlEdge(edge->src(), recv);
g->RemoveEdge(edge);
return absl::OkStatus();
}
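// Benchmarks a While loop that counts up to `loop_iters` while carrying
// `loop_vars` int32 loop variables. `lower` runs LowerFunctionalOpsPass to
// turn the functional While into Switch/Merge form; `transfer` additionally
// rewrites every LoopCond->Switch edge into a Send/Recv pair to simulate
// cross-device control-flow transfers.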
static void BM_WhileLoopHelper(::testing::benchmark::State& state,
int loop_iters, int loop_vars, bool lower,
bool transfer) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
const Tensor one_t = test::AsScalar<int32>(1);
std::vector<string> args;
args.reserve(loop_vars);
args.push_back("x: int32");
for (int i = 1; i < loop_vars; ++i) {
args.push_back(strings::StrCat("x", i, ": int32"));
}
std::vector<string> body_rets;
body_rets.reserve(loop_vars);
body_rets.push_back("y: int32");
for (int i = 1; i < loop_vars; ++i) {
body_rets.push_back(strings::StrCat("y", i, ": int32"));
}
std::vector<FunctionDefHelper::Node> body_nodes;
body_nodes.reserve(1 + loop_vars);
body_nodes.push_back(
{{"one"}, "Const", {}, {{"value", one_t}, {"dtype", DT_INT32}}});
body_nodes.push_back({{"y"}, "Add", {"x", "one"}, {{"T", DT_INT32}}});
for (int i = 1; i < loop_vars; ++i) {
body_nodes.push_back({{strings::StrCat("y", i)},
"Relu",
{strings::StrCat("x", i)},
{{"T", DT_INT32}}});
}
*f_lib_proto.add_function() = FunctionDefHelper::Define(
"XPlusOne",
args,
body_rets,
{},
body_nodes);
const Tensor loop_iters_t = test::AsScalar<int32>(loop_iters);
*f_lib_proto.add_function() = FunctionDefHelper::Define(
"LessThanOrEqualToN",
args,
{"z: bool"},
{},
{
{{"N"}, "Const", {}, {{"value", loop_iters_t}, {"dtype", DT_INT32}}},
{{"z"}, "LessEqual", {"x", "N"}, {{"T", DT_INT32}}},
});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Const(root.WithOpName("A"), 0, {});
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs;
std::vector<DataType> input_types(loop_vars, DT_INT32);
inputs.reserve(loop_vars);
for (int i = 0; i < loop_vars; ++i) {
inputs.push_back(NodeBuilder::NodeOut(a.node()));
}
AttrValue int32_attr;
int32_attr.set_type(DT_INT32);
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XPlusOne");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", input_types)
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 20)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(while_node)),
Output(while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
if (lower) {
FunctionLibraryDefinition flib_def(graph->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
opt_options.session_options = &session_options;
opt_options.graph = &graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
TF_ASSERT_OK(pass.Run(opt_options));
if (transfer) {
for (Node* node : graph->nodes()) {
if (node->type_string() != "LoopCond") {
continue;
}
for (const Edge* edge : node->out_edges()) {
if (edge->dst()->type_string() != "Switch") {
continue;
}
string tensor_name = strings::StrCat("c", edge->id());
TF_ASSERT_OK(ReplaceEdgeWithSendRecv(graph.get(), edge, tensor_name,
BOB, 1, ALICE));
}
}
}
}
SessionOptions options;
options.config.set_inter_op_parallelism_threads(4);
FixupSourceAndSinkEdges(graph.get());
test::Benchmark("cpu", graph.release(), &options, nullptr, nullptr, "",
false)
.Run(state);
}
static void BM_LoweredWhileLoop(::testing::benchmark::State& state) {
const int loop_iters = state.range(0);
const int loop_vars = state.range(1);
BM_WhileLoopHelper(state, loop_iters, loop_vars, true,
false);
}
BENCHMARK(BM_LoweredWhileLoop)
->ArgPair(0, 1)
->ArgPair(1, 1)
->ArgPair(10, 1)
->ArgPair(100, 1)
->ArgPair(1000, 1)
->ArgPair(0, 100)
->ArgPair(1, 100)
->ArgPair(10, 100)
->ArgPair(100, 100)
->ArgPair(1000, 100);
static void BM_LoweredWhileLoopWithTransfer(
::testing::benchmark::State& state) {
const int loop_iters = state.range(0);
const int loop_vars = state.range(1);
BM_WhileLoopHelper(state, loop_iters, loop_vars, true,
true);
}
BENCHMARK(BM_LoweredWhileLoopWithTransfer)
->ArgPair(0, 100)
->ArgPair(1, 100)
->ArgPair(10, 100)
->ArgPair(100, 100)
->ArgPair(1000, 100)
->ArgPair(1, 5000)
->ArgPair(10, 5000)
->ArgPair(100, 5000)
->ArgPair(1000, 5000);
static void BM_FunctionalWhileLoop(::testing::benchmark::State& state) {
const int loop_iters = state.range(0);
const int loop_vars = state.range(1);
BM_WhileLoopHelper(state, loop_iters, loop_vars, false,
false);
}
BENCHMARK(BM_FunctionalWhileLoop)
->ArgPair(0, 1)
->ArgPair(1, 1)
->ArgPair(10, 1)
->ArgPair(100, 1)
->ArgPair(1000, 1)
->ArgPair(0, 100)
->ArgPair(1, 100)
->ArgPair(10, 100)
->ArgPair(100, 100)
->ArgPair(1000, 100);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
734ac35f-845b-4102-a28f-233a8b460dc1 | cpp | tensorflow/tensorflow | convolution_thunk | third_party/xla/xla/service/gpu/runtime/convolution_thunk.cc | third_party/xla/xla/backends/cpu/runtime/convolution_thunk_test.cc | #include "xla/service/gpu/runtime/convolution_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
#if TENSORFLOW_USE_ROCM
#include "xla/service/gpu/stream_executor_util.h"
#endif
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
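// ConvolutionThunk runs a pre-configured cuDNN/MIOpen convolution at
// executable run time; the algorithm selection lives in GpuConvConfig, and
// buffer slices are resolved against the current BufferAllocations.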
ConvolutionThunk::ConvolutionThunk(
ThunkInfo thunk_info, GpuConvConfig config,
std::vector<BufferAllocation::Slice> operand_slices,
std::vector<BufferAllocation::Slice> result_slices,
BufferAllocation::Slice scratch_slice)
: Thunk(Kind::kConvolution, thunk_info),
operand_buffers_(std::move(operand_slices)),
result_buffers_(std::move(result_slices)),
scratch_buffer_(scratch_slice),
config_(std::move(config)) {}
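// Returns the cached conv runner for `stream`, creating it on first use.
// `runner_created` reports whether this call created the runner, so the
// ROCm-only warm-up in ExecuteOnStream runs exactly once per stream.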
GenericConvRunner& ConvolutionThunk::GetOrCreateRunner(
const stream_executor::Stream* stream, bool* runner_created) {
absl::MutexLock lock(&mu_);
auto it = runner_cache_.find(stream);
*runner_created = (it == runner_cache_.end());
if (*runner_created) {
it = runner_cache_
.insert({stream, std::make_unique<GenericConvRunner>(config_)})
.first;
}
return *it->second;
}
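// Resolves operand/result slices to device memory and launches the
// convolution. On ROCm, the first execution on a stream first queries the
// available MIOpen convolve algorithms to warm up the runner.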
absl::Status ConvolutionThunk::ExecuteOnStream(const ExecuteParams& params) {
const auto& buffer_allocations = *params.buffer_allocations;
std::vector<se::DeviceMemoryBase> operand_se_buffers, result_se_buffers;
operand_se_buffers.reserve(operand_buffers_.size());
for (BufferAllocation::Slice buffer : operand_buffers_) {
operand_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
result_se_buffers.reserve(result_buffers_.size());
for (BufferAllocation::Slice buffer : result_buffers_) {
result_se_buffers.push_back(buffer_allocations.GetDeviceAddress(buffer));
}
se::DeviceMemoryBase scratch =
buffer_allocations.GetDeviceAddress(scratch_buffer_);
bool runner_created = false;
RunConvOptions opts;
opts.runner_cache = &GetOrCreateRunner(params.stream, &runner_created);
#if TENSORFLOW_USE_ROCM
if (runner_created) {
TF_ASSIGN_OR_RETURN(
GpuConvParams conv_params,
GetGpuConvParams(config_, operand_se_buffers, result_se_buffers));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config_.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config_.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config_.output_type));
TF_ASSIGN_OR_RETURN(auto dnn,
se::dnn::internal::GetDnnFromStream(params.stream));
se::OwningScratchAllocator<> scratch_allocator(
buffer_allocations.device_ordinal(),
buffer_allocations.memory_allocator());
std::vector<se::dnn::ProfileResult> profile_results;
dnn->GetMIOpenConvolveAlgorithms(
kind, input_type, output_type, params.stream, config_.input_descriptor,
conv_params.input_buf, config_.filter_descriptor,
conv_params.filter_buf, config_.output_descriptor,
conv_params.output_buf, config_.conv_desc, &scratch_allocator,
&profile_results);
}
#endif
TF_RETURN_IF_ERROR(RunGpuConv(config_, absl::MakeSpan(operand_se_buffers),
absl::MakeSpan(result_se_buffers), scratch,
params.stream, opts));
if (!params.stream->ok()) {
return Internal("ConvolutionThunk::ExecuteOnStream failed.");
}
return absl::OkStatus();
}
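// ConvolutionReorderThunk reorders an int8 filter (and an optional float
// bias) into cuDNN's vectorized kOutputInputYX32 layout so int8x32
// convolutions can consume them directly.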
ConvolutionReorderThunk::ConvolutionReorderThunk(
ThunkInfo thunk_info, absl::Span<int64_t> filter_nchw,
absl::InlinedVector<BufferAllocation::Slice, 2> operand_slices,
absl::InlinedVector<BufferAllocation::Slice, 2> result_slices)
: Thunk(Kind::kConvolutionReorder, thunk_info),
filter_descriptor_(CreateFilterDescriptor(filter_nchw)),
operand_buffers_(operand_slices),
result_buffers_(result_slices) {}
absl::Status ConvolutionReorderThunk::ExecuteOnStream(
const ExecuteParams& params) {
bool has_bias = operand_buffers_.size() > 1;
CHECK_EQ(operand_buffers_.size(), result_buffers_.size());
const auto& buffer_allocations = *params.buffer_allocations;
auto filter_input = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(operand_buffers_[0]));
auto filter_output = se::DeviceMemory<int8_t>(
buffer_allocations.GetDeviceAddress(result_buffers_[0]));
auto bias_input =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(operand_buffers_[1])))
: std::nullopt;
auto bias_output =
has_bias ? std::make_optional(se::DeviceMemory<float>(
buffer_allocations.GetDeviceAddress(result_buffers_[1])))
: std::nullopt;
auto dnn = params.stream->parent()->AsDnn();
if (dnn == nullptr) {
return absl::InternalError("No DNN for stream.");
}
return dnn->CudnnReorderConvolutionFilterAndBias(
params.stream, filter_descriptor_, filter_input, &filter_output,
std::move(bias_input), std::move(bias_output));
}
se::dnn::FilterDescriptor ConvolutionReorderThunk::CreateFilterDescriptor(
absl::Span<int64_t> filter_nchw) {
CHECK_EQ(filter_nchw.size(), 4);
se::dnn::FilterDescriptor filter_desc(2);
filter_desc.set_layout(se::dnn::FilterLayout::kOutputInputYX32);
filter_desc.set_output_feature_map_count(filter_nchw[0]);
filter_desc.set_input_feature_map_count(filter_nchw[1]);
filter_desc.set_input_filter_height(filter_nchw[2]);
filter_desc.set_input_filter_width(filter_nchw[3]);
return filter_desc;
}
}
} | #include "xla/backends/cpu/runtime/convolution_thunk.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "Eigen/Core"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/primitive_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
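// Dimensions of a stride-1, unpadded convolution in batch-major,
// channels-last layout; with the defaults the output spatial size is
// input_size - kernel_size + 1 = 1.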
struct ConvolutionDimensions {
explicit ConvolutionDimensions(int convolution_rank = 2)
: convolution_rank(convolution_rank) {}
int convolution_rank = 2;
int batch_size = 1;
int input_size = 3;
int input_channels = 5;
int kernel_size = 3;
int output_channels = 3;
int output_size = input_size - kernel_size + 1;
};
template <typename T>
class ConvolutionThunkTypedTest : public ::testing::Test {};
using CorrectTypes = ::testing::Types<float, Eigen::half>;
TYPED_TEST_SUITE(ConvolutionThunkTypedTest, CorrectTypes);
std::vector<int64_t> MakeInputDims(
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> input_dims = {dims.batch_size};
for (int i = 0; i < dims.convolution_rank; ++i) {
input_dims.push_back(dims.input_size);
}
input_dims.push_back(dims.input_channels);
return input_dims;
}
std::vector<int64_t> MakeKernelDims(
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> kernel_dims = {};
for (int i = 0; i < dims.convolution_rank; ++i) {
kernel_dims.push_back(dims.kernel_size);
}
kernel_dims.push_back(dims.input_channels);
kernel_dims.push_back(dims.output_channels);
return kernel_dims;
}
std::vector<int64_t> MakeOutputDims(
ConvolutionDimensions dims = ConvolutionDimensions()) {
std::vector<int64_t> output_dims = {dims.batch_size};
for (int i = 0; i < dims.convolution_rank; ++i) {
output_dims.push_back(dims.output_size);
}
output_dims.push_back(dims.output_channels);
return output_dims;
}
template <typename ElementType>
std::vector<ElementType> MakeDataVector(const std::vector<int64_t>& dims) {
auto size = absl::c_accumulate(dims, 1, std::multiplies<int>());
return std::vector<ElementType>(size, ElementType(0.0));
}
// Takes non-const vectors: se::DeviceMemoryBase wraps the storage as a
// writable void*, so a const data() pointer would not compile here.
template <typename ElementType>
std::vector<MaybeOwningDeviceMemory> MakeBuffers(
    std::vector<ElementType>& input, std::vector<ElementType>& kernel,
    std::vector<ElementType>& output) {
std::vector<MaybeOwningDeviceMemory> buffers;
size_t input_size_in_bytes = input.size() * sizeof(ElementType);
buffers.emplace_back(se::DeviceMemoryBase(input.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(kernel.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output.size() * sizeof(ElementType);
buffers.emplace_back(
se::DeviceMemoryBase(output.data(), output_size_in_bytes));
return buffers;
}
ConvolutionThunk::Options MakeConvolutionOptions() {
ConvolutionThunk::Options options;
options.multi_threaded = false;
options.use_acl = false;
return options;
}
ConvolutionDimensionNumbers MakeConvolutionDimensionNumbers(
int convolution_rank) {
ConvolutionDimensionNumbers dnums;
int dim = 0;
dnums.set_input_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_input_spatial_dimensions(dim++);
}
dnums.set_input_feature_dimension(dim++);
dim = 0;
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_kernel_spatial_dimensions(dim++);
}
dnums.set_kernel_input_feature_dimension(dim++);
dnums.set_kernel_output_feature_dimension(dim++);
dim = 0;
dnums.set_output_batch_dimension(dim++);
for (int i = 0; i < convolution_rank; ++i) {
dnums.add_output_spatial_dimensions(dim++);
}
dnums.set_output_feature_dimension(dim++);
return dnums;
}
Window MakeWindow(int convolution_rank) {
Window window;
for (int i = 0; i < convolution_rank; ++i) {
WindowDimension* window_dim = window.add_dimensions();
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
return window;
}
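// Builds a ConvolutionThunk plus the zero-initialized buffers and
// allocations that back it, keeping all of them alive for the duration of
// the test.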
template <typename ElementType>
class ConvolutionThunkBuilder {
public:
void SetOptions(ConvolutionThunk::Options options) {
options_ = std::move(options);
}
auto Build(ConvolutionDimensions dims = ConvolutionDimensions()) {
auto input_dims = MakeInputDims(dims);
auto kernel_dims = MakeKernelDims(dims);
auto output_dims = MakeOutputDims(dims);
return Build(input_dims, kernel_dims, output_dims);
}
auto Build(const std::vector<int64_t>& input_dims,
const std::vector<int64_t>& kernel_dims,
const std::vector<int64_t>& output_dims) {
int convolution_rank = input_dims.size() - 2;
input_ = MakeDataVector<ElementType>(input_dims);
kernel_ = MakeDataVector<ElementType>(kernel_dims);
output_ = MakeDataVector<ElementType>(output_dims);
size_t input_size_in_bytes = input_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(input_.data(), input_size_in_bytes));
size_t kernel_size_in_bytes = kernel_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(kernel_.data(), kernel_size_in_bytes));
size_t output_size_in_bytes = output_.size() * sizeof(ElementType);
buffers_.emplace_back(
se::DeviceMemoryBase(output_.data(), output_size_in_bytes));
allocations_ = std::make_unique<BufferAllocations>(buffers_);
input_alloc_ =
std::make_unique<BufferAllocation>(0, input_size_in_bytes, 0);
kernel_alloc_ =
std::make_unique<BufferAllocation>(1, kernel_size_in_bytes, 0);
output_alloc_ =
std::make_unique<BufferAllocation>(2, output_size_in_bytes, 0);
BufferAllocation::Slice input_slice(input_alloc_.get(), 0,
input_size_in_bytes);
BufferAllocation::Slice kernel_slice(kernel_alloc_.get(), 0,
kernel_size_in_bytes);
BufferAllocation::Slice output_slice(output_alloc_.get(), 0,
output_size_in_bytes);
auto primitive_type = primitive_util::NativeToPrimitiveType<ElementType>();
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_dims);
Shape kernel_shape = ShapeUtil::MakeShape(primitive_type, kernel_dims);
Shape output_shape = ShapeUtil::MakeShape(primitive_type, output_dims);
auto dnums = MakeConvolutionDimensionNumbers(convolution_rank);
auto window = MakeWindow(convolution_rank);
return ConvolutionThunk::Create(
{"convolution"}, options_, std::move(input_slice), input_shape,
std::move(kernel_slice), kernel_shape, std::move(output_slice),
output_shape, dnums, window,
1);
}
auto GetExecutionParams() {
return Thunk::ExecuteParams{nullptr, allocations_.get()};
}
private:
std::vector<ElementType> input_;
std::vector<ElementType> kernel_;
std::vector<ElementType> output_;
std::vector<MaybeOwningDeviceMemory> buffers_;
ConvolutionThunk::Options options_ = MakeConvolutionOptions();
std::unique_ptr<BufferAllocations> allocations_;
std::unique_ptr<BufferAllocation> input_alloc_;
std::unique_ptr<BufferAllocation> kernel_alloc_;
std::unique_ptr<BufferAllocation> output_alloc_;
};
template <typename ElementType>
void SuccessfulConvolution(int convolution_rank) {
ConvolutionThunkBuilder<ElementType> builder;
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, builder.Build(ConvolutionDimensions(convolution_rank)));
Thunk::ExecuteParams params = builder.GetExecutionParams();
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution1D) {
SuccessfulConvolution<TypeParam>(1);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution2D) {
SuccessfulConvolution<TypeParam>(2);
}
TYPED_TEST(ConvolutionThunkTypedTest, SuccessfulConvolution3D) {
SuccessfulConvolution<TypeParam>(3);
}
TEST(ConvolutionThunkTest, CreationErrorOnUnsupportedType) {
ConvolutionThunkBuilder<int> builder;
auto status_or_thunk = builder.Build();
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Unsupported element type (S32)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnTooHighConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk =
builder.Build(ConvolutionDimensions(4));
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (4)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnTooLowConvolutionRank) {
ConvolutionThunkBuilder<float> builder;
auto status_or_thunk =
builder.Build(ConvolutionDimensions(0));
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Incorrect convolution rank (0)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnMismatchedKernelBufferRank) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims_2d(2);
auto input_dims = MakeInputDims(dims_2d);
auto output_dims = MakeOutputDims(dims_2d);
ConvolutionDimensions dims_3d(3);
auto kernel_dims = MakeKernelDims(dims_3d);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Buffer ranks mismatch. Input rank (4) vs "
"kernel rank (5) vs output rank (4)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnMismatchedOutputBufferRank) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims_2d(2);
auto input_dims = MakeInputDims(dims_2d);
auto kernel_dims = MakeKernelDims(dims_2d);
ConvolutionDimensions dims_3d(3);
auto output_dims = MakeOutputDims(dims_3d);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr("Buffer ranks mismatch. Input rank (4) vs "
"kernel rank (4) vs output rank (5)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnBatchSizeMismatch) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims;
dims.batch_size = 1;
auto input_dims = MakeInputDims(dims);
auto kernel_dims = MakeKernelDims(dims);
dims.batch_size = 2;
auto output_dims = MakeOutputDims(dims);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(status_or_thunk.status().message(),
::testing::HasSubstr(
"Batch sizes mismatch. Input batch (1) vs output batch (2)"));
}
TEST(ConvolutionThunkTest, CreationErrorOnOutputChannelsMismatch) {
ConvolutionThunkBuilder<float> builder;
ConvolutionDimensions dims;
dims.output_channels = 3;
auto input_dims = MakeInputDims(dims);
auto kernel_dims = MakeKernelDims(dims);
dims.output_channels = 4;
auto output_dims = MakeOutputDims(dims);
auto status_or_thunk = builder.Build(input_dims, kernel_dims, output_dims);
EXPECT_EQ(status_or_thunk.status().code(),
absl::StatusCode::kInvalidArgument);
EXPECT_THAT(
status_or_thunk.status().message(),
::testing::HasSubstr("Output channels mismatch. Kernel filters count (3) "
"should be the same as output channels count (4)"));
}
TEST(ConvolutionThunkTest,
ExecutionErrorOnMissingThreadPoolInMultiThreadedMode) {
ConvolutionThunkBuilder<float> builder;
auto options = MakeConvolutionOptions();
options.multi_threaded = true;
builder.SetOptions(options);
TF_ASSERT_OK_AND_ASSIGN(auto thunk, builder.Build(ConvolutionDimensions()));
Thunk::ExecuteParams params = builder.GetExecutionParams();
params.intra_op_threadpool = nullptr;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsError());
auto status = execute_event.GetError();
EXPECT_EQ(absl::StatusCode::kInternal, status.code());
EXPECT_EQ(
"Intra-op threadpool must be provided for ConvolutionThunk in "
"multi-threaded mode.",
status.message());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/convolution_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/convolution_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a6c01e1e-ca85-4c99-9bc7-027086e9a935 | cpp | tensorflow/tensorflow | thunk | third_party/xla/xla/service/gpu/runtime/thunk.cc | third_party/xla/xla/backends/cpu/runtime/thunk_test.cc | #include "xla/service/gpu/runtime/thunk.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/executable_run_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/translate/mhlo_to_hlo/location_exporter.h"
#include "xla/service/global_device_id.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_executable_run_options.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/stream.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
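// CollectiveCliques hands out per-rank NCCL communicators from the cliques
// acquired for this execution; lookups return NotFound/Internal errors
// rather than crashing when a clique or rank is missing.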
Thunk::CollectiveCliques::CollectiveCliques(
NcclClique::AcquiredCliquesMap cliques_map)
: cliques_map_(std::move(cliques_map)) {}
absl::StatusOr<NcclApi::NcclCommHandle> Thunk::CollectiveCliques::GetComm(
const NcclCliqueKey& clique_key, int32_t rank) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
auto communicator = (*clique->second)->comm(rank);
if (!communicator.has_value()) {
return absl::InternalError(absl::StrCat("Communicator for rank ", rank,
" not found in a NCCL clique ",
clique_key.ToString()));
}
return *communicator;
}
absl::StatusOr<bool> Thunk::CollectiveCliques::is_local_clique(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->IsLocal();
}
absl::StatusOr<size_t> Thunk::CollectiveCliques::num_communicators(
const NcclCliqueKey& clique_key) const {
auto clique = cliques_map_.find(clique_key);
if (clique == cliques_map_.end()) {
return absl::NotFoundError(absl::StrCat("No clique found for clique key: ",
clique_key.ToString()));
}
return (*clique->second)->num_communicators();
}
using GlobalDeviceIdMap = Thunk::CollectiveExecuteParams::GlobalDeviceIdMap;
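// Maps a local device ordinal to its GlobalDeviceId. Without an explicit
// map (the single-process case) the ordinal itself is used as the global id.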
static absl::StatusOr<GlobalDeviceId> GetGlobalDeviceId(
const GlobalDeviceIdMap* device_id_map, int64_t local_device_ordinal) {
if (!device_id_map) return GlobalDeviceId(local_device_ordinal);
auto it = device_id_map->find(local_device_ordinal);
if (it == device_id_map->end())
return absl::NotFoundError(
absl::StrCat("No global device id found for local device ordinal: ",
local_device_ordinal));
return it->second;
}
absl::StatusOr<Thunk::CollectiveExecuteParams>
Thunk::CollectiveExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels) {
const GpuExecutableRunOptions* gpu_options =
run_options.run_options().gpu_executable_run_options();
auto* device_id_map = gpu_options && gpu_options->gpu_global_device_ids()
? &*gpu_options->gpu_global_device_ids()
: nullptr;
auto* nccl_callback = gpu_options && gpu_options->nccl_clique_id_callback()
? &gpu_options->nccl_clique_id_callback()
: nullptr;
TF_ASSIGN_OR_RETURN(GlobalDeviceId global_device_id,
GetGlobalDeviceId(device_id_map, local_device_ordinal));
return CollectiveExecuteParams(
run_options.stream()->parent(), run_options.run_options().run_id(),
async_streams, local_device_ordinal, global_device_id,
run_options.run_options().device_assignment(), device_id_map,
nccl_callback, collective_max_nchannels, p2p_max_nchannels);
}
Thunk::CollectiveExecuteParams::CollectiveExecuteParams(
se::StreamExecutor* executor, RunId run_id,
absl::Span<se::Stream* const> async_streams, int64_t local_device_ordinal,
GlobalDeviceId global_device_id, const DeviceAssignment* device_assn,
const GlobalDeviceIdMap* global_device_id_map,
const NcclCliqueIdCallback* nccl_clique_id_callback,
int64_t collective_max_nchannels, int64_t p2p_max_nchannels)
: executor(executor),
run_id(run_id),
async_streams(async_streams.begin(), async_streams.end()),
local_device_ordinal(local_device_ordinal),
global_device_id(global_device_id),
device_assn(device_assn),
global_device_id_map(global_device_id_map),
nccl_clique_id_callback(nccl_clique_id_callback),
collective_max_nchannels(collective_max_nchannels),
p2p_max_nchannels(p2p_max_nchannels) {}
Thunk::ExecuteParams Thunk::ExecuteParams::Create(
const ServiceExecutableRunOptions& run_options,
const BufferAllocations& buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques,
ExecutionStreamIdMap additional_compute_streams) {
return ExecuteParams(&buffer_allocations, stream, command_buffer_trace_stream,
collective_params, collective_cliques,
run_options.run_options().device_to_host_stream(),
run_options.run_options().host_to_device_stream(),
run_options.run_options().send_device_memory_function(),
run_options.run_options().recv_device_memory_function(),
run_options.run_options().ffi_execution_context(),
additional_compute_streams,
run_options.run_options().gpu_executable_run_options()
? run_options.run_options()
.gpu_executable_run_options()
->enable_mock_nccl_collectives()
: false);
}
Thunk::ExecuteParams Thunk::ExecuteParams::CloneWithNewAllocations(
const Thunk::ExecuteParams& params,
const BufferAllocations& buffer_allocations) {
return ExecuteParams(
&buffer_allocations, params.stream, params.command_buffer_trace_stream,
params.collective_params, params.collective_cliques,
params.device_to_host_stream, params.host_to_device_stream,
params.send_device_memory_function, params.recv_device_memory_function,
params.ffi_execution_context, params.additional_compute_streams);
}
Thunk::ExecuteParams::ExecuteParams(
const BufferAllocations* buffer_allocations, se::Stream* stream,
se::Stream* command_buffer_trace_stream,
CollectiveExecuteParams* collective_params,
CollectiveCliques* collective_cliques, se::Stream* device_to_host_stream,
se::Stream* host_to_device_stream,
SendDeviceMemoryFunction* send_device_memory_function,
RecvDeviceMemoryFunction* recv_device_memory_function,
const ffi::ExecutionContext* ffi_execution_context,
ExecutionStreamIdMap additional_compute_streams, bool mock_collectives)
: buffer_allocations(buffer_allocations),
stream(stream),
command_buffer_trace_stream(command_buffer_trace_stream),
collective_params(collective_params),
collective_cliques(collective_cliques),
device_to_host_stream(device_to_host_stream),
host_to_device_stream(host_to_device_stream),
send_device_memory_function(send_device_memory_function),
recv_device_memory_function(recv_device_memory_function),
ffi_execution_context(ffi_execution_context),
additional_compute_streams(additional_compute_streams),
mock_collectives(mock_collectives) {}
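// Stringifies a Thunk::Kind using an X-macro over every enumerator.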
absl::string_view Thunk::KindToString(Thunk::Kind kind) {
#define CASE(x) \
case Thunk::x: \
return #x
switch (kind) {
CASE(kDynamicSlice);
CASE(kCholesky);
CASE(kCommandBuffer);
CASE(kConditional);
CASE(kConvolution);
CASE(kConvolutionReorder);
CASE(kCopy);
CASE(kCopyDone);
CASE(kCubSort);
CASE(kCublasLtMatmul);
CASE(kCustomCall);
CASE(kCustomKernel);
CASE(kNcclAllGather);
CASE(kNcclAllGatherStart);
CASE(kNcclAllGatherDone);
CASE(kNcclAllReduce);
CASE(kNcclAllReduceStart);
CASE(kNcclAllReduceDone);
CASE(kNcclCollectiveBroadcast);
CASE(kNcclCollectiveBroadcastStart);
CASE(kNcclCollectiveBroadcastDone);
CASE(kNcclCollectivePermute);
CASE(kNcclCollectivePermuteStart);
CASE(kNcclCollectivePermuteDone);
CASE(kNcclReduceScatter);
CASE(kNcclReduceScatterStart);
CASE(kNcclReduceScatterDone);
CASE(kNcclAllToAll);
CASE(kNcclAllToAllStart);
CASE(kNcclAllToAllDone);
CASE(kNcclSend);
CASE(kNcclSendDone);
CASE(kNcclRecv);
CASE(kNcclRecvDone);
CASE(kFft);
CASE(kGemm);
CASE(kInfeed);
CASE(kKernel);
CASE(kMemset32BitValue);
CASE(kMemzero);
CASE(kNorm);
CASE(kOutfeed);
CASE(kSend);
CASE(kSendDone);
CASE(kPartitionId);
CASE(kReplicaId);
CASE(kRecv);
CASE(kRecvDone);
CASE(kSequential);
CASE(kTriangularSolve);
CASE(kWhile);
CASE(kWaitForStreams);
CASE(kCuDnn);
  }
#undef CASE
}
absl::StatusOr<se::Stream*> Thunk::GetStreamForExecution(
ExecutionStreamId stream_id, const ExecuteParams& params) {
if (stream_id == kDefaultExecutionStreamId) {
return params.stream;
}
auto iter = params.additional_compute_streams.find(stream_id);
if (iter == params.additional_compute_streams.end()) {
return absl::InvalidArgumentError("Invalid execution stream id.");
}
return iter->second;
}
std::ostream& operator<<(std::ostream& os, Thunk::Kind kind) {
return os << Thunk::KindToString(kind);
}
bool IsReductionCollective(Thunk::Kind kind) {
return kind == Thunk::kNcclAllReduce || kind == Thunk::kNcclAllReduceStart ||
kind == Thunk::kNcclReduceScatter ||
kind == Thunk::kNcclReduceScatterStart;
}
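// Derives ThunkInfo from an instruction: the instruction name becomes the
// profile annotation, and an explicit operation queue id from the backend
// config (when present) selects the execution stream.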
Thunk::ThunkInfo Thunk::ThunkInfo::WithProfileAnnotation(
const HloInstruction* instr) {
ThunkInfo thunk_info;
thunk_info.profile_annotation = instr->name();
auto gpu_backend_config = instr->backend_config<GpuBackendConfig>();
if (gpu_backend_config.ok()) {
thunk_info.execution_stream_id =
std::max<uint64_t>(kDefaultExecutionStreamId.value(),
gpu_backend_config->operation_queue_id());
}
return thunk_info;
}
bool Thunk::IsCollective() const {
switch (kind()) {
case kNcclAllGather:
case kNcclAllGatherStart:
case kNcclAllGatherDone:
case kNcclAllReduce:
case kNcclAllReduceStart:
case kNcclAllReduceDone:
case kNcclCollectiveBroadcast:
case kNcclCollectiveBroadcastStart:
case kNcclCollectiveBroadcastDone:
case kNcclCollectivePermute:
case kNcclCollectivePermuteStart:
case kNcclCollectivePermuteDone:
case kNcclReduceScatter:
case kNcclReduceScatterStart:
case kNcclReduceScatterDone:
case kNcclAllToAll:
case kNcclAllToAllStart:
case kNcclAllToAllDone:
case kNcclSend:
case kNcclSendDone:
case kNcclRecv:
case kNcclRecvDone:
return true;
default:
return false;
}
}
}
} | #include "xla/backends/cpu/runtime/thunk.h"
#include <cstdint>
#include <utility>
#include "xla/executable_run_options.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
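// Test-only subclass that exposes construction of Thunk::ExecuteState.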
class ThunkExecuteStateTestHelper : public Thunk {
public:
static ExecuteState CreateExecuteState(int64_t parallel_tasks) {
return ExecuteState(parallel_tasks);
}
};
TEST(ThunkTest, OkExecuteEventSingleton) {
auto event = Thunk::OkExecuteEventSingleton();
ASSERT_TRUE(event.IsConcrete());
}
TEST(ThunkExecuteStateTest, OneTask) {
auto execute_state =
ThunkExecuteStateTestHelper::CreateExecuteState(1);
EXPECT_FALSE(execute_state.event.IsAvailable());
execute_state.Notify();
EXPECT_TRUE(execute_state.event.IsAvailable());
}
TEST(ThunkExecuteStateTest, MultipleTasks) {
int parallel_tasks = 10;
auto execute_state =
ThunkExecuteStateTestHelper::CreateExecuteState(parallel_tasks);
for (int i = 0; i < parallel_tasks; ++i) {
EXPECT_FALSE(execute_state.event.IsAvailable());
execute_state.Notify();
}
EXPECT_TRUE(execute_state.event.IsAvailable());
}
TEST(ThunkTest, ExecuteSession) {
Thunk::ExecuteSession session(2, 2);
EXPECT_EQ(session.num_workers(), 0);
{
Thunk::ExecuteSession::Lock lock = session.Join();
EXPECT_TRUE(lock);
EXPECT_EQ(session.num_workers(), 1);
}
EXPECT_EQ(session.num_workers(), 0);
Thunk::ExecuteSession::Lock lock0 = session.TryJoin();
Thunk::ExecuteSession::Lock lock1 = session.TryJoin();
EXPECT_TRUE(lock0);
EXPECT_TRUE(lock1);
EXPECT_EQ(session.num_workers(), 2);
Thunk::ExecuteSession::Lock lock2 = session.TryJoin();
EXPECT_FALSE(lock2);
EXPECT_EQ(session.num_workers(), 2);
Thunk::ExecuteSession::Lock lock3 = session.Join();
EXPECT_TRUE(lock3);
EXPECT_EQ(session.num_workers(), 3);
auto sink = [](Thunk::ExecuteSession::Lock lock) {};
sink(std::move(lock0));
sink(std::move(lock1));
sink(std::move(lock3));
EXPECT_EQ(session.num_workers(), 0);
Thunk::ExecuteSession::Lock lock4 = session.Join();
Thunk::ExecuteSession::Lock lock5 = lock4;
EXPECT_TRUE(lock4);
EXPECT_TRUE(lock5);
EXPECT_EQ(session.num_workers(), 2);
}
TEST(ThunkTest, CollectiveExecuteParams) {
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
TF_ASSERT_OK_AND_ASSIGN(auto params,
Thunk::CollectiveExecuteParams::Create(&run_options));
EXPECT_NE(params.collectives, nullptr);
CpuExecutableRunOptions cpu_run_options;
cpu_run_options.set_collectives(
reinterpret_cast<CollectivesInterface*>(0x12345678));
run_options.set_cpu_executable_run_options(&cpu_run_options);
TF_ASSERT_OK_AND_ASSIGN(params,
Thunk::CollectiveExecuteParams::Create(&run_options));
EXPECT_EQ(params.collectives,
reinterpret_cast<CollectivesInterface*>(0x12345678));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
01d2348a-58d6-42fa-82b6-d0616d993f89 | cpp | tensorflow/tensorflow | conditional_thunk | third_party/xla/xla/service/gpu/runtime/conditional_thunk.cc | third_party/xla/xla/backends/cpu/runtime/conditional_thunk_test.cc | #include "xla/service/gpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
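// ConditionalThunk reads a branch selector (an int32 index or a bool
// predicate) from device memory and executes exactly one of its pre-built
// branch thunk sequences.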
ConditionalThunk::ConditionalThunk(
ThunkInfo thunk_info, ConditionalThunkConfig config,
const BufferAllocation::Slice& branch_index_buffer_index)
: Thunk(Kind::kConditional, thunk_info),
config_(std::move(config)),
branch_index_buffer_index_(branch_index_buffer_index) {}
absl::Status ConditionalThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
if (config_.branch_index_is_bool) {
TF_RET_CHECK(config_.branch_thunks.size() == 2);
} else {
TF_RET_CHECK(!config_.branch_thunks.empty());
}
for (auto& branch_thunk : config_.branch_thunks) {
TF_RETURN_IF_ERROR(branch_thunk->Prepare(params, resource_requests));
}
return absl::OkStatus();
}
absl::Status ConditionalThunk::Initialize(const InitializeParams& params) {
if (config_.branch_index_is_bool) {
TF_RET_CHECK(config_.branch_thunks.size() == 2);
} else {
TF_RET_CHECK(!config_.branch_thunks.empty());
}
for (auto& branch_thunk : config_.branch_thunks) {
TF_RETURN_IF_ERROR(branch_thunk->Initialize(params));
}
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(
config_.branch_index_is_bool ? sizeof(bool) : sizeof(int32_t)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
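// Copies the selector into pinned host memory allocated in Initialize(),
// blocks until the copy completes, and dispatches the chosen branch. A bool
// predicate selects branch 0 when true; an out-of-range index clamps to the
// last branch, matching HLO Conditional semantics.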
absl::Status ConditionalThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
auto branch_index_or_pred = [&]() -> std::variant<int32_t*, bool*> {
absl::MutexLock lock(&mutex_);
se::StreamExecutor* executor = stream.parent();
if (config_.branch_index_is_bool) {
return reinterpret_cast<bool*>(predicates_.at(executor)->opaque());
} else {
return reinterpret_cast<int32_t*>(predicates_.at(executor)->opaque());
}
}();
se::DeviceMemoryBase branch_index_address =
params.buffer_allocations->GetDeviceAddress(branch_index_buffer_index_);
if (config_.branch_index_is_bool) {
TF_RETURN_IF_ERROR(stream.Memcpy(std::get<bool*>(branch_index_or_pred),
branch_index_address, sizeof(bool)));
} else {
TF_RETURN_IF_ERROR(stream.Memcpy(std::get<int32_t*>(branch_index_or_pred),
branch_index_address, sizeof(int32_t)));
}
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return Internal("Failed to retrieve branch_index value on stream %p: %s.",
&stream, blocked.message());
}
int32_t branch_index = std::visit(
VariantVisitor{[](int32_t* branch_index) { return *branch_index; },
[](bool* pred) { return *pred ? 0 : 1; }},
branch_index_or_pred);
std::string_view branch_kind =
std::visit(VariantVisitor{[](int32_t*) { return "index"; },
[](bool*) { return "pred"; }},
branch_index_or_pred);
VLOG(3) << "ConditionalThunk: branch_index=" << branch_index
<< " (kind: " << branch_kind << ")";
if (branch_index < 0 || branch_index >= config_.branch_count) {
branch_index = config_.branch_count - 1;
}
TF_RETURN_IF_ERROR(
config_.branch_thunks[branch_index]->ExecuteOnStream(params));
return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/backends/cpu/runtime/thunk_testlib.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ConditionalThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice branch_index_slice(&alloc, 0, sizeof(int32_t));
BufferAllocation::Slice read_slice(&alloc, 10, 10);
std::vector<ThunkSequence> branch_sequences(1);
branch_sequences[0].push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, ConditionalThunk::Create({"conditional"}, branch_index_slice,
std::move(branch_sequences)));
EXPECT_EQ(thunk->buffer_uses().size(), 2);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(branch_index_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(read_slice));
}
TEST(ConditionalThunkTest, ResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice branch_index_slice(&alloc, 0, sizeof(int32_t));
auto token = Resource::Create(Resource::kToken);
std::vector<ThunkSequence> branch_sequences(1);
branch_sequences[0].push_back(
std::make_unique<ResourceUseThunk>(ResourceUse::Read(token)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, ConditionalThunk::Create({"conditional"}, branch_index_slice,
std::move(branch_sequences)));
EXPECT_EQ(thunk->resource_uses().size(), 1);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(token));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/conditional_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/conditional_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
715a0fe5-3ad4-439c-9e62-b63e79c3c1be | cpp | tensorflow/tensorflow | logical_id_thunk | third_party/xla/xla/backends/cpu/runtime/logical_id_thunk.cc | third_party/xla/xla/backends/cpu/runtime/logical_id_thunk_test.cc | #include "xla/backends/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_placer.h"
#include "xla/service/global_device_id.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu::internal {
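// Maps the LogicalIdKind template parameter onto the matching Thunk::Kind.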
static Thunk::Kind ToThunkKind(LogicalIdKind logical_id_kind) {
switch (logical_id_kind) {
case LogicalIdKind::kPartitionId:
return Thunk::Kind::kPartitionId;
case LogicalIdKind::kReplicaId:
return Thunk::Kind::kReplicaId;
}
}
template <LogicalIdKind logical_id_kind>
absl::StatusOr<std::unique_ptr<LogicalIdThunk<logical_id_kind>>>
LogicalIdThunk<logical_id_kind>::Create(
Info info, BufferAllocation::Slice logical_id_buffer) {
return absl::WrapUnique(
new LogicalIdThunk(std::move(info), logical_id_buffer));
}
template <LogicalIdKind logical_id_kind>
LogicalIdThunk<logical_id_kind>::LogicalIdThunk(
Info info, BufferAllocation::Slice logical_id_buffer)
: Thunk(ToThunkKind(logical_id_kind), info),
logical_id_buffer_(logical_id_buffer) {}
template <LogicalIdKind logical_id_kind>
static constexpr auto ToString() {
if constexpr (logical_id_kind == LogicalIdKind::kPartitionId) {
return "Partition";
} else if constexpr (logical_id_kind == LogicalIdKind::kReplicaId) {
return "Replica";
}
}
template <LogicalIdKind logical_id_kind>
absl::StatusOr<int32_t> LogicalIdThunk<logical_id_kind>::GetIdForDevice(
const DeviceAssignment* device_assignment, GlobalDeviceId device_id) const {
if constexpr (logical_id_kind == LogicalIdKind::kPartitionId) {
return device_assignment->PartitionIdForDevice(device_id);
} else if constexpr (logical_id_kind == LogicalIdKind::kReplicaId) {
return device_assignment->ReplicaIdForDevice(device_id);
}
}
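// Execute resolves the calling device's replica or partition id from the
// run's DeviceAssignment and writes it into the 4-byte destination buffer.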
template <LogicalIdKind logical_id_kind>
tsl::AsyncValueRef<typename LogicalIdThunk<logical_id_kind>::ExecuteEvent>
LogicalIdThunk<logical_id_kind>::Execute(const ExecuteParams& params) {
tsl::profiler::TraceMe trace([&] { return TraceMeEncode(); });
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase logical_id_data,
params.buffer_allocations->GetDeviceAddress(logical_id_buffer_));
TF_RET_CHECK(logical_id_data.size() == sizeof(int32_t))
<< "Logical id buffer must be able to fit logical id value";
TF_RET_CHECK(params.collective_params)
<< ToString<logical_id_kind>() << " id requires collective params";
TF_ASSIGN_OR_RETURN(
int32_t logical_id,
GetIdForDevice(params.collective_params->device_assignment,
params.collective_params->global_device_id));
VLOG(3) << absl::StreamFormat("%s id: %d", ToString<logical_id_kind>(),
logical_id);
VLOG(3) << absl::StreamFormat(" logical_id: slice %s (%p)",
logical_id_buffer_.ToString(),
logical_id_data.opaque());
std::memcpy(logical_id_data.opaque(), &logical_id, sizeof(int32_t));
return OkExecuteEvent();
}
template <LogicalIdKind logical_id_kind>
Thunk::BufferUses LogicalIdThunk<logical_id_kind>::buffer_uses() const {
return {BufferUse::Write(logical_id_buffer_)};
}
template class LogicalIdThunk<LogicalIdKind::kReplicaId>;
template class LogicalIdThunk<LogicalIdKind::kPartitionId>;
} | #include "xla/backends/cpu/runtime/logical_id_thunk.h"
#include <cstdint>
#include <limits>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/executable_run_options.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
absl::StatusOr<DeviceAssignment> CreateDeviceAssignment(
std::vector<std::vector<int64_t>> devices) {
const auto computation_count = devices.size();
if (devices.empty()) {
return absl::InternalError("Devices must not be empty.");
}
const auto replica_count = devices[0].size();
DeviceAssignment device_assignment(replica_count, computation_count);
for (int64_t partition = 0; partition < computation_count; ++partition) {
for (int64_t replica = 0; replica < replica_count; ++replica) {
device_assignment(replica, partition) = devices[partition][replica];
}
}
return device_assignment;
}
TEST(LogicalIdThunkTest, GetReplicaId) {
std::vector<int32_t> dst(1, std::numeric_limits<int32_t>::min());
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), sizeof(int32_t)));
BufferAllocation alloc(0, sizeof(int32_t), 0);
BufferAllocation::Slice id_slice(&alloc, /*offset=*/0, /*size=*/sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kReplicaId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk, ReplicaIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0, 1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], 0);
}
TEST(LogicalIdThunkTest, GetPartitionId) {
std::vector<int32_t> dst(2, std::numeric_limits<int32_t>::min());
std::vector<MaybeOwningDeviceMemory> buffers;
static constexpr auto kDataSize = 2 * sizeof(int32_t);
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), kDataSize));
BufferAllocation alloc(0, kDataSize, 0);
BufferAllocation::Slice id_slice(&alloc, /*offset=*/sizeof(int32_t), /*size=*/sizeof(int32_t));
std::string name(Thunk::KindToString(Thunk::Kind::kPartitionId));
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
PartitionIdThunk::Create({name}, id_slice));
BufferAllocations allocations(buffers);
TF_ASSERT_OK_AND_ASSIGN(DeviceAssignment device_assn,
CreateDeviceAssignment({{0}, {1}}));
ExecutableRunOptions run_options;
run_options.set_device_ordinal(0);
run_options.set_device_assignment(&device_assn);
TF_ASSERT_OK_AND_ASSIGN(Thunk::CollectiveExecuteParams collective_params,
Thunk::CollectiveExecuteParams::Create(&run_options));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.collective_params = &collective_params;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(dst[0], std::numeric_limits<int32_t>::min());
EXPECT_EQ(dst[1], 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/logical_id_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/logical_id_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fed78beb-029c-47b3-aad3-c4c29d5b52fc | cpp | tensorflow/tensorflow | sort_thunk | third_party/xla/xla/backends/cpu/runtime/sort_thunk.cc | third_party/xla/xla/backends/cpu/runtime/sort_thunk_test.cc | #include "xla/backends/cpu/runtime/sort_thunk.h"
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
static absl::Status VerifySortInputs(absl::Span<const SortThunk::Input> inputs,
int64_t dimension) {
if (inputs.empty()) {
return Internal("Inputs must not be empty");
}
auto equal = Shape::Equal().IgnoreElementType();
const Shape& shape = inputs[0].shape;
for (const SortThunk::Input& input : inputs) {
if (!equal(shape, input.shape)) {
return Internal("Inputs must have the same shape");
}
}
int64_t sort_dimension =
dimension >= 0 ? dimension : shape.rank() + dimension;
if (shape.rank() <= sort_dimension) {
return Internal(
"Shape of dimensions [%s] can't be sorted along dimension %d",
absl::StrJoin(shape.dimensions(), ","), dimension);
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<SortThunk>> SortThunk::Create(
Info info, absl::Span<const Input> inputs, int64_t dimension,
bool is_stable, LessThan less_than) {
TF_RETURN_IF_ERROR(VerifySortInputs(inputs, dimension));
return absl::WrapUnique(new SortThunk(std::move(info), inputs, dimension,
is_stable, std::move(less_than)));
}
absl::StatusOr<std::unique_ptr<SortThunk>> SortThunk::Create(
Info info, absl::Span<const Input> inputs, int64_t dimension,
bool is_stable, std::string comparator_name) {
TF_RETURN_IF_ERROR(VerifySortInputs(inputs, dimension));
return absl::WrapUnique(new SortThunk(std::move(info), inputs, dimension,
is_stable, std::move(comparator_name)));
}
SortThunk::SortThunk(Info info, absl::Span<const Input> inputs,
int64_t dimension, bool is_stable, LessThan less_than)
: Thunk(Kind::kSort, std::move(info)),
inputs_(inputs.begin(), inputs.end()),
dimension_(dimension),
is_stable_(is_stable),
less_than_(std::move(less_than)),
less_than_ptr_(&*less_than_) {}
SortThunk::SortThunk(Info info, absl::Span<const Input> inputs,
int64_t dimension, bool is_stable,
std::string comparator_name)
: Thunk(Kind::kSort, std::move(info)),
inputs_(inputs.begin(), inputs.end()),
dimension_(dimension),
is_stable_(is_stable),
comparator_name_(std::move(comparator_name)),
less_than_ptr_(nullptr) {}
namespace {
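// std::sort operates on values, not on columns of parallel arrays, so the
// sort below goes through proxy types: Ref/DRef point at one logical row
// across all input arrays, Value/DValue own a copy of such a row, and
// Ptr/DPtr plus SortIterator expose the rows as a random-access range.
// The D-prefixed variants size their storage at runtime and serve input
// counts beyond the statically instantiated arities.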
static constexpr size_t kMaxElementSize = 16;
template <size_t n>
struct Ref;
struct DRef;
template <size_t n>
struct Value {
Value(const Ref<n>& ref);
const void* compared_value(size_t i) const { return value[i].data(); }
using ValueStorage = std::array<std::byte, kMaxElementSize>;
alignas(alignof(std::max_align_t)) std::array<ValueStorage, n> value;
std::array<uint8_t, n> value_sizes;
};
struct DValue {
DValue(const DRef& ref);
const void* compared_value(size_t i) const { return value[i].data(); }
using ValueStorage = std::array<std::byte, kMaxElementSize>;
std::vector<ValueStorage> value;
std::vector<uint8_t> value_sizes;
size_t n;
};
template <size_t n>
struct Ref {
Ref(std::array<std::byte*, n> ptr, std::array<uint8_t, n> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes) {}
Ref& operator=(const Value<n>& value);
Ref& operator=(const Ref<n>& other);
const void* compared_value(size_t i) const { return ptr[i]; }
std::array<std::byte*, n> ptr;
std::array<uint8_t, n> ptr_sizes;
};
struct DRef {
DRef(std::vector<std::byte*> ptr, std::vector<uint8_t> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes), n(ptr.size()) {}
DRef& operator=(const DValue& value);
DRef& operator=(const DRef& other);
const void* compared_value(size_t i) const { return ptr[i]; }
std::vector<std::byte*> ptr;
std::vector<uint8_t> ptr_sizes;
const size_t n;
};
template <size_t n>
Value<n>::Value(const Ref<n>& ref) : value_sizes(ref.ptr_sizes) {
for (size_t i = 0; i < n; ++i) {
std::memcpy(value[i].data(), ref.ptr[i], ref.ptr_sizes[i]);
}
}
DValue::DValue(const DRef& ref)
: value_sizes(ref.ptr_sizes), n(ref.ptr.size()) {
value.reserve(n);
for (size_t i = 0; i < n; ++i) {
value.emplace_back();
std::memcpy(value[i].data(), ref.ptr[i], ref.ptr_sizes[i]);
}
}
template <size_t n>
Ref<n>& Ref<n>::operator=(const Value<n>& value) {
DCHECK(ptr_sizes == value.value_sizes);
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], value.value[i].data(), value.value_sizes[i]);
}
return *this;
}
DRef& DRef::operator=(const DValue& value) {
DCHECK(ptr_sizes == value.value_sizes);
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], value.value[i].data(), value.value_sizes[i]);
}
return *this;
}
template <size_t n>
Ref<n>& Ref<n>::operator=(const Ref<n>& other) {
DCHECK(ptr_sizes == other.ptr_sizes);
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], other.ptr[i], other.ptr_sizes[i]);
}
return *this;
}
DRef& DRef::operator=(const DRef& other) {
DCHECK(ptr_sizes == other.ptr_sizes);
const size_t n = other.ptr.size();
for (size_t i = 0; i < n; ++i) {
std::memcpy(ptr[i], other.ptr[i], other.ptr_sizes[i]);
}
return *this;
}
template <size_t n>
void swap(const Ref<n>& lhs, const Ref<n>& rhs) {
DCHECK(lhs.ptr_sizes == rhs.ptr_sizes);
for (size_t i = 0; i < n; ++i) {
std::array<std::byte, kMaxElementSize> tmp;
std::memcpy(tmp.data(), lhs.ptr[i], lhs.ptr_sizes[i]);
std::memcpy(lhs.ptr[i], rhs.ptr[i], rhs.ptr_sizes[i]);
std::memcpy(rhs.ptr[i], tmp.data(), lhs.ptr_sizes[i]);
}
}
void swap(const DRef& lhs, const DRef& rhs) {
DCHECK(lhs.ptr_sizes == rhs.ptr_sizes);
const size_t n = lhs.ptr.size();
for (size_t i = 0; i < n; ++i) {
std::array<std::byte, kMaxElementSize> tmp;
std::memcpy(tmp.data(), lhs.ptr[i], lhs.ptr_sizes[i]);
std::memcpy(lhs.ptr[i], rhs.ptr[i], rhs.ptr_sizes[i]);
std::memcpy(rhs.ptr[i], tmp.data(), lhs.ptr_sizes[i]);
}
}
template <size_t n>
struct Ptr {
using difference_type = std::ptrdiff_t;
Ptr() = default;
Ptr(std::array<std::byte*, n> ptr, std::array<uint8_t, n> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes) {}
Ref<n> operator*() const { return Ref<n>{ptr, ptr_sizes}; }
Ptr& operator+=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] += diff * ptr_sizes[i];
return *this;
}
Ptr& operator-=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] -= diff * ptr_sizes[i];
return *this;
}
Ptr operator+(difference_type diff) const {
std::array<std::byte*, n> upd;
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] + diff * ptr_sizes[i];
return Ptr{upd, ptr_sizes};
}
Ptr operator-(difference_type diff) const {
std::array<std::byte*, n> upd;
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] - diff * ptr_sizes[i];
return Ptr{upd, ptr_sizes};
}
difference_type operator-(const Ptr& rhs) const {
DCHECK(ptr_sizes == rhs.ptr_sizes);
return (ptr[0] - rhs.ptr[0]) / ptr_sizes[0];
}
bool operator==(const Ptr& rhs) const { return ptr[0] == rhs.ptr[0]; }
bool operator!=(const Ptr& rhs) const { return ptr[0] != rhs.ptr[0]; }
bool operator>(const Ptr& rhs) const { return ptr[0] > rhs.ptr[0]; }
bool operator<(const Ptr& rhs) const { return ptr[0] < rhs.ptr[0]; }
bool operator>=(const Ptr& rhs) const { return ptr[0] >= rhs.ptr[0]; }
bool operator<=(const Ptr& rhs) const { return ptr[0] <= rhs.ptr[0]; }
std::array<std::byte*, n> ptr;
std::array<uint8_t, n> ptr_sizes;
};
struct DPtr {
using difference_type = std::ptrdiff_t;
DPtr() = default;
DPtr(std::vector<std::byte*> ptr, std::vector<uint8_t> ptr_sizes)
: ptr(ptr), ptr_sizes(ptr_sizes), n(ptr.size()) {}
DRef operator*() const { return DRef{ptr, ptr_sizes}; }
DPtr& operator+=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] += diff * ptr_sizes[i];
return *this;
}
DPtr& operator-=(difference_type diff) {
for (size_t i = 0; i < n; ++i) ptr[i] -= diff * ptr_sizes[i];
return *this;
}
DPtr operator+(difference_type diff) const {
std::vector<std::byte*> upd(n);
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] + diff * ptr_sizes[i];
return DPtr{upd, ptr_sizes};
}
DPtr operator-(difference_type diff) const {
std::vector<std::byte*> upd(n);
for (size_t i = 0; i < n; ++i) upd[i] = ptr[i] - diff * ptr_sizes[i];
return DPtr{upd, ptr_sizes};
}
difference_type operator-(const DPtr& rhs) const {
DCHECK(ptr_sizes == rhs.ptr_sizes);
return (ptr[0] - rhs.ptr[0]) / ptr_sizes[0];
}
bool operator==(const DPtr& rhs) const { return ptr[0] == rhs.ptr[0]; }
bool operator!=(const DPtr& rhs) const { return ptr[0] != rhs.ptr[0]; }
bool operator>(const DPtr& rhs) const { return ptr[0] > rhs.ptr[0]; }
bool operator<(const DPtr& rhs) const { return ptr[0] < rhs.ptr[0]; }
bool operator>=(const DPtr& rhs) const { return ptr[0] >= rhs.ptr[0]; }
bool operator<=(const DPtr& rhs) const { return ptr[0] <= rhs.ptr[0]; }
std::vector<std::byte*> ptr;
std::vector<uint8_t> ptr_sizes;
size_t n;
};
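// Random-access iterator over strided Ptr/DPtr rows, which is what lets the
// standard sort algorithms run directly on the physical layout.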
template <class Value, class Ref, class Ptr>
class SortIterator {
public:
using iterator_category = std::random_access_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = Value;
using reference = Ref;
using pointer = Ptr;
SortIterator() = default;
SortIterator(pointer ptr, difference_type stride)
: ptr_(ptr), stride_(stride) {}
SortIterator(const SortIterator& other) = default;
SortIterator& operator=(const SortIterator& other) = default;
SortIterator(SortIterator&& other) = default;
SortIterator& operator=(SortIterator&& other) = default;
reference operator*() const { return *ptr_; }
difference_type operator-(const SortIterator& rhs) const {
return (ptr_ - rhs.ptr_) / stride_;
}
SortIterator& operator+=(difference_type diff) {
ptr_ += diff * stride_;
return *this;
}
SortIterator& operator-=(difference_type diff) {
ptr_ -= diff * stride_;
return *this;
}
SortIterator& operator++() {
ptr_ += stride_;
return *this;
}
SortIterator& operator--() {
ptr_ -= stride_;
return *this;
}
SortIterator operator+(difference_type diff) const {
return SortIterator(ptr_ + diff * stride_, stride_);
}
SortIterator operator-(difference_type diff) const {
return SortIterator(ptr_ - diff * stride_, stride_);
}
bool operator==(const SortIterator& rhs) const { return ptr_ == rhs.ptr_; }
bool operator!=(const SortIterator& rhs) const { return ptr_ != rhs.ptr_; }
bool operator>(const SortIterator& rhs) const { return ptr_ > rhs.ptr_; }
bool operator<(const SortIterator& rhs) const { return ptr_ < rhs.ptr_; }
bool operator>=(const SortIterator& rhs) const { return ptr_ >= rhs.ptr_; }
bool operator<=(const SortIterator& rhs) const { return ptr_ <= rhs.ptr_; }
private:
pointer ptr_;
difference_type stride_ = 1;
};
struct SortDims {
int64_t outer_dim_size;
int64_t sort_dim_size;
int64_t inner_dim_size;
int64_t num_iterations;
};
}
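// Decomposes the physical shape around the sort dimension into
// outer x sort x inner extents; each of the outer*inner combinations forms
// an independent 1D sort of length sort_dim_size.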
static SortDims GetSortDims(const Shape& shape, int64_t dimension) {
int64_t sort_dimension =
dimension >= 0 ? dimension : shape.rank() + dimension;
Shape physical_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);
auto logical_to_physical = LayoutUtil::MakeLogicalToPhysical(shape.layout());
sort_dimension = logical_to_physical[sort_dimension];
auto product = [](absl::Span<const int64_t> dims) {
return absl::c_accumulate(dims, int64_t{1}, std::multiplies<>());
};
absl::Span<const int64_t> dimensions = physical_shape.dimensions();
int64_t outer_dim_size = product(dimensions.subspan(0, sort_dimension));
int64_t sort_dim_size = dimensions[sort_dimension];
int64_t inner_dim_size = product(dimensions.subspan(sort_dimension + 1));
int64_t num_iterations = outer_dim_size * inner_dim_size;
return SortDims{outer_dim_size, sort_dim_size, inner_dim_size,
num_iterations};
}
template <size_t n>
static void SortInplace(const SortDims& sort_dims, int64_t offset,
absl::Span<se::DeviceMemoryBase> data,
absl::Span<const Shape> shapes, bool is_stable,
SortThunk::LessThan* less_than) {
std::array<std::byte*, n> ptr;
std::array<uint8_t, n> ptr_sizes;
for (size_t i = 0; i < n; ++i) {
std::byte* base = reinterpret_cast<std::byte*>(data[i].opaque());
ptr_sizes[i] = primitive_util::ByteWidth(shapes[i].element_type());
ptr[i] = base + offset * ptr_sizes[i];
}
auto compare = [&](const auto& a, const auto& b) {
std::array<const void*, 2 * n> data;
for (size_t i = 0, j = 0; i < n; i += 1, j += 2) {
data[j] = a.compared_value(i);
data[j + 1] = b.compared_value(i);
}
return (*less_than)(data.data());
};
SortIterator<Value<n>, Ref<n>, Ptr<n>> begin(
Ptr<n>(ptr, ptr_sizes), /*stride=*/sort_dims.inner_dim_size);
if (is_stable) {
std::stable_sort(begin, begin + sort_dims.sort_dim_size, compare);
} else {
std::sort(begin, begin + sort_dims.sort_dim_size, compare);
}
}
static void DSortInplace(const SortDims& sort_dims, int64_t offset,
absl::Span<se::DeviceMemoryBase> data,
absl::Span<const Shape> shapes, bool is_stable,
SortThunk::LessThan* less_than, size_t n) {
std::vector<std::byte*> ptr(n);
std::vector<uint8_t> ptr_sizes(n);
for (size_t i = 0; i < n; ++i) {
std::byte* base = reinterpret_cast<std::byte*>(data[i].opaque());
ptr_sizes[i] = primitive_util::ByteWidth(shapes[i].element_type());
ptr[i] = base + offset * ptr_sizes[i];
}
auto compare = [&](const auto& a, const auto& b) {
std::vector<const void*> data(2 * n);
for (size_t i = 0, j = 0; i < n; i += 1, j += 2) {
data[j] = a.compared_value(i);
data[j + 1] = b.compared_value(i);
}
return (*less_than)(data.data());
};
SortIterator<DValue, DRef, DPtr> begin(DPtr(ptr, ptr_sizes),
sort_dims.inner_dim_size);
if (is_stable) {
std::stable_sort(begin, begin + sort_dims.sort_dim_size, compare);
} else {
std::sort(begin, begin + sort_dims.sort_dim_size, compare);
}
}
static absl::Status SortInplace(absl::Span<se::DeviceMemoryBase> data,
absl::Span<const Shape> shapes,
int64_t dimension, bool is_stable,
SortThunk::LessThan* less_than) {
SortDims sort_dims = GetSortDims(shapes[0], dimension);
for (int64_t i = 0; i < sort_dims.num_iterations; ++i) {
int64_t inner_idx = i % sort_dims.inner_dim_size;
int64_t offset = inner_idx + (i - inner_idx) * sort_dims.sort_dim_size;
auto sort = [&](auto num_inputs) {
SortInplace<decltype(num_inputs)::value>(sort_dims, offset, data, shapes,
is_stable, less_than);
};
auto dsort = [&](size_t num_inputs) {
DSortInplace(sort_dims, offset, data, shapes, is_stable, less_than,
num_inputs);
};
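// Use a statically-sized sort for up to 25 inputs; larger arities fall back
// to the dynamic version, which pays for a heap allocation on every
// comparator invocation.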
switch (data.size()) {
case 1:
sort(std::integral_constant<size_t, 1>{});
break;
case 2:
sort(std::integral_constant<size_t, 2>{});
break;
case 3:
sort(std::integral_constant<size_t, 3>{});
break;
case 4:
sort(std::integral_constant<size_t, 4>{});
break;
case 5:
sort(std::integral_constant<size_t, 5>{});
break;
case 6:
sort(std::integral_constant<size_t, 6>{});
break;
case 7:
sort(std::integral_constant<size_t, 7>{});
break;
case 8:
sort(std::integral_constant<size_t, 8>{});
break;
case 9:
sort(std::integral_constant<size_t, 9>{});
break;
case 10:
sort(std::integral_constant<size_t, 10>{});
break;
case 11:
sort(std::integral_constant<size_t, 11>{});
break;
case 12:
sort(std::integral_constant<size_t, 12>{});
break;
case 13:
sort(std::integral_constant<size_t, 13>{});
break;
case 14:
sort(std::integral_constant<size_t, 14>{});
break;
case 15:
sort(std::integral_constant<size_t, 15>{});
break;
case 16:
sort(std::integral_constant<size_t, 16>{});
break;
case 17:
sort(std::integral_constant<size_t, 17>{});
break;
case 18:
sort(std::integral_constant<size_t, 18>{});
break;
case 19:
sort(std::integral_constant<size_t, 19>{});
break;
case 20:
sort(std::integral_constant<size_t, 20>{});
break;
case 21:
sort(std::integral_constant<size_t, 21>{});
break;
case 22:
sort(std::integral_constant<size_t, 22>{});
break;
case 23:
sort(std::integral_constant<size_t, 23>{});
break;
case 24:
sort(std::integral_constant<size_t, 24>{});
break;
case 25:
sort(std::integral_constant<size_t, 25>{});
break;
default:
dsort(data.size());
break;
}
}
return absl::OkStatus();
}
tsl::AsyncValueRef<SortThunk::ExecuteEvent> SortThunk::Execute(
const ExecuteParams& params) {
tsl::profiler::TraceMe trace([&] { return TraceMeEncode(); });
VLOG(3) << absl::StreamFormat(
"Sort %d inputs along dimension %d (is_stable=%v)", inputs_.size(),
dimension_, is_stable_);
absl::InlinedVector<se::DeviceMemoryBase, 8> data;
data.reserve(inputs_.size());
absl::InlinedVector<Shape, 8> shapes;
shapes.reserve(inputs_.size());
for (const Input& input : inputs_) {
size_t idx = data.size();
TF_ASSIGN_OR_RETURN(
data.emplace_back(),
params.buffer_allocations->GetDeviceAddress(input.slice));
shapes.push_back(input.shape);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(data.back().opaque(),
data.back().size());
VLOG(3) << absl::StreamFormat(" sort input #%d: %s in slice %s (%p)", idx,
input.shape.ToString(true),
input.slice.ToString(), data.back().opaque());
}
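// Resolve the comparator lazily on first execution; the atomic pointer lets
// subsequent executions skip the mutex entirely.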
LessThan* less_than = less_than_ptr_.load();
if (ABSL_PREDICT_FALSE(less_than == nullptr)) {
TF_ASSIGN_OR_RETURN(
FunctionRegistry::Comparator comparator,
params.function_registry->FindComparator(comparator_name_));
absl::MutexLock lock(&mutex_);
less_than_ = [comparator](const void** data) {
bool result;
comparator(&result, nullptr, data, nullptr, nullptr, nullptr);
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&result, sizeof(result));
return result;
};
less_than_ptr_.store(less_than = &*less_than_);
}
TF_RETURN_IF_ERROR(SortInplace(absl::MakeSpan(data), shapes, dimension_,
is_stable_, less_than));
return OkExecuteEvent();
}
SortThunk::BufferUses SortThunk::buffer_uses() const {
BufferUses buffer_uses;
buffer_uses.reserve(inputs_.size());
for (const Input& input : inputs_) {
buffer_uses.emplace_back(BufferUse::Write(input.slice));
}
return buffer_uses;
}
} | #include "xla/backends/cpu/runtime/sort_thunk.h"
#include <array>
#include <cstddef>
#include <cstdint>
#include <numeric>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::cpu {
namespace {
class SortThunkTest : public testing::TestWithParam<bool> {};
static bool LessThan(const void** data) {
auto* lhs = reinterpret_cast<const float*>(data[0]);
auto* rhs = reinterpret_cast<const float*>(data[1]);
return *lhs < *rhs;
}
class LessThanComparator : public Thunk::FunctionRegistry {
public:
static void LessThanWrapper(bool* result, const void*, const void** data,
const void*, const void*, const void*) {
*result = LessThan(data);
}
absl::StatusOr<Comparator> FindComparator(std::string_view name) final {
DCHECK_EQ(name, "less_than");
return LessThanWrapper;
}
};
TEST_P(SortThunkTest, Sort1D) {
bool is_stable = GetParam();
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {2.0, 4.0, 1.0, 3.0};
std::vector<int32_t> indices = {0, 1, 2, 3};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc0(0, size_in_bytes, 0);
BufferAllocation alloc1(1, size_in_bytes, 0);
BufferAllocation::Slice slice0(&alloc0, 0, size_in_bytes);
BufferAllocation::Slice slice1(&alloc1, 0, size_in_bytes);
Shape data_shape = ShapeUtil::MakeShape(F32, {4});
Shape indices_shape = ShapeUtil::MakeShape(S32, {4});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, SortThunk::Create(
{"sort"}, {{slice0, data_shape}, {slice1, indices_shape}},
0, is_stable, LessThan));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected_data = {1.0, 2.0, 3.0, 4.0};
std::vector<int32_t> expected_indices = {2, 0, 3, 1};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
TEST_P(SortThunkTest, DynamicSort1D) {
bool is_stable = GetParam();
constexpr int num_of_empty_slices = 33;
constexpr int total_num_of_slices = num_of_empty_slices + 2;
constexpr int data_size = 31;
constexpr float starting_value = 5.0f;
std::array<float, data_size> data{
17.0f, 16.0f, 5.0f, 10.0f, 30.0f, 8.0f, 9.0f, 21.0f,
14.0f, 32.0f, 29.0f, 28.0f, 19.0f, 12.0f, 25.0f, 22.0f,
18.0f, 35.0f, 34.0f, 23.0f, 7.0f, 13.0f, 26.0f, 33.0f,
15.0f, 24.0f, 20.0f, 31.0f, 6.0f, 27.0f, 11.0f};
std::array<int32_t, data_size> indices;
std::iota(indices.begin(), indices.end(), 0);
std::array<uint32_t, data_size * num_of_empty_slices> empty;
const size_t data_size_in_bytes = data.size() * sizeof(float);
const size_t ind_size_in_bytes = indices.size() * sizeof(int32_t);
const size_t empty_size_in_bytes = empty.size() * sizeof(uint32_t);
const BufferAllocation alloc0(0, data_size_in_bytes, 0);
const BufferAllocation alloc1(1, ind_size_in_bytes, 0);
const BufferAllocation rest(2, empty_size_in_bytes, 0);
const BufferAllocation::Slice slice0(&alloc0, 0, data_size_in_bytes);
const BufferAllocation::Slice slice1(&alloc1, 0, ind_size_in_bytes);
const Shape data_shape = ShapeUtil::MakeShape(F32, {data_size});
const Shape indices_shape = ShapeUtil::MakeShape(S32, {data_size});
const Shape rest_shape = ShapeUtil::MakeShape(U32, {data_size});
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(data.data(), data_size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), ind_size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(empty.data(), empty_size_in_bytes));
BufferAllocations allocations(buffers);
std::array<SortThunk::Input, total_num_of_slices> inputs{
{{slice0, data_shape}, {slice1, indices_shape}}};
for (int i = 0; i < num_of_empty_slices; ++i) {
constexpr size_t empty_slice_in_bytes = data_size * sizeof(uint32_t);
inputs[i + 2].slice = BufferAllocation::Slice(
&rest, i * empty_slice_in_bytes, empty_slice_in_bytes);
inputs[i + 2].shape = rest_shape;
}
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, SortThunk::Create({"sort"}, inputs,
0, is_stable, LessThan));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::array<float, data_size> expected_data;
std::iota(expected_data.begin(), expected_data.end(), starting_value);
const std::array<int32_t, data_size> expected_indices{
2, 28, 20, 5, 6, 3, 30, 13, 21, 8, 24, 1, 0, 16, 12, 26,
7, 15, 19, 25, 14, 22, 29, 11, 10, 4, 27, 9, 23, 18, 17};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
TEST_P(SortThunkTest, Sort2D) {
bool is_stable = GetParam();
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {2.0, 4.0, 1.0, 3.0};
std::vector<int32_t> indices = {0, 1, 2, 3};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc0(0, size_in_bytes, 0);
BufferAllocation alloc1(1, size_in_bytes, 0);
BufferAllocation::Slice slice0(&alloc0, 0, size_in_bytes);
BufferAllocation::Slice slice1(&alloc1, 0, size_in_bytes);
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim0,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
0, is_stable, "less_than"));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
LessThanComparator less_than_comparator;
params.function_registry = &less_than_comparator;
auto execute_event0 = sort_dim0->Execute(params);
tsl::BlockUntilReady(execute_event0);
ASSERT_FALSE(execute_event0.IsError());
std::vector<float> expected_data = {1.0, 3.0, 2.0, 4.0};
std::vector<int32_t> expected_indices = {2, 3, 0, 1};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
data = {4.0, 3.0, 2.0, 1.0};
indices = {0, 1, 2, 3};
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim1,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
/*dimension=*/1, /*is_stable=*/false, "less_than"));
auto execute_event1 = sort_dim1->Execute(params);
tsl::BlockUntilReady(execute_event1);
ASSERT_FALSE(execute_event1.IsError());
expected_data = {3.0, 4.0, 1.0, 2.0};
expected_indices = {1, 0, 3, 2};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
TEST_P(SortThunkTest, Sort2DWithLayout) {
bool is_stable = GetParam();
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> data = {4.0, 3.0, 2.0, 1.0};
std::vector<int32_t> indices = {0, 1, 2, 3};
size_t size_in_bytes = data.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(data.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(indices.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation alloc0(0, size_in_bytes, 0);
BufferAllocation alloc1(1, size_in_bytes, 0);
BufferAllocation::Slice slice0(&alloc0, 0, size_in_bytes);
BufferAllocation::Slice slice1(&alloc1, 0, size_in_bytes);
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
*data_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
Shape indices_shape = ShapeUtil::MakeShape(S32, {2, 2});
*indices_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim0,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
0, is_stable, "less_than"));
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
LessThanComparator less_than_comparator;
params.function_registry = &less_than_comparator;
auto execute_event0 = sort_dim0->Execute(params);
tsl::BlockUntilReady(execute_event0);
ASSERT_FALSE(execute_event0.IsError());
std::vector<float> expected_data = {3.0, 4.0, 1.0, 2.0};
std::vector<int32_t> expected_indices = {1, 0, 3, 2};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
data = {2.0, 4.0, 1.0, 3.0};
indices = {0, 1, 2, 3};
TF_ASSERT_OK_AND_ASSIGN(
auto sort_dim1,
SortThunk::Create({"sort"},
{{slice0, data_shape}, {slice1, indices_shape}},
/*dimension=*/1, /*is_stable=*/false, "less_than"));
auto execute_event1 = sort_dim1->Execute(params);
tsl::BlockUntilReady(execute_event1);
ASSERT_FALSE(execute_event1.IsError());
expected_data = {1.0, 3.0, 2.0, 4.0};
expected_indices = {2, 3, 0, 1};
EXPECT_EQ(data, expected_data);
EXPECT_EQ(indices, expected_indices);
}
void BM_DynamicSort1D(::testing::benchmark::State& state, bool is_stable) {
const int total_num_of_slices = state.range(0);
const int num_of_empty_slices = total_num_of_slices - 2;
constexpr int data_size = 31;
const std::array<float, data_size> data{
17.0f, 16.0f, 5.0f, 10.0f, 30.0f, 8.0f, 9.0f, 21.0f,
14.0f, 32.0f, 29.0f, 28.0f, 19.0f, 12.0f, 25.0f, 22.0f,
18.0f, 35.0f, 34.0f, 23.0f, 7.0f, 13.0f, 26.0f, 33.0f,
15.0f, 24.0f, 20.0f, 31.0f, 6.0f, 27.0f, 11.0f};
std::array<int32_t, data_size> indices;
std::iota(indices.begin(), indices.end(), 0);
std::vector<uint32_t> empty(data_size * num_of_empty_slices);
const size_t data_size_in_bytes = data.size() * sizeof(float);
const size_t ind_size_in_bytes = indices.size() * sizeof(int32_t);
const size_t empty_size_in_bytes = empty.size() * sizeof(uint32_t);
const BufferAllocation alloc0(0, data_size_in_bytes, 0);
const BufferAllocation alloc1(1, ind_size_in_bytes, 0);
const BufferAllocation rest(2, empty_size_in_bytes, 0);
const BufferAllocation::Slice slice0(&alloc0, 0, data_size_in_bytes);
const BufferAllocation::Slice slice1(&alloc1, 0, ind_size_in_bytes);
const Shape data_shape = ShapeUtil::MakeShape(F32, {data_size});
const Shape indices_shape = ShapeUtil::MakeShape(S32, {data_size});
const Shape rest_shape = ShapeUtil::MakeShape(U32, {data_size});
for (auto s : state) {
state.PauseTiming();
auto data_clone(data);
auto indices_clone(indices);
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(
se::DeviceMemoryBase(data_clone.data(), data_size_in_bytes));
buffers.emplace_back(
se::DeviceMemoryBase(indices_clone.data(), ind_size_in_bytes));
buffers.emplace_back(
se::DeviceMemoryBase(empty.data(), empty_size_in_bytes));
BufferAllocations allocations(buffers);
std::vector<SortThunk::Input> inputs(total_num_of_slices);
inputs[0] = {slice0, data_shape};
inputs[1] = {slice1, indices_shape};
for (int i = 0; i < num_of_empty_slices; ++i) {
constexpr size_t empty_slice_in_bytes = data_size * sizeof(uint32_t);
inputs[i + 2].slice = BufferAllocation::Slice(
&rest, i * empty_slice_in_bytes, empty_slice_in_bytes);
inputs[i + 2].shape = rest_shape;
}
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
state.ResumeTiming();
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, SortThunk::Create({"sort"}, inputs,
0, is_stable, LessThan));
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
}
}
void BM_StableDynamicSort1D(::testing::benchmark::State& state) {
BM_DynamicSort1D(state, true);
}
void BM_UnstableDynamicSort1D(::testing::benchmark::State& state) {
BM_DynamicSort1D(state, false);
}
BENCHMARK(BM_StableDynamicSort1D)
->MeasureProcessCPUTime()
->Arg(35)
->Arg(50)
->Arg(100);
BENCHMARK(BM_UnstableDynamicSort1D)
->MeasureProcessCPUTime()
->Arg(35)
->Arg(50)
->Arg(100);
INSTANTIATE_TEST_SUITE_P(SortThunk, SortThunkTest, testing::Bool(),
testing::PrintToStringParamName());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/sort_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/sort_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ec2ecf4-73bd-4fdb-aa81-263d6baa14ff | cpp | tensorflow/tensorflow | outfeed_thunk | third_party/xla/xla/service/gpu/runtime/outfeed_thunk.cc | third_party/xla/xla/backends/cpu/runtime/outfeed_thunk_test.cc | #include "xla/service/gpu/runtime/outfeed_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_transfer_manager.h"
#include "xla/service/gpu/outfeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
OutfeedThunk::OutfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> source_slices)
: Thunk(Kind::kOutfeed, thunk_info),
source_slices_(std::move(source_slices)) {}
absl::Status OutfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Outfeeding from GPU";
OutfeedManager* outfeed_manager =
GpuTransferManager::GetOrCreateOutfeedManager(stream.parent());
ShapeTree<std::unique_ptr<OutfeedBuffer>>* output_buffers =
outfeed_manager->BlockingGetNextDestination();
if (source_slices_.empty()) {
return absl::OkStatus();
}
const int64_t leaf_count = output_buffers->leaf_count();
TF_RET_CHECK(source_slices_.size() == leaf_count)
<< "Mismatch between number of outfeed inputs (" << source_slices_.size()
<< ") and outputs (" << leaf_count << ")";
auto output_leaf_it = output_buffers->leaf_begin();
for (int64_t index = 0; index < leaf_count; ++index) {
const ShapeIndex& shape_index = output_leaf_it->first;
std::unique_ptr<OutfeedBuffer>& buffer = output_leaf_it->second;
++output_leaf_it;
const Shape& output_shape =
ShapeUtil::GetSubshape(output_buffers->shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(source_slices_[index].shape, output_shape))
<< "Mismatch between outfeed output buffer shape "
<< ShapeUtil::HumanStringWithLayout(output_shape)
<< " and outfeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_slices_[index].shape);
BufferAllocation::Slice source_slice = source_slices_[index].slice;
if (!source_slice.allocation())
return Internal("outfeed source missing buffer allocation");
se::DeviceMemoryBase data_address =
buffer_allocations.GetDeviceAddress(source_slice);
TF_RETURN_IF_ERROR(stream.Memcpy(buffer->destination()->untyped_data(),
data_address, buffer->length()));
TF_RETURN_IF_ERROR(stream.DoHostCallback([&buffer]() { buffer->Done(); }));
}
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Outfeeding from GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/outfeed_thunk.h"
#include <memory>
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(OutfeedThunkTest, BufferAndResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice outfeed_slice(&alloc, 10, 40);
OutfeedThunk::OutfeedBuffer outfeed_buffer = {
outfeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
auto consume_token = Resource::Create(Resource::kToken);
auto produce_token = Resource::Create(Resource::kToken);
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
OutfeedThunk::Create({"outfeed"}, {outfeed_buffer},
{consume_token, produce_token}));
EXPECT_EQ(thunk->buffer_uses().size(), 1);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Read(outfeed_slice));
EXPECT_EQ(thunk->resource_uses().size(), 2);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(consume_token));
EXPECT_EQ(thunk->resource_uses()[1], ResourceUse::Write(produce_token));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/outfeed_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/outfeed_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
229b23b1-a820-4bd3-ae84-ec9d912fadd4 | cpp | tensorflow/tensorflow | infeed_thunk | third_party/xla/xla/service/gpu/runtime/infeed_thunk.cc | third_party/xla/xla/backends/cpu/runtime/infeed_thunk_test.cc | #include "xla/service/gpu/runtime/infeed_thunk.h"
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/gpu_transfer_manager.h"
#include "xla/service/gpu/infeed_manager.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
InfeedThunk::InfeedThunk(ThunkInfo thunk_info,
std::vector<ShapedSlice> dest_slices)
: Thunk(Kind::kInfeed, thunk_info), dest_slices_(std::move(dest_slices)) {}
absl::Status InfeedThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& buffer_allocations = *params.buffer_allocations;
VLOG(2) << "Infeeding to GPU";
ShapeTree<se::DeviceMemoryHandle> source_buffers =
GpuTransferManager::GetOrCreateInfeedManager(stream.parent())
->BlockingGetNextDestination();
size_t index = 0;
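// Copy each infeed source leaf into its destination slice; the shapes must
// be bitcast-equivalent since the copy is a raw memcpy on the stream.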
for (auto& source : source_buffers.leaves()) {
const ShapeIndex& shape_index = source.first;
se::DeviceMemoryHandle& buffer = source.second;
const Shape& source_shape =
ShapeUtil::GetSubshape(source_buffers.shape(), shape_index);
TF_RET_CHECK(
ShapeUtil::ReshapeIsBitcast(dest_slices_[index].shape, source_shape))
<< "Mismatch between infeed source buffer shape "
<< ShapeUtil::HumanStringWithLayout(source_shape)
<< " and infeed dest buffer shape "
<< ShapeUtil::HumanStringWithLayout(dest_slices_[index].shape);
se::DeviceMemoryBase dest_address =
buffer_allocations.GetDeviceAddress(dest_slices_[index++].slice);
TF_RETURN_IF_ERROR(
stream.Memcpy(&dest_address, buffer.memory(), buffer.memory().size()));
}
CHECK_EQ(index, dest_slices_.size())
<< "Infeed did not populate all destination buffers";
absl::Status block_status = stream.BlockHostUntilDone();
if (!block_status.ok()) {
return Internal("Failed to complete data transfer on stream %p: %s",
&stream, block_status.message());
}
VLOG(2) << "Infeeding to GPU complete";
return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/infeed_thunk.h"
#include <memory>
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(InfeedThunkTest, BufferAndResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice infeed_slice(&alloc, 10, 40);
InfeedThunk::InfeedBuffer infeed_buffer = {
infeed_slice,
ShapeUtil::MakeShape(F32, {10}),
};
auto consume_token = Resource::Create(Resource::kToken);
auto produce_token = Resource::Create(Resource::kToken);
TF_ASSERT_OK_AND_ASSIGN(auto thunk,
InfeedThunk::Create({"infeed"}, {infeed_buffer},
{consume_token, produce_token}));
EXPECT_EQ(thunk->buffer_uses().size(), 1);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(infeed_slice));
EXPECT_EQ(thunk->resource_uses().size(), 2);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(consume_token));
EXPECT_EQ(thunk->resource_uses()[1], ResourceUse::Write(produce_token));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/infeed_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/infeed_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f0dde765-55b6-493c-95a6-d09131a7d8c2 | cpp | tensorflow/tensorflow | while_thunk | third_party/xla/xla/service/gpu/runtime/while_thunk.cc | third_party/xla/xla/backends/cpu/runtime/while_thunk_test.cc | #include "xla/service/gpu/runtime/while_thunk.h"
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
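// Thread-local stack of loop induction variables: the front entry belongs
// to the innermost while loop executing on this thread, which is what lets
// nested thunks query their iteration via CurrentLoopIteration(depth).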
static std::list<int64_t>& LoopCounters() {
static thread_local std::list<int64_t> loop_counters;
return loop_counters;
}
absl::StatusOr<int64_t> WhileThunk::CurrentLoopIteration(int64_t depth) {
if (depth >= LoopCounters().size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Loop depth %d is greater than the number of tracked loops %d", depth,
LoopCounters().size()));
}
auto counter = LoopCounters().begin();
std::advance(counter, depth);
return *counter;
}
WhileThunk::WhileThunk(
ThunkInfo thunk_info,
const BufferAllocation::Slice& condition_result_buffer_index,
std::unique_ptr<SequentialThunk> condition_thunk_sequence,
std::unique_ptr<SequentialThunk> body_thunk_sequence,
std::optional<int64_t> trip_count)
: Thunk(Kind::kWhile, thunk_info),
condition_result_buffer_index_(condition_result_buffer_index),
condition_thunk_sequence_(std::move(condition_thunk_sequence)),
body_thunk_sequence_(std::move(body_thunk_sequence)),
trip_count_(trip_count) {}
absl::Status WhileThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
TF_RETURN_IF_ERROR(
condition_thunk_sequence_->Prepare(params, resource_requests));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Prepare(params, resource_requests));
return absl::OkStatus();
}
absl::Status WhileThunk::Initialize(const InitializeParams& params) {
TF_RETURN_IF_ERROR(condition_thunk_sequence_->Initialize(params));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Initialize(params));
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(sizeof(bool)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
absl::Status WhileThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
int64_t& iter = LoopCounters().emplace_front();
absl::Cleanup cleanup = [&] { LoopCounters().pop_front(); };
se::DeviceMemoryBase condition_result_data =
params.buffer_allocations->GetDeviceAddress(
condition_result_buffer_index_);
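// Fast path: a statically known trip count lets us skip the condition
// computation and the device-to-host predicate copy on every iteration.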
if (trip_count_.has_value()) {
VLOG(2) << "Executing WhileThunk for " << *trip_count_ << " iterations";
for (iter = 0; iter < trip_count_; ++iter) {
VLOG(3) << "Executing iteration # " << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
}
return absl::OkStatus();
}
bool* condition_result = [&] {
absl::MutexLock lock(&mutex_);
return reinterpret_cast<bool*>(predicates_.at(stream.parent())->opaque());
}();
while (true) {
VLOG(3) << "Executing WhileThunk condition computation; iter=" << iter;
TF_RETURN_IF_ERROR(condition_thunk_sequence_->ExecuteOnStream(params));
TF_RETURN_IF_ERROR(
stream.Memcpy(condition_result, condition_result_data, sizeof(bool)));
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return absl::InternalError(absl::StrFormat(
"Failed to complete all kernels launched on stream %p: %s", &stream,
blocked.message()));
}
VLOG(3) << "condition_result = " << *condition_result;
if (!*condition_result) {
VLOG(3) << "Break WhileThunk loop; iter=" << iter;
break;
}
VLOG(3) << "Executing WhileThunk body computation; iter=" << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
++iter;
}
return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/while_thunk.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/backends/cpu/runtime/thunk_testlib.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "Eigen/ThreadPool"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla::cpu {
namespace {
TEST(WhileThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice pred_slice(&alloc, 0, sizeof(char));
BufferAllocation::Slice cond_read_slice(&alloc, 10, 10);
BufferAllocation::Slice body_read_slice(&alloc, 20, 10);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(cond_read_slice)));
ThunkSequence body_sequence;
body_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(body_read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence)));
EXPECT_EQ(thunk->buffer_uses().size(), 3);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(pred_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(cond_read_slice));
EXPECT_EQ(thunk->buffer_uses()[2], BufferUse::Read(body_read_slice));
}
TEST(WhileThunkTest, ResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice pred_slice(&alloc, 0, sizeof(char));
auto token0 = Resource::Create(Resource::kToken);
auto token1 = Resource::Create(Resource::kToken);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<ResourceUseThunk>(ResourceUse::Read(token0)));
ThunkSequence body_sequence;
body_sequence.push_back(
std::make_unique<ResourceUseThunk>(ResourceUse::Read(token1)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence)));
EXPECT_EQ(thunk->resource_uses().size(), 2);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(token0));
EXPECT_EQ(thunk->resource_uses()[1], ResourceUse::Read(token1));
}
class CondThunk : public Thunk {
public:
CondThunk(size_t counter, BufferAllocation::Slice pred_slice)
: Thunk(Kind::kKernel, {"cond"}),
counter_(counter + 1),
pred_slice_(pred_slice) {}
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams& params) final {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase predicate_mem,
params.buffer_allocations->GetDeviceAddress(pred_slice_));
bool* predicate = reinterpret_cast<bool*>(predicate_mem.opaque());
*predicate = counter_.fetch_sub(1) > 1;
params.intra_op_threadpool->getPool()->Schedule(
[event] { event.SetStateConcrete(); });
return event;
}
BufferUses buffer_uses() const final {
return {BufferUse::Write(pred_slice_)};
}
private:
std::atomic<size_t> counter_;
BufferAllocation::Slice pred_slice_;
};
class BodyThunk : public Thunk {
public:
explicit BodyThunk(BufferAllocation::Slice counter_slice)
: Thunk(Kind::kKernel, {"body"}), counter_slice_(counter_slice) {}
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams& params) final {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase counter_mem,
params.buffer_allocations->GetDeviceAddress(counter_slice_));
int32_t* counter = reinterpret_cast<int32_t*>(counter_mem.opaque());
++*counter;
params.intra_op_threadpool->getPool()->Schedule(
[event] { event.SetStateConcrete(); });
return event;
}
BufferUses buffer_uses() const final { return {}; }
private:
BufferAllocation::Slice counter_slice_;
};
TEST(WhileThunkTest, NonBlockingExecute) {
static constexpr size_t kNumIterations = 100;
BufferAllocation pred_alloc(0, sizeof(char), 0);
BufferAllocation cnt_alloc(1, sizeof(int32_t), 0);
BufferAllocation::Slice pred_slice(&pred_alloc, 0, sizeof(char));
BufferAllocation::Slice cnt_slice(&cnt_alloc, 0, sizeof(int32_t));
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<char> predicate = {false};
std::vector<int32_t> counter = {0};
buffers.emplace_back(se::DeviceMemoryBase(predicate.data(), sizeof(char)));
buffers.emplace_back(se::DeviceMemoryBase(counter.data(), sizeof(int32_t)));
BufferAllocations allocations(buffers);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<CondThunk>(kNumIterations, pred_slice));
ThunkSequence body_sequence;
body_sequence.push_back(std::make_unique<BodyThunk>(cnt_slice));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence)));
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "while-test", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.intra_op_threadpool = &device;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(counter[0], kNumIterations);
}
TEST(WhileThunkTest, NonBlockingExecuteWithTripCount) {
static constexpr size_t kNumIterations = 100;
BufferAllocation pred_alloc(0, sizeof(char), 0);
BufferAllocation cnt_alloc(1, sizeof(int32_t), 0);
BufferAllocation::Slice pred_slice(&pred_alloc, 0, sizeof(char));
BufferAllocation::Slice cnt_slice(&cnt_alloc, 0, sizeof(int32_t));
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<char> predicate = {false};
std::vector<int32_t> counter = {0};
buffers.emplace_back(se::DeviceMemoryBase(predicate.data(), sizeof(char)));
buffers.emplace_back(se::DeviceMemoryBase(counter.data(), sizeof(int32_t)));
BufferAllocations allocations(buffers);
ThunkSequence cond_sequence;
ThunkSequence body_sequence;
body_sequence.push_back(std::make_unique<BodyThunk>(cnt_slice));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, WhileThunk::Create(
{"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence), kNumIterations));
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "while-test", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.intra_op_threadpool = &device;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(counter[0], kNumIterations);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/while_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/while_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c297c433-ff38-48f0-b470-44bddb8dde91 | cpp | tensorflow/tensorflow | thunk_executor | third_party/xla/xla/backends/cpu/runtime/thunk_executor.cc | third_party/xla/xla/backends/cpu/runtime/thunk_executor_test.cc | #include "xla/backends/cpu/runtime/thunk_executor.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla::cpu {
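// Derives executor-wide properties from the dependency graph: source nodes
// (no in-edges), sink nodes (no out-edges), a transitive reduction of the
// edges, and whether the whole sequence can run sequentially (a linear
// dependency chain, only small buffers, or few thunks).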
ThunkExecutor::ThunkExecutor(ThunkSequence thunk_sequence,
std::vector<NodeDef> nodes_defs,
const ThunkExecutor::Options& options)
: thunk_sequence_(std::move(thunk_sequence)),
options_(options),
num_thunks_(thunk_sequence_.size()),
nodes_defs_(std::move(nodes_defs)),
is_sequential_(true) {
for (NodeId i = 0; i < nodes_defs_.size(); ++i) {
if (nodes_defs_[i].in_edges.empty()) {
source_.push_back(i);
}
if (nodes_defs_[i].out_edges.empty()) {
sink_.push_back(i);
}
}
int64_t num_erased_edges = RunTransitiveReductionAndUpdatePriorities();
for (NodeId i = 1; i < nodes_defs_.size() && is_sequential_; ++i) {
is_sequential_ &= (absl::c_count(nodes_defs_[i].in_edges, i - 1) != 0);
}
auto uses_small_buffers = [&](const std::unique_ptr<Thunk>& thunk) {
return absl::c_all_of(thunk->buffer_uses(), [&](const BufferUse& use) {
return use.slice().size() <= options.execute_sequential_buffer_threshold;
});
};
bool small_buffers = absl::c_all_of(thunk_sequence_, uses_small_buffers);
is_sequential_ |= small_buffers;
is_sequential_ |=
thunk_sequence_.size() <= options.execute_sequential_num_thunks_threshold;
VLOG(2) << absl::StreamFormat(
"Constructed ThunkExecutor with %d nodes: #source_nodes=%d "
"#sink_nodes=%d, #erased_edges=%d, is_sequential=%v, small_buffers=%v",
nodes_defs_.size(), source_.size(), sink_.size(), num_erased_edges,
is_sequential_, small_buffers);
DCHECK((!source_.empty() && !sink_.empty() && !thunk_sequence_.empty()) ||
(source_.empty() && sink_.empty() && thunk_sequence_.empty()));
}
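// Builds a node per thunk and adds an edge between every pair of thunks
// whose buffer or resource read/write sets conflict. Edges are discovered
// in program order, so the in/out edge lists end up sorted.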
absl::StatusOr<ThunkExecutor> ThunkExecutor::Create(
ThunkSequence thunk_sequence, const ThunkExecutor::Options& options) {
std::vector<NodeDef> defs(thunk_sequence.size());
std::vector<BufferUse::ReadWriteSet> buffer_rwsets(thunk_sequence.size());
std::vector<ResourceUse::ReadWriteSet> resource_rwsets(thunk_sequence.size());
for (NodeId i = 0; i < thunk_sequence.size(); ++i) {
defs[i].id = i;
Thunk& thunk = *thunk_sequence[i];
buffer_rwsets[i].AddAll(thunk.buffer_uses());
resource_rwsets[i].AddAll(thunk.resource_uses());
for (NodeId j = 0; j < i; ++j) {
if (buffer_rwsets[j].HasConflicts(buffer_rwsets[i]) ||
resource_rwsets[j].HasConflicts(resource_rwsets[i])) {
defs[j].out_edges.push_back(i);
defs[i].in_edges.push_back(j);
}
}
}
for (NodeId i = 0; i < defs.size(); ++i) {
DCHECK(absl::c_is_sorted(defs[i].out_edges));
DCHECK(absl::c_is_sorted(defs[i].in_edges));
}
return ThunkExecutor(std::move(thunk_sequence), std::move(defs), options);
}
ThunkExecutor::ExecuteState::Node::Node(const NodeDef& node_def)
: counter(node_def.in_edges.size()), out_edges(&node_def.out_edges) {}
ThunkExecutor::ExecuteState::ExecuteState(ThunkExecutor* executor,
Thunk::TaskRunner* runner)
: executor(executor),
runner(runner),
nodes(executor->nodes_defs().size()),
execute_event(tsl::MakeConstructedAsyncValueRef<ExecuteEvent>()),
pending_sink_nodes(executor->sink().size()),
abort(false) {
DCHECK(runner == nullptr || static_cast<bool>(*runner))
<< "`runner` must be nullptr or a valid TaskRunner";
NodeStorage* node = nodes.data();
for (const NodeDef& node_def : executor->nodes_defs()) {
new (node++) Node(node_def);
}
}
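// Fast paths: an empty sequence completes immediately and a single thunk
// executes inline; sequential graphs skip the concurrent machinery entirely.
// Otherwise execution starts from the source nodes using either a FIFO or a
// priority ready queue.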
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent> ThunkExecutor::Execute(
const Thunk::ExecuteParams& params) {
if (ABSL_PREDICT_FALSE(num_thunks_ == 0)) {
return Thunk::OkExecuteEventSingleton();
}
if (ABSL_PREDICT_FALSE(num_thunks_ == 1)) {
return thunk_sequence_[0]->Execute(params);
}
if (is_sequential_) {
return ExecuteSequential(params);
}
auto state = std::make_unique<ExecuteState>(this, params.task_runner);
if (options_.use_priority_ready_queue) {
    Execute(state.get(), params, PriorityReadyQueue(nodes_defs_, source_),
            /*lock=*/nullptr);
  } else {
    Execute(state.get(), params, FifoReadyQueue(source_),
            /*lock=*/nullptr);
  }
if (ABSL_PREDICT_TRUE(state->execute_event.IsAvailable())) {
return std::move(state->execute_event);
}
tsl::AsyncValueRef<ExecuteEvent> execute_event = state->execute_event;
execute_event.AndThen([state = std::move(state)] {
auto cnt = state->pending_sink_nodes.load(std::memory_order_acquire);
DCHECK_EQ(cnt, 0)
<< "All sink nodes must be completed before execute_event is marked "
"available.";
});
return execute_event;
}
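// Runs thunks one after another, suspending at the first thunk whose event
// is not immediately available and resuming from that position once the
// event becomes ready.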
tsl::AsyncValueRef<ThunkExecutor::ExecuteEvent>
ThunkExecutor::ExecuteSequential(const Thunk::ExecuteParams& params) {
for (auto it = thunk_sequence_.begin(); it != thunk_sequence_.end(); ++it) {
Thunk& thunk = **it;
auto execute_event = thunk.Execute(params);
if (ABSL_PREDICT_TRUE(thunk.IsOkExecuteEvent(execute_event))) {
continue;
}
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
execute_event.AndThen([this, ¶ms, it, event](absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
event.SetError(std::move(status));
} else {
ResumeExecuteSequential(it + 1, params, std::move(event));
}
});
return event;
}
if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
return execute_event;
}
}
return Thunk::OkExecuteEventSingleton();
}
void ThunkExecutor::ResumeExecuteSequential(
ThunkIterator it, const Thunk::ExecuteParams& params,
tsl::AsyncValueRef<ExecuteEvent> event) {
for (; it != thunk_sequence_.end(); ++it) {
Thunk& thunk = **it;
auto execute_event = thunk.Execute(params);
if (ABSL_PREDICT_TRUE(thunk.IsOkExecuteEvent(execute_event))) {
continue;
}
if (ABSL_PREDICT_FALSE(!execute_event.IsAvailable())) {
execute_event.AndThen(
[this, ¶ms, it, event = std::move(event)](absl::Status status) {
if (ABSL_PREDICT_FALSE(!status.ok())) {
event.SetError(std::move(status));
} else {
ResumeExecuteSequential(it + 1, params, std::move(event));
}
});
return;
}
if (ABSL_PREDICT_FALSE(execute_event.IsError())) {
event.SetError(execute_event.GetError());
return;
}
}
event.SetStateConcrete();
}
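// Concurrent execution loop: pops ready nodes off the queue, offloads part
// of an oversized queue to the task runner, executes each thunk, and
// processes its out-edges either inline (event already available) or from an
// AndThen continuation.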
template <typename ReadyQueue>
void ThunkExecutor::Execute(ExecuteState* state,
const Thunk::ExecuteParams& params,
ReadyQueue ready_queue,
Thunk::ExecuteSession::Lock lock) {
DCHECK(!ready_queue.Empty()) << "Ready queue must not be empty";
tsl::profiler::TraceMe trace("ThunkExecutor::Execute");
bool has_runner = state->runner != nullptr;
bool has_lock = static_cast<bool>(lock);
int64_t split_threshold = params.session.split_threshold();
while (!ready_queue.Empty()) {
DCHECK_EQ(static_cast<bool>(lock), has_lock)
<< "Execute session lock must not be lost in the middle of the loop";
NodeId id = ready_queue.Pop();
ExecuteState::Node& node = state->node(id);
int64_t cnt = node.counter.load(std::memory_order_acquire);
DCHECK_EQ(cnt, 0) << "Node counter must be 0";
int64_t num_ready_thunks = ready_queue.Size();
if (ABSL_PREDICT_FALSE(has_runner && num_ready_thunks > split_threshold)) {
SplitReadyQueue(state, params, ready_queue, split_threshold);
}
Thunk& thunk = *state->executor->thunk_sequence_[id];
tsl::AsyncValueRef<ExecuteEvent> execute_event =
ABSL_PREDICT_FALSE(state->abort.load(std::memory_order_relaxed))
? Thunk::OkExecuteEventSingleton()
: thunk.Execute(params);
if (ABSL_PREDICT_TRUE(execute_event.IsAvailable())) {
ProcessOutEdges(state, execute_event.AsPtr(), node, ready_queue);
} else {
execute_event.AndThen(
[¶ms, &node, state, execute_event = execute_event.AsPtr(),
ready_queue = ready_queue.CreateEmptyReadyQueue(),
lock = ready_queue.Empty() ? std::move(lock)
: params.session.Join()]() mutable {
state->executor->ProcessOutEdges(state, execute_event, node,
ready_queue);
if (ABSL_PREDICT_TRUE(!ready_queue.Empty())) {
state->executor->Execute(state, params, std::move(ready_queue),
std::move(lock));
}
});
}
}
}
template <typename ReadyQueue>
inline ABSL_ATTRIBUTE_ALWAYS_INLINE void ThunkExecutor::SplitReadyQueue(
ExecuteState* state, const Thunk::ExecuteParams& params,
ReadyQueue& ready_queue, int64_t split_threshold) {
DCHECK(state->runner) << "TaskRunner must be set";
while (ready_queue.Size() > split_threshold) {
Thunk::ExecuteSession::Lock task_runner_lock = params.session.TryJoin();
if (!task_runner_lock) {
break;
}
(*state->runner)([¶ms, state, ready_queue = ready_queue.PopHalf(),
lock = std::move(task_runner_lock)]() mutable {
state->executor->Execute(state, params, std::move(ready_queue),
std::move(lock));
});
}
}
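// Decrements the dependency counter of every successor and pushes nodes that
// reach zero onto the ready queue. The last completed sink node publishes
// either the accumulated abort status or a concrete completion event.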
template <typename ReadyQueue>
void ThunkExecutor::ProcessOutEdges(
ExecuteState* state, tsl::AsyncValuePtr<Thunk::ExecuteEvent> node_event,
ExecuteState::Node& node, ReadyQueue& ready_queue) {
if (ABSL_PREDICT_FALSE(node_event.IsError())) {
absl::MutexLock lock(&state->abort_mutex);
state->abort = true;
state->abort_status.Update(node_event.GetError());
}
bool is_sink = node.out_edges->empty();
for (NodeId out_edge : *node.out_edges) {
ExecuteState::Node& out_node = state->node(out_edge);
int64_t cnt = out_node.counter.fetch_sub(1, std::memory_order_release);
DCHECK_GE(cnt, 1) << "Node counter can't drop below 0";
if (cnt == 1) ready_queue.Push(out_edge);
}
if (ABSL_PREDICT_FALSE(is_sink)) {
bool is_done =
state->pending_sink_nodes.fetch_sub(1, std::memory_order_acq_rel) == 1;
if (ABSL_PREDICT_TRUE(!is_done)) return;
if (ABSL_PREDICT_FALSE(state->abort.load(std::memory_order_relaxed))) {
auto take_error = [&] {
absl::MutexLock lock(&state->abort_mutex);
DCHECK(!state->abort_status.ok())
<< "Abort status must be set if execution is aborted";
return std::move(state->abort_status);
};
state->execute_event.SetError(take_error());
} else {
state->execute_event.SetStateConcrete();
}
}
}
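// Erases the edge between `from` and `to` if present, returning the number
// of removed edges (0 or 1). Sorted edge lists allow cheap early-outs and
// binary search.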
static int64_t EraseEdge(ThunkExecutor::NodeDef& from,
ThunkExecutor::NodeDef& to) {
DCHECK_NE(from.id, to.id) << "Nodes must be different";
DCHECK_LT(from.id, to.id) << "Nodes must be ordered";
if (from.out_edges.empty() || to.in_edges.empty()) {
DCHECK_EQ(absl::c_count(from.out_edges, to.id), 0) << "Unexpected out edge";
DCHECK_EQ(absl::c_count(to.in_edges, from.id), 0) << "Unexpected in edge";
return 0;
}
if (from.out_edges.back() < to.id || to.in_edges.front() > from.id) {
DCHECK_EQ(absl::c_count(from.out_edges, to.id), 0) << "Unexpected out edge";
DCHECK_EQ(absl::c_count(to.in_edges, from.id), 0) << "Unexpected in edge";
return 0;
}
auto out_edges_it = absl::c_lower_bound(from.out_edges, to.id);
bool has_out_edge =
out_edges_it != from.out_edges.end() && *out_edges_it == to.id;
if (!has_out_edge) {
DCHECK_EQ(absl::c_count(to.in_edges, from.id), 0) << "Unexpected in edge";
return 0;
}
auto in_edges_it = absl::c_lower_bound(to.in_edges, from.id);
bool has_in_edge =
in_edges_it != to.in_edges.end() && *in_edges_it == from.id;
DCHECK(has_in_edge) << "In-edge must exist if out-edge exists";
from.out_edges.erase(out_edges_it);
to.in_edges.erase(in_edges_it);
return 1;
}
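// For each node, in reverse order, walks everything reachable through its
// direct successors and erases direct edges to nodes that are also reachable
// transitively. A node's priority becomes the number of nodes reachable
// from it.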
int64_t ThunkExecutor::RunTransitiveReductionAndUpdatePriorities() {
int64_t num_erased_edges = 0;
std::vector<int64_t> stack;
std::vector<bool> visited;
auto add_to_stack = [&](int64_t node_id) {
if (!visited[node_id]) {
stack.push_back(node_id);
visited[node_id] = true;
}
};
for (int64_t i = nodes_defs_.size() - 1; i >= 0; --i) {
NodeDef& source_node = nodes_defs_[i];
stack.clear();
visited.assign(nodes_defs_.size(), false);
for (int64_t out_id : source_node.out_edges) {
NodeDef& out_node = nodes_defs_[out_id];
visited[out_id] = true;
for (int64_t start_id : out_node.out_edges) add_to_stack(start_id);
}
while (!stack.empty()) {
int64_t node_id = stack.back();
stack.pop_back();
NodeDef& node = nodes_defs_[node_id];
num_erased_edges += EraseEdge(source_node, node);
for (int64_t out_id : node.out_edges) add_to_stack(out_id);
}
source_node.priority = absl::c_count(visited, true);
}
return num_erased_edges;
}
std::string ThunkExecutor::ToString() const {
std::string str = absl::StrFormat(
"ThunkExecutor: #thunks=%d #source_nodes=%d #sink_nodes=%d", num_thunks_,
source_.size(), sink_.size());
std::vector<std::vector<std::string>> in_edges(num_thunks_);
for (const auto& node_def : nodes_defs_) {
for (NodeId in_edge : node_def.in_edges) {
in_edges[node_def.id].push_back(thunk_sequence_[in_edge]->info().op_name);
}
}
for (NodeId i = 0; i < num_thunks_; ++i) {
const Thunk& thunk = *thunk_sequence_[i];
bool is_source = absl::c_find(source_, i) != source_.end();
bool is_sink = absl::c_find(sink_, i) != sink_.end();
absl::StrAppendFormat(&str,
"\n thunk #%05d: op_name=%s, dependencies=[%s], "
"source=%v, sink=%v, priority=%d",
i, thunk.info().op_name,
absl::StrJoin(in_edges[i], ", "), is_source, is_sink,
nodes_defs_[i].priority);
}
return str;
}
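// Two ready-queue implementations: FifoReadyQueue pops nodes in insertion
// order, while PriorityReadyQueue pops the node with the largest precomputed
// priority. PopHalf donates work to another worker: the FIFO queue gives
// away its back half, the priority queue its lower-priority half.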
ThunkExecutor::FifoReadyQueue::FifoReadyQueue(
absl::Span<const NodeId> ready_nodes)
: queue_(ready_nodes.begin(), ready_nodes.end()) {}
void ThunkExecutor::FifoReadyQueue::Push(NodeId id) { queue_.push_back(id); }
ThunkExecutor::NodeId ThunkExecutor::FifoReadyQueue::Pop() {
DCHECK(!Empty()) << "Queue must not be empty";
return queue_[head_++];
}
ThunkExecutor::FifoReadyQueue ThunkExecutor::FifoReadyQueue::PopHalf() {
DCHECK(!Empty()) << "Queue must not be empty";
auto mid = queue_.begin() + head_ + Size() / 2;
FifoReadyQueue popped(absl::MakeConstSpan(&*mid, queue_.end() - mid));
queue_.resize(mid - queue_.begin());
return popped;
}
size_t ThunkExecutor::FifoReadyQueue::Size() const {
return queue_.size() - head_;
}
bool ThunkExecutor::FifoReadyQueue::Empty() const {
return head_ == queue_.size();
}
ThunkExecutor::FifoReadyQueue
ThunkExecutor::FifoReadyQueue::CreateEmptyReadyQueue() const {
return FifoReadyQueue(absl::Span<const NodeId>());
}
ThunkExecutor::PriorityReadyQueue::PriorityReadyQueue(
absl::Span<const NodeDef> nodes_defs, absl::Span<const NodeId> ready_nodes)
: nodes_defs_(nodes_defs),
queue_(ready_nodes.begin(), ready_nodes.end(), Compare{nodes_defs}) {}
void ThunkExecutor::PriorityReadyQueue::Push(NodeId id) { queue_.push(id); }
ThunkExecutor::NodeId ThunkExecutor::PriorityReadyQueue::Pop() {
DCHECK(!Empty()) << "Queue must not be empty";
NodeId id = queue_.top();
queue_.pop();
return id;
}
ThunkExecutor::PriorityReadyQueue ThunkExecutor::PriorityReadyQueue::PopHalf() {
DCHECK(!Empty()) << "Queue must not be empty";
int64_t keep_top_nodes = queue_.size() / 2;
PriorityReadyQueue popped(nodes_defs_, {});
while (keep_top_nodes-- > 0) {
popped.queue_.push(queue_.top());
queue_.pop();
}
popped.queue_.swap(queue_);
return popped;
}
size_t ThunkExecutor::PriorityReadyQueue::Size() const { return queue_.size(); }
bool ThunkExecutor::PriorityReadyQueue::Empty() const { return queue_.empty(); }
ThunkExecutor::PriorityReadyQueue
ThunkExecutor::PriorityReadyQueue::CreateEmptyReadyQueue() const {
return PriorityReadyQueue(nodes_defs_, {});
}
} | #include "xla/backends/cpu/runtime/thunk_executor.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla::cpu {
namespace {
using ::testing::ElementsAre;
static int64_t shared_resource;
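// Test thunk that adds the source slice element-wise into the destination
// slice. It can optionally record an execution trace, contend on a shared
// resource, or inject an error, and it completes asynchronously when an
// intra-op threadpool is provided.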
class AddI32Thunk final : public Thunk {
public:
AddI32Thunk(std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace, bool use_shared_resource,
bool inject_error);
static std::unique_ptr<Thunk> Create(
std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace = nullptr,
bool use_shared_resource = false, bool inject_error = false);
static std::vector<MaybeOwningDeviceMemory> AsDeviceMemory(
absl::Span<std::vector<int32_t>* const> data);
static absl::Status Execute(const BufferAllocations* allocations,
BufferAllocation::Slice src_slice,
BufferAllocation::Slice dst_slice);
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams&) final;
BufferUses buffer_uses() const final;
ResourceUses resource_uses() const final;
private:
std::vector<BufferAllocation::Slice> srcs_;
std::vector<BufferAllocation::Slice> dsts_;
std::vector<std::string>* trace_;
bool use_shared_resource_;
bool inject_error_;
};
std::unique_ptr<Thunk> AddI32Thunk::Create(
std::string name, std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts, std::vector<std::string>* trace,
bool use_shared_resource, bool inject_error) {
return std::make_unique<AddI32Thunk>(std::move(name), std::move(srcs),
std::move(dsts), trace,
use_shared_resource, inject_error);
}
std::vector<MaybeOwningDeviceMemory> AddI32Thunk::AsDeviceMemory(
absl::Span<std::vector<int32_t>* const> data) {
std::vector<MaybeOwningDeviceMemory> buffers;
for (auto& vec : data) {
buffers.emplace_back(
se::DeviceMemoryBase(vec->data(), vec->size() * sizeof(int32_t)));
}
return buffers;
}
AddI32Thunk::AddI32Thunk(std::string name,
std::vector<BufferAllocation::Slice> srcs,
std::vector<BufferAllocation::Slice> dsts,
std::vector<std::string>* trace,
bool use_shared_resource, bool inject_error)
: Thunk(Kind::kKernel, Info{name}),
srcs_(std::move(srcs)),
dsts_(std::move(dsts)),
trace_(trace),
use_shared_resource_(use_shared_resource),
inject_error_(inject_error) {}
absl::Status AddI32Thunk::Execute(const BufferAllocations* allocations,
BufferAllocation::Slice src_slice,
BufferAllocation::Slice dst_slice) {
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase src,
allocations->GetDeviceAddress(src_slice));
TF_ASSIGN_OR_RETURN(se::DeviceMemoryBase dst,
allocations->GetDeviceAddress(dst_slice));
CHECK_EQ(src.size() % sizeof(int32_t), 0);
CHECK_EQ(dst.size() % sizeof(int32_t), 0);
int32_t* src_ptr = static_cast<int32_t*>(src.opaque());
int32_t* dst_ptr = static_cast<int32_t*>(dst.opaque());
size_t len = std::min(src.size(), dst.size()) / sizeof(int32_t);
for (int j = 0; j < len; ++j) dst_ptr[j] += src_ptr[j];
return absl::OkStatus();
}
tsl::AsyncValueRef<Thunk::ExecuteEvent> AddI32Thunk::Execute(
const ExecuteParams& params) {
if (trace_) trace_->push_back(info().op_name);
auto execute = [&]() -> absl::Status {
CHECK_EQ(srcs_.size(), dsts_.size());
for (int i = 0; i < srcs_.size(); ++i) {
TF_RETURN_IF_ERROR(
Execute(params.buffer_allocations, srcs_.at(i), dsts_.at(i)));
}
return absl::OkStatus();
};
if (params.intra_op_threadpool) {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
params.intra_op_threadpool->getPool()->Schedule([&, event, execute] {
if (use_shared_resource_) {
shared_resource++;
}
if (inject_error_) {
event.SetError(absl::InternalError("Injected error"));
} else {
CHECK_OK(execute());
event.SetStateConcrete();
}
});
return event;
}
if (use_shared_resource_) {
shared_resource++;
}
if (inject_error_) {
return tsl::MakeErrorAsyncValueRef(absl::InternalError("Injected error"));
}
TF_RETURN_IF_ERROR(execute());
return Thunk::OkExecuteEvent();
}
AddI32Thunk::BufferUses AddI32Thunk::buffer_uses() const {
BufferUses buffer_uses;
for (const auto& src : srcs_) buffer_uses.push_back(BufferUse::Read(src));
for (const auto& dst : dsts_) buffer_uses.push_back(BufferUse::Write(dst));
return buffer_uses;
}
AddI32Thunk::ResourceUses AddI32Thunk::resource_uses() const {
static std::shared_ptr<Resource>* shared_resource =
new std::shared_ptr<Resource>(Resource::Create(Resource::kToken));
return use_shared_resource_
? ResourceUses{ResourceUse::Write(*shared_resource)}
: ResourceUses{};
}
static ThunkExecutor::Options OptionsForTest() {
  return ThunkExecutor::Options{0, 0};
}
TEST(ThunkExecutorTest, FifoReadyQueueTest) {
ThunkExecutor::FifoReadyQueue queue({});
EXPECT_TRUE(queue.Empty());
EXPECT_EQ(queue.Size(), 0);
queue.Push(1);
queue.Push(2);
queue.Push(3);
EXPECT_EQ(queue.Size(), 3);
EXPECT_EQ(queue.Pop(), 1);
EXPECT_EQ(queue.Pop(), 2);
EXPECT_EQ(queue.Pop(), 3);
EXPECT_TRUE(queue.Empty());
EXPECT_EQ(queue.Size(), 0);
queue.Push(1);
queue.Push(2);
queue.Push(3);
ThunkExecutor::FifoReadyQueue half0 = queue.PopHalf();
EXPECT_EQ(half0.Size(), 2);
EXPECT_EQ(half0.Pop(), 2);
EXPECT_EQ(half0.Pop(), 3);
EXPECT_EQ(queue.Size(), 1);
ThunkExecutor::FifoReadyQueue half1 = queue.PopHalf();
EXPECT_EQ(half1.Size(), 1);
EXPECT_EQ(queue.Size(), 0);
queue.Push(1);
queue.Push(2);
queue.Push(3);
queue.Push(4);
queue.Push(5);
EXPECT_EQ(queue.Pop(), 1);
ThunkExecutor::FifoReadyQueue half2 = queue.PopHalf();
EXPECT_EQ(half2.Size(), 2);
EXPECT_EQ(half2.Pop(), 4);
EXPECT_EQ(half2.Pop(), 5);
}
TEST(ThunkExecutorTest, PriorityReadyQueueTest) {
std::vector<ThunkExecutor::NodeDef> nodes_defs(16);
for (size_t i = 0; i < nodes_defs.size(); ++i) {
nodes_defs[i].priority = i;
}
ThunkExecutor::PriorityReadyQueue queue(nodes_defs, {});
EXPECT_TRUE(queue.Empty());
EXPECT_EQ(queue.Size(), 0);
queue.Push(1);
queue.Push(3);
queue.Push(2);
EXPECT_EQ(queue.Pop(), 3);
EXPECT_EQ(queue.Pop(), 2);
EXPECT_EQ(queue.Pop(), 1);
EXPECT_TRUE(queue.Empty());
EXPECT_EQ(queue.Size(), 0);
queue.Push(2);
queue.Push(1);
queue.Push(3);
ThunkExecutor::PriorityReadyQueue half0 = queue.PopHalf();
EXPECT_EQ(half0.Size(), 2);
EXPECT_EQ(half0.Pop(), 2);
EXPECT_EQ(half0.Pop(), 1);
EXPECT_EQ(queue.Size(), 1);
ThunkExecutor::PriorityReadyQueue half1 = queue.PopHalf();
EXPECT_EQ(half1.Size(), 1);
EXPECT_EQ(half1.Pop(), 3);
EXPECT_EQ(queue.Size(), 0);
queue.Push(4);
queue.Push(3);
queue.Push(5);
queue.Push(1);
queue.Push(2);
EXPECT_EQ(queue.Pop(), 5);
ThunkExecutor::PriorityReadyQueue half2 = queue.PopHalf();
EXPECT_EQ(half2.Size(), 2);
EXPECT_EQ(half2.Pop(), 2);
EXPECT_EQ(half2.Pop(), 1);
}
TEST(ThunkExecutorTest, DependencyOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}));
TF_ASSERT_OK_AND_ASSIGN(
ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence), OptionsForTest()));
EXPECT_FALSE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0, 1));
EXPECT_THAT(executor.sink(), ElementsAre(2));
EXPECT_EQ(executor.node_def(0).priority, 1);
EXPECT_EQ(executor.node_def(1).priority, 1);
EXPECT_EQ(executor.node_def(2).priority, 0);
}
TEST(ThunkExecutorTest, SequentialOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(
ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence), OptionsForTest()));
EXPECT_TRUE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
EXPECT_EQ(executor.node_def(0).priority, 2);
EXPECT_EQ(executor.node_def(1).priority, 1);
EXPECT_EQ(executor.node_def(2).priority, 0);
}
TEST(ThunkExecutorTest, ResourceOrdering) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0},
nullptr,
true));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1},
nullptr,
true));
TF_ASSERT_OK_AND_ASSIGN(
ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence), OptionsForTest()));
EXPECT_TRUE(executor.is_sequential());
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(1));
EXPECT_EQ(executor.node_def(0).priority, 1);
EXPECT_EQ(executor.node_def(1).priority, 0);
}
TEST(ThunkExecutorTest, TransitiveReduction) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice(&alloc, 0, 40);
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("b", {slice}, {slice}));
sequence.push_back(AddI32Thunk::Create("c", {slice}, {slice}));
TF_ASSERT_OK_AND_ASSIGN(
ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence), OptionsForTest()));
EXPECT_THAT(executor.source(), ElementsAre(0));
EXPECT_THAT(executor.sink(), ElementsAre(2));
EXPECT_THAT(executor.node_def(0).out_edges, ElementsAre(1));
EXPECT_THAT(executor.node_def(1).in_edges, ElementsAre(0));
EXPECT_THAT(executor.node_def(1).out_edges, ElementsAre(2));
EXPECT_THAT(executor.node_def(2).in_edges, ElementsAre(1));
EXPECT_EQ(executor.node_def(0).priority, 2);
EXPECT_EQ(executor.node_def(1).priority, 1);
EXPECT_EQ(executor.node_def(2).priority, 0);
}
TEST(ThunkExecutorTest, Execute) {
BufferAllocation alloc(0, 80, 0);
BufferAllocation::Slice slice0(&alloc, 0, 40);
BufferAllocation::Slice slice1(&alloc, 40, 40);
BufferAllocation::Slice slice2(&alloc, 20, 40);
std::vector<std::string> trace;
ThunkSequence sequence;
sequence.push_back(AddI32Thunk::Create("a", {slice0}, {slice0}, &trace));
sequence.push_back(AddI32Thunk::Create("b", {slice1}, {slice1}, &trace));
sequence.push_back(AddI32Thunk::Create("c", {slice2}, {slice2}, &trace));
TF_ASSERT_OK_AND_ASSIGN(
ThunkExecutor executor,
ThunkExecutor::Create(std::move(sequence), OptionsForTest()));
std::vector<int32_t> data(20, 1);
auto buffers = AddI32Thunk::AsDeviceMemory({&data});
BufferAllocations allocations(buffers);
Thunk::TaskRunner task_runner = [&](Thunk::Task task) {
trace.push_back("<TaskRunner>");
task();
};
Thunk::ExecuteParams params = {nullptr, &allocations};
params.task_runner = &task_runner;
  params.session = Thunk::ExecuteSession(8, /*split_threshold=*/0);
auto execute_event = executor.Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_THAT(trace, ElementsAre("<TaskRunner>", "b", "a", "c"));
EXPECT_THAT(data, ElementsAre(2, 2, 2, 2, 2,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
2, 2, 2, 2, 2));
}
enum class SharedResourceUse { kNo, kAll, kRandom };
struct GeneratedThunkSequence {
BufferAllocation src_alloc;
BufferAllocation dst_alloc;
std::vector<int32_t> src;
std::vector<int32_t> dst;
std::vector<int32_t> expected;
int32_t expected_shared_resource_value;
std::vector<MaybeOwningDeviceMemory> expected_buffers;
std::vector<MaybeOwningDeviceMemory> buffers;
ThunkSequence sequence;
};
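// Generates `num_thunks` AddI32 thunks over random, overlapping slices of
// the source and destination allocations. Expected destination contents (and
// the expected shared-resource count) are precomputed by eagerly executing
// each thunk against a reference copy of the buffers.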
static absl::StatusOr<std::unique_ptr<GeneratedThunkSequence>>
GenerateThunkSequence(size_t num_elements, size_t num_thunks,
SharedResourceUse shared_resource_use,
bool inject_errors) {
auto g = std::make_unique<GeneratedThunkSequence>(GeneratedThunkSequence{
BufferAllocation(0, num_elements * sizeof(int32_t), 0),
BufferAllocation(1, num_elements * sizeof(int32_t), 0),
std::vector<int32_t>(num_elements, 1),
std::vector<int32_t>(num_elements, 0),
std::vector<int32_t>(num_elements, 0),
      /*expected_shared_resource_value=*/0,
});
g->sequence.reserve(num_thunks);
g->expected_buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->expected});
g->buffers = AddI32Thunk::AsDeviceMemory({&g->src, &g->dst});
std::minstd_rand0 engine;
std::uniform_int_distribution<size_t> offset_dist(0, num_elements - 1);
std::uniform_int_distribution<size_t> size_dist(32, 64);
std::uniform_int_distribution<size_t> use_resource_dist(0, num_thunks / 10);
std::uniform_int_distribution<size_t> inject_error_dist(0, num_thunks / 10);
auto random_slice = [&](BufferAllocation* alloc) {
size_t start = offset_dist(engine);
size_t size = std::min(num_elements - start, size_dist(engine));
return BufferAllocation::Slice(alloc, start * sizeof(int32_t),
size * sizeof(int32_t));
};
for (int i = 0; i < num_thunks; ++i) {
BufferAllocation::Slice src = random_slice(&g->src_alloc);
BufferAllocation::Slice dst = random_slice(&g->dst_alloc);
BufferAllocations allocations(g->expected_buffers);
TF_RETURN_IF_ERROR(AddI32Thunk::Execute(&allocations, src, dst));
bool use_resource = [&] {
switch (shared_resource_use) {
case SharedResourceUse::kNo:
return false;
case SharedResourceUse::kAll:
return true;
case SharedResourceUse::kRandom:
return use_resource_dist(engine) == 0;
}
}();
if (use_resource) g->expected_shared_resource_value++;
bool inject_error = inject_errors && inject_error_dist(engine) == 0;
g->sequence.push_back(AddI32Thunk::Create(absl::StrCat(i), {src}, {dst},
                                              /*trace=*/nullptr, use_resource,
inject_error));
}
return g;
}
class ThunkExecutorStressTest
: public testing::TestWithParam<
std::tuple<int32_t, bool, bool, SharedResourceUse, bool, bool>> {
public:
void SetUp() override {
auto& [num_thunks, use_task_runner, use_device, shared_resource_use,
inject_errors, use_priority_ready_queue] = GetParam();
use_task_runner_ = use_task_runner;
use_device_ = use_device;
if (use_task_runner_ || use_device_) {
thread_pool_.emplace(tsl::Env::Default(), "thunk-executor", 8);
device_.emplace(thread_pool_->AsEigenThreadPool(),
thread_pool_->NumThreads());
task_runner_.emplace([this](Thunk::Task task) {
thread_pool_->Schedule(std::move(task));
});
}
}
Thunk::TaskRunner* task_runner() {
if (!use_task_runner_) return nullptr;
return &*task_runner_;
}
Eigen::ThreadPoolDevice* device() {
if (!use_device_) return nullptr;
return &*device_;
}
private:
bool use_task_runner_;
bool use_device_;
std::optional<tsl::thread::ThreadPool> thread_pool_;
std::optional<Eigen::ThreadPoolDevice> device_;
std::optional<Thunk::TaskRunner> task_runner_;
};
TEST_P(ThunkExecutorStressTest, Execute) {
auto [num_thunks, use_task_runner, use_device, shared_resource_use,
inject_errors, use_priority_ready_queue] = GetParam();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<GeneratedThunkSequence> g,
      GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                            shared_resource_use, inject_errors));
  ThunkExecutor::Options executor_options = {0, use_priority_ready_queue};
TF_ASSERT_OK_AND_ASSIGN(
ThunkExecutor executor,
ThunkExecutor::Create(std::move(g->sequence), executor_options));
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, device(),
task_runner()};
shared_resource = 0;
auto execute_event = executor.Execute(params);
tsl::BlockUntilReady(execute_event);
if (inject_errors) {
ASSERT_TRUE(execute_event.IsError());
EXPECT_EQ(execute_event.GetError(), absl::InternalError("Injected error"));
} else {
ASSERT_TRUE(execute_event.IsConcrete());
EXPECT_EQ(shared_resource, g->expected_shared_resource_value);
EXPECT_EQ(g->dst, g->expected);
}
}
INSTANTIATE_TEST_SUITE_P(
ThunkExecutor, ThunkExecutorStressTest,
    testing::Combine(/*num_thunks=*/testing::ValuesIn({10, 100, 1000}),
                     /*use_task_runner=*/testing::Bool(),
                     /*use_device=*/testing::Bool(),
                     /*shared_resource_use=*/
                     testing::Values(SharedResourceUse::kNo,
                                     SharedResourceUse::kAll,
                                     SharedResourceUse::kRandom),
                     /*inject_errors=*/testing::Bool(),
                     /*use_priority_ready_queue=*/testing::Bool()));
static void BM_FifoReadyQueuePushPop(benchmark::State& state) {
ThunkExecutor::FifoReadyQueue queue({});
const size_t num_push_pop = state.range(0);
for (auto _ : state) {
for (int i = 0; i < num_push_pop; ++i) {
queue.Push(i);
}
for (int i = 0; i < num_push_pop; ++i) {
benchmark::DoNotOptimize(queue.Pop());
}
}
}
static void BM_FifoReadyQueuePushPopHalf(benchmark::State& state) {
ThunkExecutor::FifoReadyQueue queue({});
const size_t num_push_pop = state.range(0);
for (auto _ : state) {
for (int i = 0; i < num_push_pop; ++i) {
queue.Push(i);
}
benchmark::DoNotOptimize(queue.PopHalf());
}
}
static void BM_PriorityReadyQueuePushPop(benchmark::State& state) {
std::vector<ThunkExecutor::NodeDef> nodes_defs(16);
for (size_t i = 0; i < nodes_defs.size(); ++i) {
nodes_defs[i].priority = i;
}
std::default_random_engine rng;
absl::c_shuffle(nodes_defs, rng);
ThunkExecutor::PriorityReadyQueue queue(nodes_defs, {});
const size_t num_push_pop = state.range(0);
for (auto _ : state) {
for (int i = 0; i < num_push_pop; ++i) {
queue.Push(i);
}
for (int i = 0; i < num_push_pop; ++i) {
benchmark::DoNotOptimize(queue.Pop());
}
}
}
static void BM_PriorityReadyQueuePushPopHalf(benchmark::State& state) {
std::vector<ThunkExecutor::NodeDef> nodes_defs(16);
for (size_t i = 0; i < nodes_defs.size(); ++i) {
nodes_defs[i].priority = i;
}
std::default_random_engine rng;
absl::c_shuffle(nodes_defs, rng);
ThunkExecutor::PriorityReadyQueue queue(nodes_defs, {});
const size_t num_push_pop = state.range(0);
for (auto _ : state) {
for (int i = 0; i < num_push_pop; ++i) {
queue.Push(i);
}
benchmark::DoNotOptimize(queue.PopHalf());
}
}
#define BENCHMARK_READY_QUEUE(name) \
BENCHMARK(name) \
->MeasureProcessCPUTime() \
->Arg(1) \
->Arg(2) \
->Arg(4) \
->Arg(8) \
->Arg(16)
BENCHMARK_READY_QUEUE(BM_FifoReadyQueuePushPop);
BENCHMARK_READY_QUEUE(BM_FifoReadyQueuePushPopHalf);
BENCHMARK_READY_QUEUE(BM_PriorityReadyQueuePushPop);
BENCHMARK_READY_QUEUE(BM_PriorityReadyQueuePushPopHalf);
static void BM_CreateThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
for (auto _ : state) {
    auto g = GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                                   SharedResourceUse::kNo,
                                   /*inject_errors=*/false);
CHECK_OK(ThunkExecutor::Create(std::move((*g)->sequence), OptionsForTest())
.status());
}
}
static void BM_SequentialThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
  auto g = GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                                 SharedResourceUse::kAll,
                                 /*inject_errors=*/false)
               .value();
auto e =
ThunkExecutor::Create(std::move(g->sequence), OptionsForTest()).value();
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
for (auto _ : state) {
auto execute_event = e.Execute(params);
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
static void BM_SyncThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
  auto g = GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                                 SharedResourceUse::kNo,
                                 /*inject_errors=*/false)
               .value();
auto e =
ThunkExecutor::Create(std::move(g->sequence), OptionsForTest()).value();
BufferAllocations allocations(g->buffers);
Thunk::ExecuteParams params = {nullptr, &allocations};
for (auto _ : state) {
auto execute_event = e.Execute(params);
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
static void BM_AsyncThunkExecutor(benchmark::State& state) {
const size_t num_thunks = state.range(0);
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "thunk-executor", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
  auto g = GenerateThunkSequence(/*num_elements=*/1024, num_thunks,
                                 SharedResourceUse::kNo,
                                 /*inject_errors=*/false)
               .value();
auto e =
ThunkExecutor::Create(std::move(g->sequence), OptionsForTest()).value();
BufferAllocations allocations(g->buffers);
Thunk::TaskRunner task_runner = [&](Thunk::Task task) {
thread_pool.Schedule(std::move(task));
};
Thunk::ExecuteParams params = {nullptr, &allocations, nullptr, &device,
&task_runner};
for (auto _ : state) {
auto execute_event = e.Execute(params);
tsl::BlockUntilReady(execute_event);
CHECK(execute_event.IsConcrete());
}
}
#define BENCHMARK_THUNK_EXECUTOR(name) \
BENCHMARK(name) \
->MeasureProcessCPUTime() \
->Arg(1) \
->Arg(2) \
->Arg(4) \
->Arg(8) \
->Arg(16) \
->Arg(32) \
->Arg(64) \
->Arg(128) \
->Arg(256) \
->Arg(512)
BENCHMARK_THUNK_EXECUTOR(BM_CreateThunkExecutor);
BENCHMARK_THUNK_EXECUTOR(BM_SequentialThunkExecutor);
BENCHMARK_THUNK_EXECUTOR(BM_SyncThunkExecutor);
BENCHMARK_THUNK_EXECUTOR(BM_AsyncThunkExecutor);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/thunk_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/thunk_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c7cc377-e95e-41fa-9b3f-d5f718e8e8de | cpp | tensorflow/tensorflow | kernel_thunk | third_party/xla/xla/service/gpu/runtime/kernel_thunk.cc | third_party/xla/xla/backends/cpu/runtime/kernel_thunk_test.cc | #include "xla/service/gpu/runtime/kernel_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
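// Keeps only the first kernel argument for each distinct buffer slice;
// arguments aliasing an earlier slice are dropped, and `written_` records
// which of the remaining slices the kernel may write.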
KernelThunk::KernelThunk(const HloInstruction* instr, std::string kernel_name,
absl::Span<const KernelArgument> kernel_arguments,
LaunchDimensions launch_dimensions,
std::optional<se::ClusterDim> cluster_dim,
int64_t shmem_bytes)
: Thunk(Kind::kKernel, Thunk::ThunkInfo::WithProfileAnnotation(instr)),
kernel_name_(std::move(kernel_name)),
launch_dimensions_(std::move(launch_dimensions)),
cluster_dim_(std::move(cluster_dim)),
shmem_bytes_(shmem_bytes) {
args_.reserve(kernel_arguments.size());
written_.reserve(kernel_arguments.size());
for (const auto& kernel_argument : kernel_arguments) {
if (!kernel_argument.first_with_same_slice().has_value()) {
args_.push_back(kernel_argument.slice());
written_.push_back(kernel_argument.written());
}
}
}
std::string KernelThunk::ToString(int indent) const {
return absl::StrFormat(
", kernel = %s, launch dimensions = %s, cluster_dim = %s", kernel_name_,
launch_dimensions_.ToString(),
cluster_dim_.has_value() ? cluster_dim_->ToString() : "nullopt");
}
absl::Status KernelThunk::Initialize(const InitializeParams& params) {
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(params.executor);
if (kernel_cache_.end() == it) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::Kernel> kernel,
CreateKernel(kernel_name_, args_.size(), params.src.text,
params.src.binary, params.executor, shmem_bytes_));
kernel_cache_.emplace(params.executor, std::move(kernel));
}
return absl::OkStatus();
}
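// Debugging aid, enabled at VLOG level 100: copies each kernel argument back
// to the host and dumps its bytes in hex.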
static void PrintBufferContents(
se::Stream* stream, absl::Span<const se::DeviceMemoryBase> buffer_args) {
int input_idx = 0;
for (const se::DeviceMemoryBase& buf : buffer_args) {
auto host_buffer = std::make_unique<char[]>(buf.size());
CHECK_OK(stream->Memcpy(host_buffer.get(), buf, buf.size()));
CHECK_OK(stream->BlockHostUntilDone());
std::string buffer_contents;
for (int i = 0; i < buf.size(); i++) {
absl::StrAppendFormat(&buffer_contents, "%x ",
static_cast<unsigned>(host_buffer[i]));
}
VLOG(100) << "BUF(" << input_idx++ << ") = " << buffer_contents;
}
}
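// Looks up the kernel loaded by Initialize() for this executor, resolves
// every argument slice to a device address, and launches on the stream
// selected for this execution, with or without a cluster dimension.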
absl::Status KernelThunk::ExecuteOnStream(const ExecuteParams& params) {
se::StreamExecutor* executor = params.stream->parent();
LaunchDimensions launch_dimensions;
std::optional<se::ClusterDim> cluster_dim;
const se::Kernel* kernel = nullptr;
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
{
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(executor);
CHECK(it != kernel_cache_.end())
<< "Initialize() not called for StreamExecutor " << executor;
launch_dimensions = launch_dimensions_;
cluster_dim = cluster_dim_;
kernel = it->second.get();
}
VLOG(3) << "Launching " << kernel->name();
absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
VLOG(3) << " Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
<< ": " << buf.opaque() << " (" << buf.size() << "B)";
buffer_args.push_back(buf);
}
if (VLOG_IS_ON(100)) {
PrintBufferContents(stream, buffer_args);
}
if (cluster_dim.has_value()) {
return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
cluster_dim.value(), stream);
} else {
return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
stream);
}
}
CustomKernelThunk::CustomKernelThunk(
const HloInstruction* instr, CustomKernel custom_kernel,
absl::Span<const KernelArgument> kernel_arguments)
: Thunk(Kind::kCustomKernel,
Thunk::ThunkInfo::WithProfileAnnotation(instr)),
custom_kernel_(std::move(custom_kernel)) {
args_.reserve(kernel_arguments.size());
written_.reserve(kernel_arguments.size());
for (const auto& kernel_argument : kernel_arguments) {
if (!kernel_argument.first_with_same_slice().has_value()) {
args_.push_back(kernel_argument.slice());
written_.push_back(kernel_argument.written());
}
}
}
std::string CustomKernelThunk::ToString(int indent) const {
return custom_kernel_.ToString();
}
absl::Status CustomKernelThunk::Initialize(const InitializeParams& params) {
absl::MutexLock lock(&mutex_);
auto it = kernel_cache_.find(params.executor);
if (kernel_cache_.end() == it) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::Kernel> kernel,
params.executor->LoadKernel(custom_kernel_.kernel_spec()));
kernel_cache_.emplace(params.executor, std::move(kernel));
}
return absl::OkStatus();
}
absl::Status CustomKernelThunk::ExecuteOnStream(const ExecuteParams& params) {
se::StreamExecutor* executor = params.stream->parent();
const se::Kernel* kernel = [&] {
absl::MutexLock lock(&mutex_);
return kernel_cache_[executor].get();
}();
VLOG(3) << "Launching " << custom_kernel_.ToString() << " as device kernel "
<< kernel->name();
absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
for (const BufferAllocation::Slice& arg : args_) {
se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
VLOG(3) << " Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
<< ": " << buf.opaque() << " (" << buf.size() << "B)";
buffer_args.push_back(buf);
}
if (VLOG_IS_ON(100)) {
PrintBufferContents(params.stream, buffer_args);
}
se::KernelArgsDeviceMemoryArray args(buffer_args,
custom_kernel_.shared_memory_bytes());
if (auto cluster = custom_kernel_.cluster_dims(); cluster.has_value()) {
return params.stream->Launch(custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *cluster, *kernel,
args);
} else {
return params.stream->Launch(custom_kernel_.thread_dims(),
custom_kernel_.block_dims(), *kernel, args);
}
}
}
} | #include "xla/backends/cpu/runtime/kernel_thunk.h"
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
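// Minimal host-kernel registry whose single kernel doubles each input
// element into the output buffer, one element per thread along the x
// dimension.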
class AddF32HostKernel : public Thunk::FunctionRegistry {
public:
absl::StatusOr<Kernel> FindKernel(std::string_view name) override {
return +[](const SE_HOST_KernelCallFrame* call_frame) {
const SE_HOST_KernelArg& in = call_frame->args[0];
const SE_HOST_KernelArg& out = call_frame->args[1];
float* in_ptr = reinterpret_cast<float*>(in.data);
float* out_ptr = reinterpret_cast<float*>(out.data);
uint64_t i = call_frame->thread->x;
*(out_ptr + i) = *(in_ptr + i) + *(in_ptr + i);
return static_cast<SE_HOST_KernelError*>(nullptr);
};
}
};
TEST(KernelThunkTest, CheckAlignment) {
auto thunk =
KernelThunk::Create({"test"}, {}, {}, "test", se::ThreadDim(), {},
                          3);  // minimum alignment of 3: deliberately not a power of two
EXPECT_TRUE(absl::StrContains(thunk.status().message(),
"minimum alignment 3 is not a power of 2"));
}
TEST(KernelThunkTest, AddF32) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
std::vector<float> out(4, 0.0);
size_t size_in_bytes = in.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_alloc(0, size_in_bytes, 0);
BufferAllocation out_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice}, "add_f32",
                          se::ThreadDim(4), {0}));  // argument 0 declared invariant (read-only)
AddF32HostKernel host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError()) << execute_event.GetError();
std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
EXPECT_EQ(out, expected);
}
TEST(KernelThunkTest, AddF32Inline) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in_out = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = in_out.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_out_alloc(0, size_in_bytes, 0);
BufferAllocation::Slice in_out_slice(&in_out_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, KernelThunk::Create(
{"add_f32"}, {in_out_slice}, {in_out_slice}, "add_f32",
                      se::ThreadDim(4), {}));  // no invariant arguments declared
AddF32HostKernel host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
EXPECT_EQ(in_out, expected);
}
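// The tests below pass deliberately inconsistent invariant-buffer metadata
// and expect the debug-build verification to fail with an internal
// "Mismatch in invariant buffers metadata" error.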
TEST(KernelThunkInvariantBuffersTest, MissingBufferSlice) {
#ifdef NDEBUG
GTEST_SKIP() << "Invariant buffers check is disabled in optimized build.";
#endif
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
std::vector<float> out(4, 0.0);
size_t size_in_bytes = in.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_alloc(0, size_in_bytes, 0);
BufferAllocation out_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice}, "add_f32",
                          se::ThreadDim(4), {}));  // input is read-only but not declared invariant
AddF32HostKernel host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsError());
auto status = execute_event.GetError();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_TRUE(absl::StrContains(status.message(),
"Mismatch in invariant buffers metadata"));
}
TEST(KernelThunkInvariantBuffersTest, ExtraInputOutputBufferSlice) {
#ifdef NDEBUG
GTEST_SKIP() << "Invariant buffers check is disabled in optimized build.";
#endif
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in_out = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = in_out.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_out_alloc(0, size_in_bytes, 0);
BufferAllocation::Slice in_out_slice(&in_out_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, KernelThunk::Create(
{"add_f32"}, {in_out_slice}, {in_out_slice}, "add_f32",
                      se::ThreadDim(4), {0}));  // aliased in/out buffer wrongly declared invariant
AddF32HostKernel host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsError());
auto status = execute_event.GetError();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_TRUE(absl::StrContains(status.message(),
"Mismatch in invariant buffers metadata"));
}
TEST(KernelThunkInvariantBuffersTest,
MemorySectionIncorrectlyMarkedAsInvariant) {
#ifdef NDEBUG
GTEST_SKIP() << "Invariant buffers check is disabled in optimized build.";
#endif
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> in_out = {1.0, 2.0, 3.0, 4.0};
size_t size_in_bytes = in_out.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(in_out.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation in_0_alloc(0, size_in_bytes, 0);
BufferAllocation in_1_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice in_0_slice(&in_0_alloc, 0, size_in_bytes);
BufferAllocation::Slice in_1_slice(&in_1_alloc, 0, size_in_bytes);
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, KernelThunk::Create({"add_f32"}, {in_0_slice, in_1_slice},
{in_0_slice}, "add_f32", se::ThreadDim(4),
                                      {1}));  // argument 1 aliases written memory but is declared invariant
AddF32HostKernel host_kernels;
Thunk::ExecuteParams params = {&host_kernels, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_TRUE(execute_event.IsError());
auto status = execute_event.GetError();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_TRUE(absl::StrContains(status.message(),
"Mismatch in invariant buffers metadata"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/kernel_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/kernel_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a9a5a37-1608-4723-b051-5296431e6522 | cpp | tensorflow/tensorflow | resource_use | third_party/xla/xla/backends/cpu/runtime/resource_use.cc | third_party/xla/xla/backends/cpu/runtime/resource_use_test.cc | #include "xla/backends/cpu/runtime/resource_use.h"
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/types/span.h"
namespace xla::cpu {
std::shared_ptr<Resource> Resource::Create(Kind kind) {
return absl::WrapUnique(new Resource(kind));
}
Resource::Resource(Kind kind) : kind_(kind) {}
ResourceUse::ResourceUse(std::shared_ptr<Resource> resource,
ResourceAccess access)
: resource_(resource), access_(access) {}
ResourceUse::ReadWriteSet::ReadWriteSet() = default;
void ResourceUse::ReadWriteSet::Add(ResourceUse use) {
switch (use.access()) {
case ResourceUse::kRead:
read_.insert(use.resource());
break;
case ResourceUse::kWrite:
write_.insert(use.resource());
break;
}
}
void ResourceUse::ReadWriteSet::AddAll(absl::Span<const ResourceUse> uses) {
for (const auto& use : uses) Add(use);
}
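// A write conflicts with any earlier read or write of the same resource; a
// read conflicts only with an earlier write.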
bool ResourceUse::ReadWriteSet::HasConflicts(const ResourceUse& use) const {
return use.access() == ResourceAccess::kWrite
? write_.contains(use.resource()) || read_.contains(use.resource())
: write_.contains(use.resource());
}
bool ResourceUse::ReadWriteSet::HasConflicts(
absl::Span<const ResourceUse> uses) const {
return absl::c_any_of(
uses, [&](const ResourceUse& use) { return HasConflicts(use); });
}
bool ResourceUse::ReadWriteSet::HasConflicts(const ReadWriteSet& other) {
return absl::c_any_of(other.read_,
[&](const std::shared_ptr<Resource>& resource) {
return HasConflicts(ResourceUse::Read(resource));
}) ||
absl::c_any_of(other.write_,
[&](const std::shared_ptr<Resource>& resource) {
return HasConflicts(ResourceUse::Write(resource));
});
}
} | #include "xla/backends/cpu/runtime/resource_use.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ResourceUseTest, Equality) {
auto token = Resource::Create(Resource::kToken);
auto use0 = ResourceUse::Read(token);
auto use1 = ResourceUse::Write(token);
auto use2 = ResourceUse::Read(token);
EXPECT_NE(use0, use1);
EXPECT_EQ(use0, use2);
}
TEST(ResourceUseTest, ReadWriteSet) {
ResourceUse::ReadWriteSet rwset;
auto token0 = Resource::Create(Resource::kToken);
auto token1 = Resource::Create(Resource::kToken);
rwset.Add(ResourceUse::Read(token0));
EXPECT_FALSE(rwset.HasConflicts({ResourceUse::Read(token0)}));
EXPECT_TRUE(rwset.HasConflicts({ResourceUse::Write(token0)}));
EXPECT_FALSE(rwset.HasConflicts({ResourceUse::Read(token1)}));
EXPECT_FALSE(rwset.HasConflicts({ResourceUse::Write(token1)}));
rwset.Add(ResourceUse::Write(token0));
EXPECT_TRUE(rwset.HasConflicts({ResourceUse::Read(token0)}));
EXPECT_TRUE(rwset.HasConflicts({ResourceUse::Write(token0)}));
EXPECT_FALSE(rwset.HasConflicts({ResourceUse::Read(token1)}));
EXPECT_FALSE(rwset.HasConflicts({ResourceUse::Write(token1)}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/resource_use.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/resource_use_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d46120d8-760a-4260-9f54-29cb14e1086c | cpp | tensorflow/tensorflow | copy_thunk | third_party/xla/xla/service/gpu/runtime/copy_thunk.cc | third_party/xla/xla/backends/cpu/runtime/copy_thunk_test.cc | #include "xla/service/gpu/runtime/copy_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/event.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
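// Synchronous device-to-device copy: resolves both slices to device memory
// and enqueues a single Memcpy on the execution stream.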
DeviceToDeviceCopyThunk::DeviceToDeviceCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size)
: Thunk(Kind::kCopy, std::move(thunk_info)),
source_buffer_(source_buffer),
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
absl::Status DeviceToDeviceCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination_buffer_);
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source_buffer_);
VLOG(3) << "Memcpy D2D of size " << mem_size_ << " from "
<< source_data.opaque() << " to " << destination_data.opaque();
return params.stream->Memcpy(&destination_data, source_data, mem_size_);
}
CopyThunk::CopyThunk(ThunkInfo thunk_info,
const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer,
uint64_t mem_size)
: Thunk(Kind::kCopy, std::move(thunk_info)),
source_buffer_(source_buffer),
destination_buffer_(destination_buffer),
mem_size_(mem_size) {}
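// The base CopyThunk performs no work itself; the derived device-to-host and
// host-to-device thunks below implement the actual copies.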
absl::Status CopyThunk::ExecuteOnStream(const ExecuteParams& params) {
return absl::OkStatus();
}
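// AsyncEvents maps (executor, copy-start instruction) to the recorded stream
// event so the matching CopyDoneThunk can find it and wait on it.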
absl::Status CopyThunk::AsyncEvents::Emplace(se::StreamExecutor* executor,
const HloInstruction* instr,
std::unique_ptr<se::Event> event) {
Key key = {executor, instr};
absl::MutexLock lock(&mutex_);
VLOG(3) << "Emplace event " << event.get();
if (auto [it, inserted] = events_.try_emplace(key, std::move(event));
inserted) {
return absl::OkStatus();
}
return absl::InternalError("Async copy event already exists!");
}
absl::StatusOr<std::unique_ptr<se::Event>> CopyThunk::AsyncEvents::Extract(
se::StreamExecutor* executor, const HloInstruction* instr) {
Key key = {executor, instr};
absl::MutexLock lock(&mutex_);
if (auto event = events_.extract(key)) {
VLOG(3) << "Extract event " << event.mapped().get();
return std::move(event.mapped());
}
return absl::InternalError("Async copy event was not found!");
}
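// Device-to-host copy that may run on a dedicated execution stream; when it
// runs off the main stream, it records an event for the matching
// CopyDoneThunk to wait on.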
DeviceToHostCopyThunk::DeviceToHostCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* instr)
: CopyThunk(std::move(thunk_info), source_buffer, destination_buffer,
mem_size),
async_events_(std::move(async_events)),
instr_(instr) {}
absl::Status DeviceToHostCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination());
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source());
void* cpu_dst = destination_data.opaque();
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
TF_RETURN_IF_ERROR(stream->Memcpy(cpu_dst, source_data, size_bytes()));
if (stream == params.stream) {
VLOG(2) << "Memcpy D2H from the main stream";
return absl::OkStatus();
}
VLOG(2) << "Memcpy D2H from the other stream";
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(auto event, executor->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(event.get()));
VLOG(3) << "Emplace events: " << event.get()
<< " for instr: " << instr_->ToString();
return async_events_->Emplace(executor, instr_, std::move(event));
}
HostToDeviceCopyThunk::HostToDeviceCopyThunk(
ThunkInfo thunk_info, const BufferAllocation::Slice& source_buffer,
const BufferAllocation::Slice& destination_buffer, uint64_t mem_size,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* instr)
: CopyThunk(std::move(thunk_info), source_buffer, destination_buffer,
mem_size),
async_events_(std::move(async_events)),
instr_(instr) {}
absl::Status HostToDeviceCopyThunk::ExecuteOnStream(
const ExecuteParams& params) {
se::DeviceMemoryBase destination_data =
params.buffer_allocations->GetDeviceAddress(destination());
se::DeviceMemoryBase source_data =
params.buffer_allocations->GetDeviceAddress(source());
void* cpu_src = source_data.opaque();
TF_ASSIGN_OR_RETURN(
se::Stream * stream,
GetStreamForExecution(Thunk::execution_stream_id(), params));
TF_RETURN_IF_ERROR(stream->Memcpy(&destination_data, cpu_src, size_bytes()));
if (stream == params.stream) {
VLOG(2) << "Memcpy H2D from the main stream";
return absl::OkStatus();
}
VLOG(2) << "Memcpy H2D from the other stream";
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(auto event, executor->CreateEvent());
TF_RETURN_IF_ERROR(stream->RecordEvent(event.get()));
VLOG(3) << "Emplace events: " << event.get()
<< " for instr: " << instr_->ToString();
return async_events_->Emplace(executor, instr_, std::move(event));
}
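// CopyDoneThunk blocks the main stream until the event recorded by the
// corresponding copy-start thunk has been reached.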
CopyDoneThunk::CopyDoneThunk(
Thunk::Kind kind, ThunkInfo thunk_info,
std::shared_ptr<CopyThunk::AsyncEvents> async_events,
const HloInstruction* copy_start_instr)
: Thunk(kind, std::move(thunk_info)),
async_events_(std::move(async_events)),
copy_start_instr_(copy_start_instr) {}
absl::Status CopyDoneThunk::ExecuteOnStream(const ExecuteParams& params) {
VLOG(3) << "CopyDone thunk between a host and a device for: "
<< copy_start_instr_->ToString();
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Event> event,
async_events_->Extract(executor, copy_start_instr_));
return params.stream->WaitFor(event.get());
}
}
} | #include "xla/backends/cpu/runtime/copy_thunk.h"
#include <cstddef>
#include <vector>
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(CopyThunkTest, CopyEmptyShape) {
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(nullptr, 0));
buffers.emplace_back(se::DeviceMemoryBase(nullptr, 0));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, 100, 0);
BufferAllocation dst_alloc(1, 100, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, 0);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, 0);
Shape shape = ShapeUtil::MakeShape(F32, {0, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, shape, dst_slice, shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
}
TEST(CopyThunkTest, CopySameShape) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> src = {1.0, 2.0, 3.0, 4.0};
std::vector<float> dst(4, 0.0);
size_t size_in_bytes = src.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(src.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, size_in_bytes, 0);
BufferAllocation dst_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, size_in_bytes);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, size_in_bytes);
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, shape, dst_slice, shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(src, dst);
}
TEST(CopyThunkTest, CopyTransposed) {
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<float> src = {1.0, 2.0, 3.0, 4.0};
std::vector<float> dst(4, 0.0);
size_t size_in_bytes = src.size() * sizeof(float);
buffers.emplace_back(se::DeviceMemoryBase(src.data(), size_in_bytes));
buffers.emplace_back(se::DeviceMemoryBase(dst.data(), size_in_bytes));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, size_in_bytes, 0);
BufferAllocation dst_alloc(1, size_in_bytes, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, size_in_bytes);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, size_in_bytes);
Shape src_shape = ShapeUtil::MakeShape(F32, {2, 2});
*src_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
Shape dst_shape = ShapeUtil::MakeShape(F32, {2, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, src_shape, dst_slice, dst_shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
std::vector<float> expected = {1.0, 3.0, 2.0, 4.0};
EXPECT_EQ(expected, dst);
}
TEST(CopyThunkTest, CopyTransposedEmptyShape) {
std::vector<MaybeOwningDeviceMemory> buffers;
buffers.emplace_back(se::DeviceMemoryBase(nullptr, 0));
buffers.emplace_back(se::DeviceMemoryBase(nullptr, 0));
BufferAllocations allocations(buffers);
BufferAllocation src_alloc(0, 100, 0);
BufferAllocation dst_alloc(1, 100, 0);
BufferAllocation::Slice src_slice(&src_alloc, 0, 0);
BufferAllocation::Slice dst_slice(&dst_alloc, 0, 0);
Shape src_shape = ShapeUtil::MakeShape(F32, {0, 2});
*src_shape.mutable_layout() = LayoutUtil::MakeLayout({0, 1});
Shape dst_shape = ShapeUtil::MakeShape(F32, {0, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
CopyThunk::Create({"copy"}, src_slice, src_shape, dst_slice, dst_shape));
Thunk::ExecuteParams params = {nullptr, &allocations};
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/copy_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/copy_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab83b9d8-6c24-44c9-a4c8-cd77b911d2de | cpp | tensorflow/tensorflow | cupti_buffer_events | third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc | third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events_test.cc | #include "xla/backends/profiler/gpu/cupti_buffer_events.h"
#include "absl/strings/str_cat.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/backends/profiler/gpu/cupti_interface.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mem.h"
namespace xla {
namespace profiler {
namespace {
using absl::StatusCode;
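// CUPTI revises its activity record layouts across CUDA releases; the aliases
// below select the struct versions matching the toolkit headers in use, and
// the CuptiActivityHasGraphId trait marks which record types carry a graphId.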
template <typename CuptiActivity>
struct CuptiActivityHasGraphId {
static constexpr bool value = false;
};
#if CUDA_VERSION >= 12000
#define TF_CUPTI_HAS_CHANNEL_ID 1
using CuptiActivityKernelTy = CUpti_ActivityKernel9;
using CuptiActivityMemcpyTy = CUpti_ActivityMemcpy5;
using CuptiActivityMemcpyP2PTy = CUpti_ActivityMemcpyPtoP4;
using CuptiActivityMemsetTy = CUpti_ActivityMemset4;
template <>
struct CuptiActivityHasGraphId<CuptiActivityKernelTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyP2PTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemsetTy> {
static constexpr bool value = true;
};
#elif CUDA_VERSION >= 11060
#define TF_CUPTI_HAS_CHANNEL_ID 1
using CuptiActivityKernelTy = CUpti_ActivityKernel7;
using CuptiActivityMemcpyTy = CUpti_ActivityMemcpy5;
using CuptiActivityMemcpyP2PTy = CUpti_ActivityMemcpyPtoP4;
using CuptiActivityMemsetTy = CUpti_ActivityMemset4;
template <>
struct CuptiActivityHasGraphId<CuptiActivityKernelTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemcpyP2PTy> {
static constexpr bool value = true;
};
template <>
struct CuptiActivityHasGraphId<CuptiActivityMemsetTy> {
static constexpr bool value = true;
};
#else
using CuptiActivityKernelTy = CUpti_ActivityKernel4;
using CuptiActivityMemcpyTy = CUpti_ActivityMemcpy;
using CuptiActivityMemcpyP2PTy = CUpti_ActivityMemcpy2;
using CuptiActivityMemsetTy = CUpti_ActivityMemset;
#endif
#if CUDA_VERSION >= 11070
using CuptiActivityGraphTraceTy = CUpti_ActivityGraphTrace;
#endif
const char *getActivityOverheadKindString(CUpti_ActivityOverheadKind kind) {
switch (kind) {
case CUPTI_ACTIVITY_OVERHEAD_DRIVER_COMPILER:
return "COMPILER";
case CUPTI_ACTIVITY_OVERHEAD_CUPTI_BUFFER_FLUSH:
return "BUFFER_FLUSH";
case CUPTI_ACTIVITY_OVERHEAD_CUPTI_INSTRUMENTATION:
return "INSTRUMENTATION";
case CUPTI_ACTIVITY_OVERHEAD_CUPTI_RESOURCE:
return "RESOURCE";
default:
break;
}
return "<UNKNOWN>";
}
const char *getActivityUnifiedMemoryKindString(
CUpti_ActivityUnifiedMemoryCounterKind kind) {
switch (kind) {
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD:
return "UM_BYTES_TRANSFER_HTOD";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH:
return "UM_BYTES_TRANSFER_DTOH";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT:
return "UM_CPU_PAGE_FAULT";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_GPU_PAGE_FAULT:
return "UM_GPU_PAGE_FAULT";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING:
return "UM_THRASHING";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THROTTLING:
return "UM_THROTTLING";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP:
return "UM_REMOTE_MAP";
case CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD:
return "UM_BYTES_TRANSFER_DTOD";
default:
break;
}
return "<UNKNOWN>";
}
template <typename CuptiActivity>
void SetEventGraphId(CuptiTracerEvent &event,
const CuptiActivity *cupti_activity) {
if constexpr (CuptiActivityHasGraphId<CuptiActivity>::value) {
event.graph_id = cupti_activity->graphId;
}
}
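// Converts one CUPTI kernel activity record into a CuptiTracerEvent,
// attaching the annotation captured at launch time via the correlation id.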
template <bool cupti_has_channel_id, typename CuptiActivityKernel>
void AddKernelActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityKernel *kernel) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Kernel;
event.source = CuptiTracerEventSource::Activity;
event.name = kernel->name;
event.start_time_ns = kernel->start;
event.end_time_ns = kernel->end;
event.device_id = kernel->deviceId;
event.context_id = kernel->contextId;
event.stream_id = kernel->streamId;
event.correlation_id = kernel->correlationId;
AnnotationMap::AnnotationInfo info =
collector.annotation_map.LookUp(event.device_id, event.correlation_id);
event.annotation = info.annotation;
event.nvtx_range = info.nvtx_range;
SetEventGraphId(event, kernel);
event.kernel_info.registers_per_thread = kernel->registersPerThread;
event.kernel_info.static_shared_memory_usage = kernel->staticSharedMemory;
event.kernel_info.dynamic_shared_memory_usage = kernel->dynamicSharedMemory;
event.kernel_info.block_x = kernel->blockX;
event.kernel_info.block_y = kernel->blockY;
event.kernel_info.block_z = kernel->blockZ;
event.kernel_info.grid_x = kernel->gridX;
event.kernel_info.grid_y = kernel->gridY;
event.kernel_info.grid_z = kernel->gridZ;
if constexpr (cupti_has_channel_id) {
event.kernel_info.channel_id = kernel->channelID;
event.kernel_info.channel_type = kernel->channelType;
}
collector.receive(std::move(event));
}
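// Graph-trace records (available on CUDA 11.7+) describe a whole CUDA graph
// launch as a single activity.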
void AddGraphTraceActivityEvent(CuptiEventCollectorDelegate &collector,
CuptiActivityGraphTraceTy *graph_trace) {
AnnotationMap::AnnotationInfo info = collector.annotation_map.LookUp(
graph_trace->deviceId, graph_trace->correlationId);
  collector.receive(CuptiTracerEvent{
      /*type=*/CuptiTracerEventType::CudaGraph,
      /*source=*/CuptiTracerEventSource::Activity,
      /*name=*/absl::StrCat("CudaGraphExec:", graph_trace->graphId),
      /*annotation=*/info.annotation,
      /*nvtx_range=*/info.nvtx_range,
      /*start_time_ns=*/graph_trace->start,
      /*end_time_ns=*/graph_trace->end,
      /*device_id=*/graph_trace->deviceId,
      /*correlation_id=*/graph_trace->correlationId,
      /*thread_id=*/CuptiTracerEvent::kInvalidThreadId,
      /*context_id=*/graph_trace->contextId,
      /*stream_id=*/graph_trace->streamId,
      /*graph_id=*/graph_trace->graphId,
  });
}
void AddMemcpyActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityMemcpyTy *memcpy) {
CuptiTracerEvent event{};
switch (memcpy->copyKind) {
case CUPTI_ACTIVITY_MEMCPY_KIND_HTOD:
event.type = CuptiTracerEventType::MemcpyH2D;
event.name = "MemcpyH2D";
break;
case CUPTI_ACTIVITY_MEMCPY_KIND_DTOH:
event.type = CuptiTracerEventType::MemcpyD2H;
event.name = "MemcpyD2H";
break;
case CUPTI_ACTIVITY_MEMCPY_KIND_DTOD:
event.type = CuptiTracerEventType::MemcpyD2D;
event.name = "MemcpyD2D";
break;
case CUPTI_ACTIVITY_MEMCPY_KIND_PTOP:
event.type = CuptiTracerEventType::MemcpyP2P;
event.name = "MemcpyP2P";
break;
default:
event.type = CuptiTracerEventType::MemcpyOther;
event.name = "MemcpyOther";
break;
}
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = memcpy->start;
event.end_time_ns = memcpy->end;
event.device_id = memcpy->deviceId;
event.context_id = memcpy->contextId;
event.stream_id = memcpy->streamId;
event.correlation_id = memcpy->correlationId;
AnnotationMap::AnnotationInfo info =
collector.annotation_map.LookUp(event.device_id, event.correlation_id);
event.annotation = info.annotation;
SetEventGraphId(event, memcpy);
event.memcpy_info.copy_kind = memcpy->copyKind;
event.memcpy_info.num_bytes = memcpy->bytes;
event.memcpy_info.destination = memcpy->deviceId;
event.memcpy_info.async = memcpy->flags & CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC;
event.memcpy_info.src_mem_kind = memcpy->srcKind;
event.memcpy_info.dst_mem_kind = memcpy->dstKind;
#if TF_CUPTI_HAS_CHANNEL_ID
event.memcpy_info.channel_id = memcpy->channelID;
event.memcpy_info.channel_type = memcpy->channelType;
#endif
collector.receive(std::move(event));
}
void AddMemcpyP2PActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityMemcpyP2PTy *memcpy) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::MemcpyP2P;
event.name = "MemcpyP2P";
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = memcpy->start;
event.end_time_ns = memcpy->end;
event.device_id = memcpy->srcDeviceId;
event.context_id = memcpy->contextId;
event.stream_id = memcpy->streamId;
event.correlation_id = memcpy->correlationId;
AnnotationMap::AnnotationInfo info =
collector.annotation_map.LookUp(event.device_id, event.correlation_id);
event.annotation = info.annotation;
SetEventGraphId(event, memcpy);
event.memcpy_info.copy_kind = CUPTI_ACTIVITY_MEMCPY_KIND_PTOP;
event.memcpy_info.num_bytes = memcpy->bytes;
event.memcpy_info.destination = memcpy->dstDeviceId;
event.memcpy_info.async = memcpy->flags & CUPTI_ACTIVITY_FLAG_MEMCPY_ASYNC;
event.memcpy_info.src_mem_kind = memcpy->srcKind;
event.memcpy_info.dst_mem_kind = memcpy->dstKind;
#if TF_CUPTI_HAS_CHANNEL_ID
event.memcpy_info.channel_id = memcpy->channelID;
event.memcpy_info.channel_type = memcpy->channelType;
#endif
collector.receive(std::move(event));
}
void AddCuptiOverheadActivityEvent(CuptiEventCollectorDelegate &collector,
const CUpti_ActivityOverhead *overhead) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Overhead;
event.name = getActivityOverheadKindString(overhead->overheadKind);
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = overhead->start;
event.end_time_ns = overhead->end;
event.device_id = 0;
switch (overhead->objectKind) {
case CUPTI_ACTIVITY_OBJECT_UNKNOWN:
return;
case CUPTI_ACTIVITY_OBJECT_THREAD:
case CUPTI_ACTIVITY_OBJECT_PROCESS:
event.thread_id = overhead->objectId.pt.threadId;
break;
case CUPTI_ACTIVITY_OBJECT_STREAM:
event.stream_id = overhead->objectId.dcs.streamId;
TF_FALLTHROUGH_INTENDED;
case CUPTI_ACTIVITY_OBJECT_DEVICE:
case CUPTI_ACTIVITY_OBJECT_CONTEXT:
event.device_id = overhead->objectId.dcs.deviceId;
break;
default:
LOG(ERROR) << "Unexpected object kind: " << overhead->objectKind;
return;
}
collector.receive(std::move(event));
}
void AddUnifiedMemoryActivityEvent(
CuptiEventCollectorDelegate &collector,
const CUpti_ActivityUnifiedMemoryCounter2 *record) {
VLOG(3) << "Cuda Unified Memory Activity, kind: " << record->counterKind
<< " src: " << record->srcId << " dst: " << record->dstId;
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::UnifiedMemory;
event.name = getActivityUnifiedMemoryKindString(record->counterKind);
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = record->start;
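  // Counter-style records (page faults, thrashing, remote map) carry no
  // duration, so give them a minimal 1 ns span to keep them renderable.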
if (record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_CPU_PAGE_FAULT_COUNT ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_THRASHING ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_REMOTE_MAP ||
record->end <= record->start) {
event.end_time_ns = record->start + 1;
} else {
event.end_time_ns = record->end;
}
event.device_id = record->srcId;
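  // Unified-memory events have no real stream; map each counter kind onto its
  // own pseudo stream so the kinds appear on separate trace lines.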
constexpr int kPseudoStreamId = 0x10000000;
event.stream_id = kPseudoStreamId + record->counterKind;
event.memcpy_info.copy_kind = CUPTI_ACTIVITY_MEMCPY_KIND_UNKNOWN;
if (record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_HTOD ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOH ||
record->counterKind ==
CUPTI_ACTIVITY_UNIFIED_MEMORY_COUNTER_KIND_BYTES_TRANSFER_DTOD) {
event.memcpy_info.num_bytes = record->value;
} else {
event.memcpy_info.num_bytes = 0;
}
event.memcpy_info.destination = record->dstId;
event.memcpy_info.async = false;
collector.receive(std::move(event));
}
void AddMemoryActivityEvent(CuptiEventCollectorDelegate &collector,
const CUpti_ActivityMemory *memory) {
CuptiTracerEvent event{};
event.name = absl::StrCat("Memory ", GetMemoryKindName(memory->memoryKind));
event.type = CuptiTracerEventType::MemoryResidency;
event.source = CuptiTracerEventSource::Activity;
event.start_time_ns = memory->start;
event.end_time_ns = std::max(memory->end, memory->start + 1);
event.device_id = memory->deviceId;
event.context_id = memory->contextId;
event.stream_id = 0;
event.memory_residency_info.num_bytes = memory->bytes;
event.memory_residency_info.mem_kind = memory->memoryKind;
event.memory_residency_info.address = memory->address;
VLOG(5) << "Cuda activity " << event.name
<< " addr: " << reinterpret_cast<void *>(memory->address)
<< " bytes: " << memory->bytes;
collector.receive(std::move(event));
}
void AddMemsetActivityEvent(CuptiEventCollectorDelegate &collector,
const CuptiActivityMemsetTy *memset) {
auto mem_kind = memset->memoryKind;
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Memset;
event.source = CuptiTracerEventSource::Activity;
event.name = absl::StrCat("Memset ", mem_kind);
event.start_time_ns = memset->start;
event.end_time_ns = std::max(memset->end, memset->start + 1);
event.device_id = memset->deviceId;
event.correlation_id = memset->correlationId;
event.context_id = memset->contextId;
event.stream_id = memset->streamId;
SetEventGraphId(event, memset);
event.memset_info.num_bytes = memset->bytes;
event.memset_info.mem_kind = mem_kind;
event.memset_info.async = (memset->flags & CUPTI_ACTIVITY_FLAG_MEMSET_ASYNC);
#if TF_CUPTI_HAS_CHANNEL_ID
event.memset_info.channel_id = memset->channelID;
event.memset_info.channel_type = memset->channelType;
#endif
VLOG(5) << "Cuda activity " << event.name << " bytes: " << memset->bytes
<< " async: " << event.memset_info.async;
collector.receive(std::move(event));
}
void AddSynchronizationActivityEvent(
CuptiEventCollectorDelegate &collector,
const CUpti_ActivitySynchronization *sync) {
CuptiTracerEvent event{};
event.type = CuptiTracerEventType::Generic;
event.source = CuptiTracerEventSource::Activity;
switch (sync->type) {
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_EVENT_SYNCHRONIZE:
event.name = "cuEventSynchronize";
break;
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_WAIT_EVENT:
event.name = "cuStreamWaitEvent";
break;
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_STREAM_SYNCHRONIZE:
event.name = "cuStreamSynchronize";
break;
case CUPTI_ACTIVITY_SYNCHRONIZATION_TYPE_CONTEXT_SYNCHRONIZE:
event.name = "cuCtxSynchronize";
break;
default:
event.name = "unknown synchronization event";
break;
}
event.start_time_ns = sync->start;
event.end_time_ns = std::max(sync->end, sync->start + 1);
event.correlation_id = sync->correlationId;
event.context_id = sync->contextId;
VLOG(5) << "Cuda activity " << event.name;
collector.receive(std::move(event));
}
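// Iterates over every record in one CUPTI activity buffer, dispatching on the
// record kind; stops cleanly on CUPTI_ERROR_MAX_LIMIT_REACHED (end of buffer)
// and counts rather than converts records beyond max_activity_event_count.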
static absl::Status ConvertActivityBuffer(
CuptiEventCollectorDelegate &collector, uint8_t *buffer, const size_t size,
const size_t max_activity_event_count, size_t &total_activity_event_count,
size_t &dropped_activity_event_count) {
CuptiInterface *cupti_interface = GetCuptiInterface();
CUpti_Activity *record = nullptr;
while (true) {
CUptiResult status =
cupti_interface->ActivityGetNextRecord(buffer, size, &record);
if (status == CUPTI_SUCCESS) {
if (total_activity_event_count >= max_activity_event_count) {
dropped_activity_event_count++;
continue;
}
total_activity_event_count++;
switch (record->kind) {
case CUPTI_ACTIVITY_KIND_KERNEL:
case CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL:
AddKernelActivityEvent<TF_CUPTI_HAS_CHANNEL_ID>(
collector, reinterpret_cast<CuptiActivityKernelTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_CDP_KERNEL:
AddKernelActivityEvent<false>(
collector, reinterpret_cast<CUpti_ActivityCdpKernel *>(record));
break;
case CUPTI_ACTIVITY_KIND_MEMCPY:
AddMemcpyActivityEvent(
collector, reinterpret_cast<CuptiActivityMemcpyTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_MEMCPY2:
AddMemcpyP2PActivityEvent(
collector, reinterpret_cast<CuptiActivityMemcpyP2PTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_OVERHEAD:
AddCuptiOverheadActivityEvent(
collector, reinterpret_cast<CUpti_ActivityOverhead *>(record));
break;
case CUPTI_ACTIVITY_KIND_UNIFIED_MEMORY_COUNTER:
AddUnifiedMemoryActivityEvent(
collector,
reinterpret_cast<CUpti_ActivityUnifiedMemoryCounter2 *>(record));
break;
case CUPTI_ACTIVITY_KIND_MEMORY: {
AddMemoryActivityEvent(
collector, reinterpret_cast<CUpti_ActivityMemory *>(record));
} break;
case CUPTI_ACTIVITY_KIND_MEMSET:
AddMemsetActivityEvent(
collector, reinterpret_cast<CuptiActivityMemsetTy *>(record));
break;
case CUPTI_ACTIVITY_KIND_SYNCHRONIZATION:
AddSynchronizationActivityEvent(
collector,
reinterpret_cast<CUpti_ActivitySynchronization *>(record));
break;
#if CUDA_VERSION >= 11070
case CUPTI_ACTIVITY_KIND_GRAPH_TRACE:
AddGraphTraceActivityEvent(
collector, reinterpret_cast<CuptiActivityGraphTraceTy *>(record));
break;
#endif
default:
VLOG(3) << "Activity type " << record->kind << " is not supported.";
break;
}
} else if (status == CUPTI_ERROR_MAX_LIMIT_REACHED) {
break;
} else if (status == CUPTI_ERROR_INVALID_KIND) {
VLOG(3) << "CUPTI parse ACTIVITY buffer got CUPTI_ERROR_INVALID_KIND";
break;
} else {
LOG(WARNING) << "CUPTI parse ACTIVITY buffer error: " << status;
return absl::Status(StatusCode::kInternal,
"Parse cupti activity buffer error.");
}
}
VLOG(3) << "CUPTI tracer post-process one ACTIVITY buffer of size: " << size
<< ", total events count:" << total_activity_event_count;
return absl::OkStatus();
}
}
absl::string_view StringDeduper::Dedup(absl::string_view str,
size_t max_unique_count) {
if (str.empty()) return absl::string_view();
auto it = strings_.find(str);
if (it != strings_.end()) return *it;
if (max_unique_count == 0 || strings_.size() < max_unique_count)
return *strings_.emplace(str).first;
return absl::string_view();
}
void AnnotationMap::Add(uint32_t device_id, uint32_t correlation_id,
const absl::string_view annotation,
const absl::string_view nvtx_range) {
if (annotation.empty() && nvtx_range.empty()) return;
VLOG(3) << "Add annotation: device_id: " << device_id
<< " correlation_id: " << correlation_id
<< " annotation: " << annotation;
if (device_id >= per_device_map_.size()) return;
auto &per_device_map = per_device_map_[device_id];
if (per_device_map.annotation_deduper.Size() < max_size_) {
AnnotationInfo info;
info.annotation = per_device_map.annotation_deduper.Dedup(annotation);
info.nvtx_range = per_device_map.nvtx_range_deduper.Dedup(nvtx_range);
per_device_map.correlation_map.emplace(correlation_id, info);
}
}
AnnotationMap::AnnotationInfo AnnotationMap::LookUp(
uint32_t device_id, uint32_t correlation_id) const {
if (device_id >= per_device_map_.size()) return AnnotationInfo();
auto &per_device_map = per_device_map_[device_id];
auto it = per_device_map.correlation_map.find(correlation_id);
return it != per_device_map.correlation_map.end() ? it->second
: AnnotationInfo();
}
CuptiActivityBufferManager::ActivityBufferAndSize::ActivityBufferAndSize(
uint8_t *p, size_t sz)
: buffer(p,
[](uint8_t *p) {
if (p != nullptr) tsl::port::AlignedFree(p);
}),
size(sz) {}
void AddActivityBufferListEventsTo(
CuptiEventCollectorDelegate &collector,
std::list<CuptiActivityBufferManager::ActivityBufferAndSize> &buffer_list,
size_t max_activity_event_count, size_t &dropped_activity_event_count) {
dropped_activity_event_count = 0;
size_t total_activity_event_count = 0;
while (!buffer_list.empty()) {
CuptiActivityBufferManager::ActivityBufferAndSize buffer_and_size(
std::move(buffer_list.front()));
buffer_list.pop_front();
ConvertActivityBuffer(collector, buffer_and_size.buffer.get(),
buffer_and_size.size, max_activity_event_count,
total_activity_event_count,
dropped_activity_event_count)
.IgnoreError();
}
}
CallbackAnnotationsAndEvents::CallbackAnnotationsAndEvents(
CallbackAnnotationsAndEvents &&another) {
*this = std::move(another);
}
CallbackAnnotationsAndEvents &CallbackAnnotationsAndEvents::operator=(
CallbackAnnotationsAndEvents &&another) {
annotations_ = std::move(another.annotations_);
nvtx_ranges_ = std::move(another.nvtx_ranges_);
num_dropped_events_ = another.num_dropped_events_;
event_queue_ = std::move(another.event_queue_);
another.Clear();
return *this;
}
void CallbackAnnotationsAndEvents::Clear() {
annotations_.Clear();
nvtx_ranges_.Clear();
num_dropped_events_ = 0;
event_queue_.Clear();
}
}
} | #include "xla/backends/profiler/gpu/cupti_buffer_events.h"
#include "tsl/platform/test.h"
namespace xla {
namespace profiler {
namespace test {
namespace {
TEST(CuptiBufferEventsTest, EventInitialization) {
  CuptiTracerEvent event{
      /*type=*/CuptiTracerEventType::CudaGraph,
      /*source=*/CuptiTracerEventSource::Activity,
      /*name=*/"CudaGraphExec:2",
      /*annotation=*/"annotation",
      /*nvtx_range=*/"nvtx_range",
      /*start_time_ns=*/100,
      /*end_time_ns=*/200,
      /*device_id=*/6,
      /*correlation_id=*/8,
      /*thread_id=*/12345,
      /*context_id=*/9,
      /*stream_id=*/2,
      /*graph_id=*/5,
  };
EXPECT_EQ(event.type, CuptiTracerEventType::CudaGraph);
EXPECT_EQ(event.source, CuptiTracerEventSource::Activity);
EXPECT_EQ(event.name, "CudaGraphExec:2");
EXPECT_EQ(event.annotation, "annotation");
EXPECT_EQ(event.nvtx_range, "nvtx_range");
EXPECT_EQ(event.start_time_ns, 100);
EXPECT_EQ(event.end_time_ns, 200);
EXPECT_EQ(event.device_id, 6);
EXPECT_EQ(event.correlation_id, 8);
EXPECT_EQ(event.thread_id, 12345);
EXPECT_EQ(event.context_id, 9);
EXPECT_EQ(event.stream_id, 2);
EXPECT_EQ(event.graph_id, 5);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_buffer_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f054a731-cf43-403c-8e10-e90641ca41dd | cpp | tensorflow/tensorflow | cupti_error_manager | third_party/xla/xla/backends/profiler/gpu/cupti_error_manager.cc | third_party/xla/xla/backends/profiler/gpu/cupti_error_manager_test.cc | #include "xla/backends/profiler/gpu/cupti_error_manager.h"
#include <utility>
#include "absl/debugging/leak_check.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace profiler {
using tsl::mutex_lock;
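// CuptiErrorManager wraps another CuptiInterface with fail-safe semantics:
// the first CUPTI error triggers the registered undo functions (disabling
// whatever was enabled so far), after which all subsequent calls are ignored.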
CuptiErrorManager::CuptiErrorManager(std::unique_ptr<CuptiInterface> interface)
: interface_(std::move(interface)), disabled_(0), undo_disabled_(false) {}
#define IGNORE_CALL_IF_DISABLED \
if (disabled_) { \
LOG(ERROR) << "cupti" << __func__ << ": ignored due to a previous error."; \
return CUPTI_ERROR_DISABLED; \
} \
VLOG(1) << "cupti" << __func__;
#define ALLOW_ERROR(e, ERROR) \
if (e == ERROR) { \
VLOG(1) << "cupti" << __func__ << ": error " << static_cast<int>(e) \
<< ": " << ResultString(e) << " (allowed)"; \
return e; \
}
#define LOG_AND_DISABLE_IF_ERROR(e) \
if (e != CUPTI_SUCCESS) { \
LOG(ERROR) << "cupti" << __func__ << ": error " << static_cast<int>(e) \
<< ": " << ResultString(e); \
UndoAndDisable(); \
}
void CuptiErrorManager::RegisterUndoFunction(
const CuptiErrorManager::UndoFunction& func) {
mutex_lock lock(undo_stack_mu_);
undo_stack_.push_back(func);
}
CUptiResult CuptiErrorManager::ActivityDisable(CUpti_ActivityKind kind) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityDisable(kind);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityEnable(CUpti_ActivityKind kind) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityEnable(kind);
if (error == CUPTI_SUCCESS) {
auto f = std::bind(&CuptiErrorManager::ActivityDisable, this, kind);
RegisterUndoFunction(f);
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
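// Flushing is not gated on the disabled flag, so buffered activity can still
// be drained even after an error has shut tracing down.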
CUptiResult CuptiErrorManager::ActivityFlushAll(uint32_t flag) {
CUptiResult error = interface_->ActivityFlushAll(flag);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityGetNextRecord(
uint8_t* buffer, size_t valid_buffer_size_bytes, CUpti_Activity** record) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityGetNextRecord(
buffer, valid_buffer_size_bytes, record);
ALLOW_ERROR(error, CUPTI_ERROR_MAX_LIMIT_REACHED);
ALLOW_ERROR(error, CUPTI_ERROR_INVALID_KIND);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityGetNumDroppedRecords(CUcontext context,
uint32_t stream_id,
size_t* dropped) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->ActivityGetNumDroppedRecords(context, stream_id, dropped);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityConfigureUnifiedMemoryCounter(
CUpti_ActivityUnifiedMemoryCounterConfig* config, uint32_t count) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->ActivityConfigureUnifiedMemoryCounter(config, count);
return error;
}
CUptiResult CuptiErrorManager::ActivityRegisterCallbacks(
CUpti_BuffersCallbackRequestFunc func_buffer_requested,
CUpti_BuffersCallbackCompleteFunc func_buffer_completed) {
IGNORE_CALL_IF_DISABLED;
absl::LeakCheckDisabler disabler;
CUptiResult error = interface_->ActivityRegisterCallbacks(
func_buffer_requested, func_buffer_completed);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::ActivityUsePerThreadBuffer() {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->ActivityUsePerThreadBuffer();
return error;
}
CUptiResult CuptiErrorManager::SetActivityFlushPeriod(uint32_t period_ms) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->SetActivityFlushPeriod(period_ms);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetDeviceId(CUcontext context,
uint32_t* device_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetDeviceId(context, device_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetTimestamp(uint64_t* timestamp) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetTimestamp(timestamp);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::Finalize() {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->Finalize();
ALLOW_ERROR(error, CUPTI_ERROR_API_NOT_IMPLEMENTED);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::EnableCallback(uint32_t enable,
CUpti_SubscriberHandle subscriber,
CUpti_CallbackDomain domain,
CUpti_CallbackId callback_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->EnableCallback(enable, subscriber, domain, callback_id);
if (error == CUPTI_SUCCESS) {
if (enable == 1) {
auto f = std::bind(&CuptiErrorManager::EnableCallback, this,
                         /*enable=*/0, subscriber, domain, callback_id);
RegisterUndoFunction(f);
}
} else {
LOG(ERROR) << "cupti" << __func__
<< ": error with domain:" << static_cast<int>(domain)
<< " and callback_id:" << static_cast<int>(callback_id);
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::EnableDomain(uint32_t enable,
CUpti_SubscriberHandle subscriber,
CUpti_CallbackDomain domain) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->EnableDomain(enable, subscriber, domain);
if (error == CUPTI_SUCCESS) {
if (enable == 1) {
auto f = std::bind(&CuptiErrorManager::EnableDomain, this,
                         /*enable=*/0, subscriber, domain);
RegisterUndoFunction(f);
}
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::Subscribe(CUpti_SubscriberHandle* subscriber,
CUpti_CallbackFunc callback,
void* userdata) {
IGNORE_CALL_IF_DISABLED;
absl::LeakCheckDisabler disabler;
CUptiResult error = interface_->Subscribe(subscriber, callback, userdata);
if (error == CUPTI_SUCCESS) {
auto f = std::bind(&CuptiErrorManager::Unsubscribe, this, *subscriber);
RegisterUndoFunction(f);
}
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::Unsubscribe(CUpti_SubscriberHandle subscriber) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->Unsubscribe(subscriber);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
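// Replays the registered undo functions in LIFO order and then marks the
// interface disabled; undo_disabled_ prevents the undo callbacks, which call
// back into this class, from re-triggering the teardown.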
void CuptiErrorManager::UndoAndDisable() {
if (undo_disabled_) {
return;
}
mutex_lock lock(undo_stack_mu_);
undo_disabled_ = true;
while (!undo_stack_.empty()) {
LOG(ERROR) << "CuptiErrorManager is disabling profiling automatically.";
undo_stack_.back()();
undo_stack_.pop_back();
}
undo_disabled_ = false;
disabled_ = 1;
}
CUptiResult CuptiErrorManager::GetResultString(CUptiResult result,
const char** str) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetResultString(result, str);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetContextId(CUcontext context,
uint32_t* context_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetContextId(context, context_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetStreamIdEx(CUcontext context, CUstream stream,
uint8_t per_thread_stream,
uint32_t* stream_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error =
interface_->GetStreamIdEx(context, stream, per_thread_stream, stream_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetGraphId(CUgraph graph, uint32_t* graph_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetGraphId(graph, graph_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
CUptiResult CuptiErrorManager::GetGraphExecId(CUgraphExec graph_exec,
uint32_t* graph_id) {
IGNORE_CALL_IF_DISABLED;
CUptiResult error = interface_->GetGraphExecId(graph_exec, graph_id);
LOG_AND_DISABLE_IF_ERROR(error);
return error;
}
void CuptiErrorManager::CleanUp() {
if (undo_disabled_) {
return;
}
mutex_lock lock(undo_stack_mu_);
undo_disabled_ = true;
while (!undo_stack_.empty()) {
undo_stack_.pop_back();
}
undo_disabled_ = false;
}
std::string CuptiErrorManager::ResultString(CUptiResult error) const {
const char* error_message = nullptr;
if (interface_->GetResultString(error, &error_message) == CUPTI_SUCCESS &&
error_message != nullptr) {
return error_message;
}
return "";
}
}
} | #if GOOGLE_CUDA
#include "xla/backends/profiler/gpu/cupti_error_manager.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "xla/backends/profiler/gpu/cuda_test.h"
#include "xla/backends/profiler/gpu/cupti_interface.h"
#include "xla/backends/profiler/gpu/cupti_tracer.h"
#include "xla/backends/profiler/gpu/cupti_wrapper.h"
#include "xla/backends/profiler/gpu/mock_cupti.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "tsl/platform/test.h"
namespace xla {
namespace profiler {
namespace test {
using xla::profiler::CuptiInterface;
using xla::profiler::CuptiTracer;
using xla::profiler::CuptiTracerCollectorOptions;
using xla::profiler::CuptiTracerOptions;
using xla::profiler::CuptiWrapper;
using ::testing::_;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::Sequence;
using ::testing::StrictMock;
class TestableCuptiTracer : public CuptiTracer {
public:
explicit TestableCuptiTracer(CuptiInterface* cupti_interface)
: CuptiTracer(cupti_interface) {}
};
class CuptiErrorManagerTest : public ::testing::Test {
protected:
CuptiErrorManagerTest() {}
void SetUp() override {
ASSERT_GT(CuptiTracer::NumGpus(), 0) << "No devices found";
auto mock_cupti = std::make_unique<StrictMock<MockCupti>>();
mock_ = mock_cupti.get();
cupti_error_manager_ =
std::make_unique<CuptiErrorManager>(std::move(mock_cupti));
cupti_tracer_ =
std::make_unique<TestableCuptiTracer>(cupti_error_manager_.get());
cupti_wrapper_ = std::make_unique<CuptiWrapper>();
CuptiTracerCollectorOptions collector_options;
collector_options.num_gpus = CuptiTracer::NumGpus();
uint64_t start_gputime_ns = CuptiTracer::GetTimestamp();
uint64_t start_walltime_ns = tsl::profiler::GetCurrentTimeNanos();
cupti_collector_ = CreateCuptiCollector(
collector_options, start_walltime_ns, start_gputime_ns);
}
void EnableProfiling(const CuptiTracerOptions& option) {
cupti_tracer_->Enable(option, cupti_collector_.get());
}
void DisableProfiling() { cupti_tracer_->Disable(); }
bool CuptiDisabled() const { return cupti_error_manager_->Disabled(); }
void RunGpuApp() {
MemCopyH2D();
PrintfKernel(10);
Synchronize();
MemCopyD2H();
}
StrictMock<MockCupti>* mock_;
std::unique_ptr<TestableCuptiTracer> cupti_tracer_ = nullptr;
std::unique_ptr<CuptiInterface> cupti_error_manager_;
std::unique_ptr<CuptiWrapper> cupti_wrapper_;
std::unique_ptr<xla::profiler::CuptiTraceCollector> cupti_collector_;
};
TEST_F(CuptiErrorManagerTest, GpuTraceActivityEnableTest) {
Sequence s1;
EXPECT_CALL(*mock_, Subscribe(_, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
const int cb_enable_times = IsCudaNewEnoughForGraphTraceTest() ? 4 : 1;
EXPECT_CALL(*mock_, EnableCallback(1, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityUsePerThreadBuffer));
EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityRegisterCallbacks));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_KERNEL))
.InSequence(s1)
.WillOnce(Return(CUPTI_ERROR_UNKNOWN));
EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
EXPECT_CALL(*mock_, EnableCallback(0, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
EXPECT_CALL(*mock_, Unsubscribe(_))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
EXPECT_FALSE(CuptiDisabled());
CuptiTracerOptions options;
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
options.cbids_selected.push_back(CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel);
EnableProfiling(options);
EXPECT_TRUE(CuptiDisabled());
RunGpuApp();
EXPECT_TRUE(CuptiDisabled());
DisableProfiling();
EXPECT_TRUE(CuptiDisabled());
}
TEST_F(CuptiErrorManagerTest, GpuTraceAutoEnableTest) {
EXPECT_FALSE(CuptiDisabled());
Sequence s1;
EXPECT_CALL(*mock_, Subscribe(_, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Subscribe));
const int cb_enable_times = IsCudaNewEnoughForGraphTraceTest() ? 3 : 0;
if (cb_enable_times > 0) {
EXPECT_CALL(*mock_, EnableCallback(1, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
}
EXPECT_CALL(*mock_, EnableDomain(1, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
EXPECT_CALL(*mock_, ActivityUsePerThreadBuffer())
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityUsePerThreadBuffer));
EXPECT_CALL(*mock_, ActivityRegisterCallbacks(_, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(),
&CuptiWrapper::ActivityRegisterCallbacks));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityEnable));
EXPECT_CALL(*mock_, ActivityEnable(CUPTI_ACTIVITY_KIND_MEMCPY2))
.InSequence(s1)
.WillOnce(Return(CUPTI_ERROR_UNKNOWN));
EXPECT_CALL(*mock_, GetResultString(CUPTI_ERROR_UNKNOWN, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::GetResultString));
EXPECT_CALL(*mock_, ActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::ActivityDisable));
EXPECT_CALL(*mock_, EnableDomain(0, _, _))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableDomain));
if (cb_enable_times > 0) {
EXPECT_CALL(*mock_, EnableCallback(0, _, _, _))
.Times(cb_enable_times)
.InSequence(s1)
.WillRepeatedly(
Invoke(cupti_wrapper_.get(), &CuptiWrapper::EnableCallback));
}
EXPECT_CALL(*mock_, Unsubscribe(_))
.InSequence(s1)
.WillOnce(Invoke(cupti_wrapper_.get(), &CuptiWrapper::Unsubscribe));
EXPECT_FALSE(CuptiDisabled());
CuptiTracerOptions options;
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_MEMCPY2);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
EnableProfiling(options);
EXPECT_TRUE(CuptiDisabled());
RunGpuApp();
EXPECT_TRUE(CuptiDisabled());
DisableProfiling();
EXPECT_TRUE(CuptiDisabled());
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_error_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/gpu/cupti_error_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d937813a-ec6a-45e2-9277-8cabb12ff3bb | cpp | tensorflow/tensorflow | host_tracer | third_party/xla/xla/backends/profiler/cpu/host_tracer.cc | third_party/xla/xla/backends/profiler/cpu/host_tracer_test.cc | #include "xla/backends/profiler/cpu/host_tracer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "xla/tsl/profiler/backends/cpu/host_tracer_utils.h"
#include "xla/tsl/profiler/backends/cpu/threadpool_listener.h"
#include "xla/tsl/profiler/backends/cpu/traceme_recorder.h"
#include "xla/tsl/profiler/utils/time_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/profiler/lib/profiler_collection.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace profiler {
namespace {
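// HostTracer drives TraceMeRecorder: Start()/Stop() toggle recording at the
// configured trace level, and CollectData() converts the buffered events into
// the host-threads XPlane.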
class HostTracer : public tsl::profiler::ProfilerInterface {
public:
explicit HostTracer(int host_trace_level);
~HostTracer() override;
absl::Status Start() override;
absl::Status Stop() override;
absl::Status CollectData(
tensorflow::profiler::XSpace* space) override;
private:
const int host_trace_level_;
bool recording_ = false;
uint64_t start_timestamp_ns_ = 0;
tsl::profiler::TraceMeRecorder::Events events_;
};
HostTracer::HostTracer(int host_trace_level)
: host_trace_level_(host_trace_level) {}
HostTracer::~HostTracer() { Stop().IgnoreError(); }
absl::Status HostTracer::Start() {
if (recording_) {
return tsl::errors::Internal("TraceMeRecorder already started");
}
start_timestamp_ns_ = tsl::profiler::GetCurrentTimeNanos();
recording_ = tsl::profiler::TraceMeRecorder::Start(host_trace_level_);
if (!recording_) {
return tsl::errors::Internal("Failed to start TraceMeRecorder");
}
return absl::OkStatus();
}
absl::Status HostTracer::Stop() {
if (!recording_) {
return tsl::errors::Internal("TraceMeRecorder not started");
}
events_ = tsl::profiler::TraceMeRecorder::Stop();
recording_ = false;
return absl::OkStatus();
}
absl::Status HostTracer::CollectData(
tensorflow::profiler::XSpace* space) {
VLOG(2) << "Collecting data to XSpace from HostTracer.";
if (recording_) {
return tsl::errors::Internal("TraceMeRecorder not stopped");
}
if (events_.empty()) {
return absl::OkStatus();
}
tensorflow::profiler::XPlane* plane =
tsl::profiler::FindOrAddMutablePlaneWithName(
space, tsl::profiler::kHostThreadsPlaneName);
ConvertCompleteEventsToXPlane(start_timestamp_ns_, std::exchange(events_, {}),
plane);
return absl::OkStatus();
}
}
std::unique_ptr<tsl::profiler::ProfilerInterface> CreateHostTracer(
const HostTracerOptions& options) {
if (options.trace_level == 0) return nullptr;
std::vector<std::unique_ptr<tsl::profiler::ProfilerInterface>> profilers;
profilers.push_back(std::make_unique<HostTracer>(options.trace_level));
profilers.push_back(
std::make_unique<tsl::profiler::ThreadpoolProfilerInterface>());
return std::make_unique<tsl::profiler::ProfilerCollection>(
std::move(profilers));
}
}
} | #include "xla/backends/profiler/cpu/host_tracer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <gtest/gtest.h>
#include "absl/types/optional.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace profiler {
namespace {
using ::tsl::Env;
using ::tsl::Thread;
using ::tsl::ThreadOptions;
using ::tsl::profiler::StatType;
using ::tsl::profiler::Timespan;
using ::tsl::profiler::TraceMe;
using ::tsl::profiler::XEventVisitor;
using ::tsl::profiler::XLineVisitor;
using ::tsl::profiler::XPlaneVisitor;
using ::tsl::profiler::XStatVisitor;
TEST(HostTracerTest, CollectsTraceMeEventsAsXSpace) {
tsl::uint32 thread_id;
std::string thread_name = "MyThreadName";
tensorflow::profiler::XSpace space;
std::unique_ptr<Thread> traced_thread(
Env::Default()->StartThread(ThreadOptions(), thread_name, [&] {
ASSERT_TRUE(Env::Default()->GetCurrentThreadName(&thread_name));
thread_id = Env::Default()->GetCurrentThreadId();
auto tracer = CreateHostTracer({});
TF_ASSERT_OK(tracer->Start());
{ TraceMe traceme("hello"); }
{ TraceMe traceme("world"); }
{ TraceMe traceme("contains#inside"); }
{ TraceMe traceme("good#key1=value1#"); }
{ TraceMe traceme("morning#key1=value1,key2=value2#"); }
{ TraceMe traceme("incomplete#key1=value1,key2#"); }
{ TraceMe traceme("Iterator::XXX::YYY::ParallelMap"); }
TF_ASSERT_OK(tracer->Stop());
TF_ASSERT_OK(tracer->CollectData(&space));
}));
traced_thread.reset();
ASSERT_NO_FATAL_FAILURE();
ASSERT_EQ(space.planes_size(), 1);
const auto& plane = space.planes(0);
XPlaneVisitor xplane(&plane);
ASSERT_EQ(plane.name(), ::tsl::profiler::kHostThreadsPlaneName);
ASSERT_EQ(plane.lines_size(), 1);
ASSERT_EQ(plane.event_metadata_size(), 7);
ASSERT_EQ(plane.stat_metadata_size(), 4);
const auto& line = plane.lines(0);
EXPECT_EQ(line.id(), thread_id);
EXPECT_EQ(line.name(), thread_name);
ASSERT_EQ(line.events_size(), 7);
const auto& events = line.events();
XEventVisitor e0(&xplane, &line, &events[0]);
EXPECT_EQ(e0.Name(), "hello");
ASSERT_EQ(events[0].stats_size(), 0);
XEventVisitor e1(&xplane, &line, &events[1]);
EXPECT_EQ(e1.Name(), "world");
ASSERT_EQ(events[1].stats_size(), 0);
XEventVisitor e2(&xplane, &line, &events[2]);
EXPECT_EQ(e2.Name(), "contains#inside");
ASSERT_EQ(events[2].stats_size(), 0);
XEventVisitor e3(&xplane, &line, &events[3]);
EXPECT_EQ(e3.Name(), "good");
ASSERT_EQ(events[3].stats_size(), 1);
{
std::optional<std::string> value;
e3.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "key1") value = stat.ToString();
});
ASSERT_TRUE(value);
EXPECT_EQ(*value, "value1");
}
XEventVisitor e4(&xplane, &line, &events[4]);
EXPECT_EQ(e4.Name(), "morning");
ASSERT_EQ(events[4].stats_size(), 2);
{
std::optional<std::string> value1, value2;
e4.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "key1") {
value1 = stat.ToString();
} else if (stat.Name() == "key2") {
value2 = stat.ToString();
}
});
ASSERT_TRUE(value1 && value2);
EXPECT_EQ(*value1, "value1");
EXPECT_EQ(*value2, "value2");
}
XEventVisitor e5(&xplane, &line, &events[5]);
EXPECT_EQ(e5.Name(), "incomplete");
ASSERT_EQ(events[5].stats_size(), 1);
{
std::optional<std::string> value1, value2;
e5.ForEachStat([&](const XStatVisitor& stat) {
if (stat.Name() == "key1") {
value1 = stat.ToString();
} else if (stat.Name() == "key2") {
value2 = stat.ToString();
}
});
ASSERT_TRUE(value1 && !value2);
EXPECT_EQ(*value1, "value1");
}
XEventVisitor e6(&xplane, &line, &events[6]);
EXPECT_EQ(e6.Name(), "Iterator::XXX::YYY::ParallelMap");
EXPECT_EQ(e6.DisplayName(), "Iterator::ParallelMap");
}
TEST(HostTracerTest, CollectEventsFromThreadPool) {
auto thread_pool =
std::make_unique<tsl::thread::ThreadPool>(Env::Default(),
"HostTracerTest",
                                                /*num_threads=*/1);
tsl::BlockingCounter counter(1);
auto tracer = CreateHostTracer({});
TF_EXPECT_OK(tracer->Start());
thread_pool->Schedule([&counter] {
TraceMe traceme("hello");
counter.DecrementCount();
});
counter.Wait();
thread_pool.reset();
TF_EXPECT_OK(tracer->Stop());
tensorflow::profiler::XSpace space;
TF_EXPECT_OK(tracer->CollectData(&space));
EXPECT_THAT(space.planes(), testing::SizeIs(1));
XPlaneVisitor xplane = tsl::profiler::CreateTfXPlaneVisitor(&space.planes(0));
bool has_record_event = false;
bool has_start_region_event = false;
bool has_end_region_event = false;
int64_t record_region_id = 0;
int64_t start_region_id = 0;
Timespan region_timespan;
Timespan traceme_timespan;
xplane.ForEachLine([&](const XLineVisitor& line) {
line.ForEachEvent([&](const XEventVisitor& event) {
if (event.Name() == tsl::profiler::kThreadpoolListenerRecord) {
has_record_event = true;
const auto& stat = event.GetStat(StatType::kProducerId);
EXPECT_TRUE(stat.has_value());
record_region_id = stat->IntOrUintValue();
} else if (event.Name() ==
tsl::profiler::kThreadpoolListenerStartRegion) {
has_start_region_event = true;
const auto& stat = event.GetStat(StatType::kConsumerId);
EXPECT_TRUE(stat.has_value());
start_region_id = stat->IntOrUintValue();
region_timespan = event.GetTimespan();
} else if (event.Name() == tsl::profiler::kThreadpoolListenerStopRegion) {
has_end_region_event = true;
region_timespan = Timespan::FromEndPoints(region_timespan.begin_ps(),
event.GetTimespan().end_ps());
} else if (event.Name() == "hello") {
traceme_timespan = event.GetTimespan();
}
});
});
EXPECT_TRUE(has_record_event);
EXPECT_TRUE(has_start_region_event);
EXPECT_TRUE(has_end_region_event);
EXPECT_EQ(record_region_id, start_region_id);
EXPECT_TRUE(region_timespan.Includes(traceme_timespan));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/cpu/host_tracer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/cpu/host_tracer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
779d4496-04b8-4f88-ac0d-4810141e0540 | cpp | tensorflow/tensorflow | plugin_tracer_impl | third_party/xla/xla/backends/profiler/plugin/plugin_tracer_impl.cc | third_party/xla/xla/backends/profiler/plugin/plugin_tracer_impl_test.cc | #include "xla/backends/profiler/plugin/plugin_tracer_impl.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include "xla/backends/profiler/plugin/profiler_c_api.h"
#include "xla/backends/profiler/plugin/profiler_error.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/profiler_collection.h"
#include "tsl/profiler/lib/profiler_factory.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace profiler {
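// C ABI entry points backing the PJRT profiler plugin extension; each call
// wraps the ProfilerCollection created from the serialized ProfileOptions.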
PLUGIN_Profiler_Error* PLUGIN_Profiler_Create(
PLUGIN_Profiler_Create_Args* args) {
VLOG(1) << "Creating plugin profiler";
auto profiler = std::make_unique<PLUGIN_Profiler>();
profiler->stopped = true;
tensorflow::ProfileOptions options;
options.ParseFromArray(args->options, args->options_size);
profiler->impl = std::make_unique<tsl::profiler::ProfilerCollection>(
tsl::profiler::CreateProfilers(options));
args->profiler = profiler.release();
return nullptr;
}
PLUGIN_Profiler_Error* PLUGIN_Profiler_Destroy(
PLUGIN_Profiler_Destroy_Args* args) {
VLOG(1) << "Destroying plugin profiler";
if (args->profiler != nullptr) {
delete args->profiler;
}
return nullptr;
}
PLUGIN_Profiler_Error* PLUGIN_Profiler_Start(PLUGIN_Profiler_Start_Args* args) {
VLOG(1) << "Starting profiler";
if (!args->profiler->stopped) {
VLOG(1) << "Profiler is already started";
return nullptr;
}
args->profiler->byte_size = 0;
PLUGIN_PROFILER_RETURN_IF_ERROR(args->profiler->impl->Start());
args->profiler->stopped = false;
return nullptr;
}
PLUGIN_Profiler_Error* PLUGIN_Profiler_Stop(PLUGIN_Profiler_Stop_Args* args) {
VLOG(1) << "Stopping profiler";
if (args->profiler->stopped) {
VLOG(1) << "Profiler is already stopped";
return nullptr;
}
PLUGIN_PROFILER_RETURN_IF_ERROR(args->profiler->impl->Stop());
  args->profiler->stopped = true;
return nullptr;
}
PLUGIN_Profiler_Error* PLUGIN_Profiler_CollectData(
PLUGIN_Profiler_CollectData_Args* args) {
VLOG(1) << "Collecting data from profiler";
tensorflow::profiler::XSpace space;
if (!args->profiler->space) {
VLOG(1) << "TpuProfiler CollectData";
PLUGIN_PROFILER_RETURN_IF_ERROR(args->profiler->impl->CollectData(&space));
args->profiler->byte_size = space.ByteSizeLong();
VLOG(2) << "TpuProfiler CollectData: Number of XPlanes: "
<< space.planes_size();
}
const size_t profiler_data_size = space.ByteSizeLong();
if (args->buffer == nullptr) {
args->profiler->buffer =
std::make_unique<std::vector<uint8_t>>(profiler_data_size + 1);
space.SerializeToArray(args->profiler->buffer->data(), profiler_data_size);
args->buffer_size_in_bytes = args->profiler->buffer->size();
args->buffer = args->profiler->buffer->data();
return nullptr;
}
return nullptr;
}
}
} | #include "xla/backends/profiler/plugin/plugin_tracer_impl.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/backends/profiler/plugin/plugin_tracer.h"
#include "xla/backends/profiler/plugin/profiler_c_api.h"
#include "xla/backends/profiler/plugin/profiler_error.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/profiler_factory.h"
#include "tsl/profiler/lib/profiler_interface.h"
#include "tsl/profiler/protobuf/profiler_options.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
namespace profiler {
using tensorflow::ProfileOptions;
using tsl::profiler::ProfilerInterface;
using tsl::profiler::XPlaneBuilder;
class PluginTracerImpl : public ProfilerInterface {
public:
explicit PluginTracerImpl(const ProfileOptions& options)
: options_(options) {}
absl::Status Start() override {
LOG(INFO) << "Starting Tracer";
return absl::OkStatus();
}
absl::Status Stop() override {
LOG(INFO) << "Stopping Tracer";
return absl::OkStatus();
}
absl::Status CollectData(tensorflow::profiler::XSpace* space) override {
LOG(INFO) << "Collecting data";
tensorflow::profiler::XPlane* plane = space->add_planes();
XPlaneBuilder builder(plane);
builder.SetName("GpuBackendTracer");
tensorflow::profiler::XStatMetadata* metadata =
builder.GetOrCreateStatMetadata((int64_t)0);
metadata->set_name("ProfileOptions");
builder.AddStatValue(*metadata, options_.SerializeAsString());
return absl::OkStatus();
}
private:
ProfileOptions options_;
};
std::unique_ptr<ProfilerInterface> CreatePluginTracer(
const ProfileOptions& options) {
return std::make_unique<PluginTracerImpl>(options);
}
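// Registers the test tracer factory once at static-initialization time so the
// plugin profiler under test picks it up when it calls CreateProfilers().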
static auto register_test_tracer = [] {
RegisterProfilerFactory(&CreatePluginTracer);
return 0;
}();
TEST(PluginTracerTest, TestPluginWithPluginTracer) {
PLUGIN_Profiler_Api api;
api.create = &PLUGIN_Profiler_Create;
api.start = &PLUGIN_Profiler_Start;
api.stop = &PLUGIN_Profiler_Stop;
api.collect_data = &PLUGIN_Profiler_CollectData;
api.destroy = &PLUGIN_Profiler_Destroy;
api.error_destroy = &PLUGIN_Profiler_Error_Destroy;
api.error_message = &PLUGIN_Profiler_Error_Message;
api.error_get_code = &PLUGIN_Profiler_Error_GetCode;
api.struct_size = PLUGIN_Profiler_Api_STRUCT_SIZE;
ProfileOptions options;
options.set_repository_path("TestRepositoryPath");
options.set_device_tracer_level(2);
PluginTracer tracer(&api, options);
tensorflow::profiler::XSpace xspace;
EXPECT_TRUE(tracer.Start().ok());
EXPECT_TRUE(tracer.Stop().ok());
EXPECT_TRUE(tracer.CollectData(&xspace).ok());
ASSERT_THAT(xspace.planes(), testing::SizeIs(1));
ASSERT_THAT(xspace.planes(0).stats(), testing::SizeIs(1));
tsl::profiler::XPlaneVisitor visitor(&xspace.planes(0));
std::optional<tsl::profiler::XStatVisitor> stat =
visitor.GetStat(0, *visitor.GetStatMetadata(0));
ASSERT_TRUE(stat.has_value());
EXPECT_EQ(stat->Name(), "ProfileOptions");
EXPECT_EQ(stat->StrOrRefValue(), options.SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/plugin/plugin_tracer_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/profiler/plugin/plugin_tracer_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cd770ea-afe3-4a25-b3c7-1b35a92ac8d0 | cpp | tensorflow/tensorflow | buffer_use | third_party/xla/xla/runtime/buffer_use.cc | third_party/xla/xla/runtime/buffer_use_test.cc | #include "xla/runtime/buffer_use.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "xla/service/buffer_assignment.h"
namespace xla {
BufferUse::ReadWriteSet::ReadWriteSet() = default;
void BufferUse::ReadWriteSet::Add(BufferUse use) {
switch (use.access()) {
case BufferUse::kRead:
AddRead(use.slice());
break;
case BufferUse::kWrite:
AddWrite(use.slice());
break;
}
}
void BufferUse::ReadWriteSet::AddRead(BufferAllocation::Slice slice) {
read_.insert(slice);
}
void BufferUse::ReadWriteSet::AddWrite(BufferAllocation::Slice slice) {
write_.insert(slice);
}
void BufferUse::ReadWriteSet::AddAll(absl::Span<const BufferUse> uses) {
for (const auto& use : uses) Add(use);
}
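// Conflict semantics: a write conflicts with any overlapping read or write,
// while a read conflicts only with overlapping writes.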
bool BufferUse::ReadWriteSet::HasConflicts(const BufferUse& use) const {
auto overlaps = [](const absl::flat_hash_set<BufferAllocation::Slice>& set,
const BufferUse& use) {
return set.contains(use.slice()) ||
absl::c_any_of(set, [&](const BufferAllocation::Slice& slice) {
return slice.OverlapsWith(use.slice());
});
};
return use.access() == MemoryAccess::kWrite
? overlaps(write_, use) || overlaps(read_, use)
: overlaps(write_, use);
}
bool BufferUse::ReadWriteSet::HasConflicts(const ReadWriteSet& other) {
return absl::c_any_of(other.read_,
[&](const BufferAllocation::Slice& slice) {
return HasConflicts(BufferUse::Read(slice));
}) ||
absl::c_any_of(other.write_,
[&](const BufferAllocation::Slice& slice) {
return HasConflicts(BufferUse::Write(slice));
});
}
} | #include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(BufferUseTest, Equality) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
BufferUse use0(slice0, BufferUse::MemoryAccess::kRead);
BufferUse use1(slice0, BufferUse::MemoryAccess::kWrite);
BufferUse use2(slice0, BufferUse::MemoryAccess::kRead);
EXPECT_NE(use0, use1);
EXPECT_EQ(use0, use2);
}
TEST(BufferUseTest, ReadWriteSet) {
BufferUse::ReadWriteSet rwset;
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice slice0(&alloc, 0, 10);
BufferAllocation::Slice slice1(&alloc, 5, 10);
BufferAllocation::Slice slice2(&alloc, 10, 10);
rwset.Add(BufferUse::Read(slice0));
EXPECT_FALSE(rwset.HasConflicts({BufferUse::Read(slice1)}));
EXPECT_TRUE(rwset.HasConflicts({BufferUse::Write(slice1)}));
EXPECT_FALSE(rwset.HasConflicts({BufferUse::Write(slice2)}));
rwset.Add(BufferUse::Read(slice1));
EXPECT_TRUE(rwset.HasConflicts({BufferUse::Write(slice2)}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/runtime/buffer_use.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/runtime/buffer_use_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
53279176-e6bf-43d8-9e11-0769e087a2d8 | cpp | tensorflow/tensorflow | literal_test_util | third_party/xla/xla/tests/literal_test_util.cc | third_party/xla/xla/tests/literal_test_util_test.cc | #include "xla/tests/literal_test_util.h"
#include "absl/strings/str_format.h"
#include "xla/literal_comparison.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
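// Writes `literal` to the undeclared test outputs directory (falling back to
// TmpDir) as both a binary proto and a text dump, for offline inspection.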
void WriteLiteralToTempFile(const LiteralSlice& literal,
const std::string& name) {
std::string outdir;
if (!tsl::io::GetTestUndeclaredOutputsDir(&outdir)) {
outdir = tsl::testing::TmpDir();
}
auto* env = tsl::Env::Default();
std::string filename = tsl::io::JoinPath(
outdir, absl::StrFormat("tempfile-%d-%s", env->NowMicros(), name));
TF_CHECK_OK(tsl::WriteBinaryProto(env, absl::StrCat(filename, ".pb"),
literal.ToProto()));
TF_CHECK_OK(tsl::WriteStringToFile(env, absl::StrCat(filename, ".txt"),
literal.ToString()));
LOG(ERROR) << "wrote Literal to " << name << " file: " << filename
<< ".{pb,txt}";
}
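// Miscompare callback for literal_comparison::Near: logs truncated views of
// the literals and dumps expected/actual/mismatches to temp files.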
void OnMiscompare(const LiteralSlice& expected, const LiteralSlice& actual,
const LiteralSlice& mismatches,
                  const ShapeIndex& /*shape_index*/,
                  const literal_comparison::ErrorBuckets& /*error_buckets*/) {
LOG(INFO) << "expected: " << ShapeUtil::HumanString(expected.shape()) << " "
<< literal_comparison::ToStringTruncated(expected);
LOG(INFO) << "actual: " << ShapeUtil::HumanString(actual.shape()) << " "
<< literal_comparison::ToStringTruncated(actual);
LOG(INFO) << "Dumping literals to temp files...";
WriteLiteralToTempFile(expected, "expected");
WriteLiteralToTempFile(actual, "actual");
WriteLiteralToTempFile(mismatches, "mismatches");
}
::testing::AssertionResult StatusToAssertion(const absl::Status& s) {
if (s.ok()) {
return ::testing::AssertionSuccess();
}
return ::testing::AssertionFailure() << s.message();
}
}
::testing::AssertionResult LiteralTestUtil::EqualShapes(
const Shape& expected, const Shape& actual) {
return StatusToAssertion(literal_comparison::EqualShapes(expected, actual));
}
::testing::AssertionResult LiteralTestUtil::EqualShapesAndLayouts(
const Shape& expected, const Shape& actual) {
if (expected.ShortDebugString() != actual.ShortDebugString()) {
return ::testing::AssertionFailure()
<< "want: " << expected.ShortDebugString()
<< " got: " << actual.ShortDebugString();
}
return ::testing::AssertionSuccess();
}
::testing::AssertionResult LiteralTestUtil::Equal(
const LiteralSlice& expected, const LiteralSlice& actual) {
return StatusToAssertion(literal_comparison::Equal(expected, actual));
}
::testing::AssertionResult LiteralTestUtil::Near(
const LiteralSlice& expected, const LiteralSlice& actual,
const ErrorSpec& error_spec, std::optional<bool> detailed_message) {
return StatusToAssertion(literal_comparison::Near(
expected, actual, error_spec, detailed_message, &OnMiscompare));
}
::testing::AssertionResult LiteralTestUtil::NearOrEqual(
const LiteralSlice& expected, const LiteralSlice& actual,
const std::optional<ErrorSpec>& error) {
if (error.has_value()) {
VLOG(1) << "Expects near";
return StatusToAssertion(literal_comparison::Near(
        expected, actual, *error, /*detailed_message=*/std::nullopt,
        &OnMiscompare));
}
VLOG(1) << "Expects equal";
return StatusToAssertion(literal_comparison::Equal(expected, actual));
}
} | #include "xla/tests/literal_test_util.h"
#include <vector>
#include "absl/strings/str_join.h"
#include "xla/literal.h"
#include "xla/test_helpers.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(LiteralTestUtilTest, ComparesEqualTuplesEqual) {
Literal literal = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<int32_t>(42),
LiteralUtil::CreateR0<int32_t>(64),
});
EXPECT_TRUE(LiteralTestUtil::Equal(literal, literal));
}
TEST(LiteralTestUtilTest, ComparesEqualComplex64TuplesEqual) {
Literal literal = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
});
EXPECT_TRUE(LiteralTestUtil::Equal(literal, literal));
}
TEST(LiteralTestUtilTest, ComparesEqualComplex128TuplesEqual) {
Literal literal = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
});
EXPECT_TRUE(LiteralTestUtil::Equal(literal, literal));
}
TEST(LiteralTestUtilTest, ComparesUnequalComplex64TuplesUnequal) {
Literal literal0 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
});
Literal literal1 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
});
Literal literal2 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex64>({42.42, 64.0}),
LiteralUtil::CreateR0<complex64>({64.0, 42.0}),
});
Literal literal3 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex64>({42.0, 64.0}),
LiteralUtil::CreateR0<complex64>({64.0, 42.42}),
});
EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal1));
EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal2));
EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal3));
EXPECT_FALSE(LiteralTestUtil::Equal(literal2, literal3));
}
TEST(LiteralTestUtilTest, ComparesUnequalComplex128TuplesUnequal) {
Literal literal0 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
});
Literal literal1 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
});
Literal literal2 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex128>({42.42, 64.0}),
LiteralUtil::CreateR0<complex128>({64.0, 42.0}),
});
Literal literal3 = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<complex128>({42.0, 64.0}),
LiteralUtil::CreateR0<complex128>({64.0, 42.42}),
});
EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal1));
EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal2));
EXPECT_FALSE(LiteralTestUtil::Equal(literal0, literal3));
EXPECT_FALSE(LiteralTestUtil::Equal(literal2, literal3));
}
TEST(LiteralTestUtilTest, ComparesUnequalTuplesUnequal) {
auto unequal_things_are_equal = [] {
Literal lhs = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<int32_t>(42),
LiteralUtil::CreateR0<int32_t>(64),
});
Literal rhs = LiteralUtil::MakeTupleFromSlices({
LiteralUtil::CreateR0<int32_t>(64),
LiteralUtil::CreateR0<int32_t>(42),
});
CHECK(LiteralTestUtil::Equal(lhs, rhs)) << "LHS and RHS are unequal";
};
ASSERT_DEATH(unequal_things_are_equal(), "LHS and RHS are unequal");
}
TEST(LiteralTestUtilTest, ExpectNearFailurePlacesResultsInTemporaryDirectory) {
auto dummy_lambda = [] {
auto two = LiteralUtil::CreateR0<float>(2);
auto four = LiteralUtil::CreateR0<float>(4);
ErrorSpec error(0.001);
CHECK(LiteralTestUtil::Near(two, four, error)) << "two is not near four";
};
tsl::Env* env = tsl::Env::Default();
std::string outdir;
if (!tsl::io::GetTestUndeclaredOutputsDir(&outdir)) {
outdir = tsl::testing::TmpDir();
}
std::string pattern = tsl::io::JoinPath(outdir, "tempfile-*.pb");
std::vector<std::string> files;
TF_CHECK_OK(env->GetMatchingPaths(pattern, &files));
for (const auto& f : files) {
TF_CHECK_OK(env->DeleteFile(f)) << f;
}
ASSERT_DEATH(dummy_lambda(), "two is not near four");
std::vector<std::string> results;
TF_CHECK_OK(env->GetMatchingPaths(pattern, &results));
LOG(INFO) << "results: [" << absl::StrJoin(results, ", ") << "]";
EXPECT_EQ(3, results.size());
for (const std::string& result : results) {
LiteralProto literal_proto;
TF_CHECK_OK(
tsl::ReadBinaryProto(tsl::Env::Default(), result, &literal_proto));
Literal literal = Literal::CreateFromProto(literal_proto).value();
if (result.find("expected") != std::string::npos) {
EXPECT_EQ("f32[] 2", literal.ToString());
} else if (result.find("actual") != std::string::npos) {
EXPECT_EQ("f32[] 4", literal.ToString());
} else if (result.find("mismatches") != std::string::npos) {
EXPECT_EQ("pred[] true", literal.ToString());
} else {
FAIL() << "unknown file in temporary directory: " << result;
}
}
}
TEST(LiteralTestUtilTest, NotEqualHasValuesInMessage) {
auto expected = LiteralUtil::CreateR1<int32_t>({1, 2, 3});
auto actual = LiteralUtil::CreateR1<int32_t>({4, 5, 6});
::testing::AssertionResult result = LiteralTestUtil::Equal(expected, actual);
EXPECT_THAT(result.message(),
::testing::HasSubstr("Expected literal:\ns32[3] {1, 2, 3}"));
EXPECT_THAT(result.message(),
::testing::HasSubstr("Actual literal:\ns32[3] {4, 5, 6}"));
}
TEST(LiteralTestUtilTest, NearComparatorR1) {
auto a = LiteralUtil::CreateR1<float>(
{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
auto b = LiteralUtil::CreateR1<float>(
{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, NearComparatorR1Complex64) {
auto a = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.8, 1.8}});
auto b = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.8, 1.8}});
auto c = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.9, 1.8}});
auto d = LiteralUtil::CreateR1<complex64>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.8, 1.9}});
EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(a, c, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(a, d, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(c, d, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, NearComparatorR1Complex128) {
auto a = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.8, 1.8}});
auto b = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.8, 1.8}});
auto c = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.9, 1.8}});
auto d = LiteralUtil::CreateR1<complex128>({{0.0, 1.0},
{0.1, 1.1},
{0.2, 1.2},
{0.3, 1.3},
{0.4, 1.4},
{0.5, 1.5},
{0.6, 1.6},
{0.7, 1.7},
{0.8, 1.9}});
EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(a, c, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(a, d, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(c, d, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, NearComparatorR1Nan) {
auto a = LiteralUtil::CreateR1<float>(
{0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
auto b = LiteralUtil::CreateR1<float>(
{0.0, 0.1, 0.2, 0.3, NAN, 0.5, 0.6, 0.7, 0.8});
EXPECT_TRUE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtil, NearComparatorDifferentLengths) {
auto a = LiteralUtil::CreateR1<float>(
{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8});
auto b =
LiteralUtil::CreateR1<float>({0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7});
EXPECT_FALSE(LiteralTestUtil::Near(a, b, ErrorSpec{0.0001}));
EXPECT_FALSE(LiteralTestUtil::Near(b, a, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, ExpectNearDoubleOutsideFloatValueRange) {
auto two_times_float_max =
LiteralUtil::CreateR0<double>(2.0 * std::numeric_limits<float>::max());
ErrorSpec error(0.001);
EXPECT_TRUE(
LiteralTestUtil::Near(two_times_float_max, two_times_float_max, error));
}
TEST(LiteralTestUtilTest, DynamicEqualityR1) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal1.SetDynamicSize(0, 5);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 99, 99, 99, 99, 99});
literal2.SetDynamicSize(0, 5);
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicEqualityR2Dim) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(0, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {99, 99, 99}});
literal2.SetDynamicSize(0, 2);
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicEqualityR2Dim1) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(1, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal2.PopulateR2<uint32_t>({{1, 2, 99}, {4, 5, 99}, {7, 8, 99}});
literal2.SetDynamicSize(1, 2);
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicNearEqualityR1) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal1.SetDynamicSize(0, 5);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal2.PopulateR1<float>({1, 2, 3, 4, 5, 99, 99, 99, 99, 99});
literal2.SetDynamicSize(0, 5);
ErrorSpec error(0.001);
EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, error));
}
TEST(LiteralTestUtilTest, DynamicNearEqualityR2Dim) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(0, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {99, 99, 99}});
literal2.SetDynamicSize(0, 2);
ErrorSpec error(0.001);
EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, error));
}
TEST(LiteralTestUtilTest, DynamicNearEqualityR2Dim1) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(1, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal2.PopulateR2<float>({{1, 2, 99}, {4, 5, 99}, {7, 8, 99}});
literal2.SetDynamicSize(1, 2);
ErrorSpec error(0.001);
EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, error));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR1) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal1.SetDynamicSize(0, 5);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal2.SetDynamicSize(0, 6);
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR1_F32) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal1.SetDynamicSize(0, 5);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal2.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal2.SetDynamicSize(0, 6);
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, ExpectedIsDynamicActualIsNotR1) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal1.SetDynamicSize(0, 5);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, ExpectedIsDynamicActualIsNotR1_F32) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal1.SetDynamicSize(0, 5);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal2.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, ActualIsDynamicExpectedIsNotR1) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal1.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {10}));
literal2.PopulateR1<uint32_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal2.SetDynamicSize(0, 5);
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, ActualIsDynamicExpectedIsNotR1_F32) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal1.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {10}));
literal2.PopulateR1<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
literal2.SetDynamicSize(0, 5);
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim0) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(0, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal2.SetDynamicSize(0, 3);
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim0_F32) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(0, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal2.SetDynamicSize(0, 3);
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim1) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(1, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal2.SetDynamicSize(1, 3);
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2Dim1_F32) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(1, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal2.SetDynamicSize(1, 3);
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2DifferentDimensions) {
auto literal1 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal1.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(1, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(U32, {3, 3}));
literal2.PopulateR2<uint32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal2.SetDynamicSize(0, 2);
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, UnequalDynamicDimensionsR2DifferentDimensions_F32) {
auto literal1 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal1.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal1.SetDynamicSize(1, 2);
auto literal2 = Literal(ShapeUtil::MakeShape(F32, {3, 3}));
literal2.PopulateR2<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
literal2.SetDynamicSize(0, 2);
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, DynamicTuplesAreEqual) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 5);
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicTuplesAreNear) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 5);
EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, DynamicTuplesAreEqualWithinDynamicBounds) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 3);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<uint32_t>({1, 2, 3, 99, 99});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 3);
EXPECT_TRUE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicTuplesAreNearWithinDynamicBounds) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 3);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<float>({1, 2, 3, 99, 99});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 3);
EXPECT_TRUE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, DynamicTuplesHaveDifferentDynamicSizes) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 4);
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, DynamicTuplesHaveDifferentDynamicSizes_F32) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal2.SetDynamicSize(0, {0}, 4);
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
TEST(LiteralTestUtilTest, OneTupleDynamicOneIsNot) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(U32, {5}), ShapeUtil::MakeShape(U32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<uint32_t>({1, 2, 3, 4, 5});
EXPECT_FALSE(LiteralTestUtil::Equal(literal1, literal2));
}
TEST(LiteralTestUtilTest, OneTupleDynamicOneIsNot_F32) {
auto literal1 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
auto literal2 = Literal(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {5}), ShapeUtil::MakeShape(F32, {5})}));
MutableBorrowingLiteral(&literal1, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal1, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
literal1.SetDynamicSize(0, {0}, 5);
MutableBorrowingLiteral(&literal2, {0})
.PopulateR1<float>({1, 2, 3, 4, 5});
MutableBorrowingLiteral(&literal2, {1})
.PopulateR1<float>({1, 2, 3, 4, 5});
EXPECT_FALSE(LiteralTestUtil::Near(literal1, literal2, ErrorSpec{0.0001}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/literal_test_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/literal_test_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1986c596-9933-436d-a46c-46859567f49a | cpp | tensorflow/tensorflow | cycle_detector | third_party/xla/xla/mlir_hlo/utils/cycle_detector.cc | third_party/xla/xla/mlir_hlo/utils/cycle_detector_test.cc | #include "utils/cycle_detector.h"
#include <algorithm>
#include <optional>
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
namespace mlir {
namespace {
using NodeSet = llvm::DenseSet<int32_t>;
using OrderedNodeSet = OrderedSet<int32_t>;
template <typename T>
struct VecStruct {
using type = llvm::SmallVector<T, 4>;
};
template <typename T>
using Vec = typename VecStruct<T>::type;
struct Node {
int32_t rank;
bool visited;
void* data;
OrderedNodeSet in;
OrderedNodeSet out;
};
}
struct GraphCycles::Rep {
Vec<Node*> nodes;
Vec<int32_t> freeNodes;
Vec<int32_t> deltaf;
Vec<int32_t> deltab;
Vec<int32_t> list;
Vec<int32_t> merged;
Vec<int32_t> stack;
};
GraphCycles::GraphCycles(int32_t numNodes) : rep_(new Rep) {
rep_->nodes.reserve(numNodes);
for (int32_t i = 0; i < numNodes; ++i) {
Node* n = new Node;
n->visited = false;
n->data = nullptr;
n->rank = rep_->nodes.size();
rep_->nodes.push_back(n);
}
}
GraphCycles::~GraphCycles() {
for (Vec<Node*>::size_type i = 0, e = rep_->nodes.size(); i < e; ++i) {
delete rep_->nodes[i];
}
delete rep_;
}
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->nodes[x]->out.Contains(y);
}
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->nodes[x]->out.Erase(y);
rep_->nodes[y]->in.Erase(x);
}
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound);
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound);
static void reorder(GraphCycles::Rep* r);
static void sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst);
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes);
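// InsertEdge maintains node ranks that form a topological order of the graph
// (a Pearce-Kelly-style incremental scheme): when a new edge x->y violates the
// order, a forward DFS from y and a backward DFS from x visit only the rank
// window between the two nodes, and reorder() merges the visited sets back in.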
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
Node* nx = r->nodes[x];
if (!nx->out.Insert(y)) {
return true;
}
Node* ny = r->nodes[y];
ny->in.Insert(x);
if (nx->rank <= ny->rank) {
return true;
}
if (forwardDfs(r, y, nx->rank)) {
nx->out.Erase(y);
ny->in.Erase(x);
clearVisitedBits(r, r->deltaf);
return false;
}
backwardDfs(r, x, ny->rank);
reorder(r);
return true;
}
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound) {
r->deltaf.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf.push_back(n);
for (auto w : nn->out.GetSequence()) {
Node* nw = r->nodes[w];
if (nw->rank == upperBound) {
return true;
}
if (!nw->visited && nw->rank < upperBound) {
r->stack.push_back(w);
}
}
}
return false;
}
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound) {
r->deltab.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab.push_back(n);
for (auto w : nn->in.GetSequence()) {
Node* nw = r->nodes[w];
if (!nw->visited && lowerBound < nw->rank) {
r->stack.push_back(w);
}
}
}
}
static void reorder(GraphCycles::Rep* r) {
sort(r->nodes, &r->deltab);
sort(r->nodes, &r->deltaf);
r->list.clear();
moveToList(r, &r->deltab, &r->list);
moveToList(r, &r->deltaf, &r->list);
r->merged.resize(r->deltab.size() + r->deltaf.size());
std::merge(r->deltab.begin(), r->deltab.end(), r->deltaf.begin(),
r->deltaf.end(), r->merged.begin());
for (Vec<int32_t>::size_type i = 0, e = r->list.size(); i < e; ++i) {
r->nodes[r->list[i]]->rank = r->merged[i];
}
}
static void sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
struct ByRank {
const Vec<Node*>* nodes;
bool operator()(int32_t a, int32_t b) const {
return (*nodes)[a]->rank < (*nodes)[b]->rank;
}
};
ByRank cmp;
cmp.nodes = &nodes;
std::sort(delta->begin(), delta->end(), cmp);
}
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst) {
for (Vec<int32_t>::size_type i = 0, e = src->size(); i < e; i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes[w]->rank;
r->nodes[w]->visited = false;
dst->push_back(w);
}
}
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes) {
for (Vec<int32_t>::size_type i = 0, e = nodes.size(); i < e; i++) {
r->nodes[nodes[i]]->visited = false;
}
}
bool GraphCycles::IsReachable(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = r->nodes[x];
Node* ny = r->nodes[y];
if (nx->rank >= ny->rank) {
return false;
}
bool reachable = forwardDfs(r, x, ny->rank);
clearVisitedBits(r, r->deltaf);
return reachable;
}
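// Contracts edge a->b by merging one endpoint into the other, unless b is
// still reachable from a via another path (contraction would then close a
// cycle), in which case the edge is restored and nullopt is returned. The
// lower-degree node is merged away, so the survivor may be either a or b.
// Illustrative sketch (hypothetical node ids, not taken from any test):
//   GraphCycles g(3);
//   g.InsertEdge(0, 1);
//   g.InsertEdge(1, 2);
//   std::optional<int32_t> survivor = g.ContractEdge(0, 1);  // merges 0 and 1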
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
assert(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachable(a, b)) {
InsertEdge(a, b);
return {};
}
if (rep_->nodes[b]->in.Size() + rep_->nodes[b]->out.Size() >
rep_->nodes[a]->in.Size() + rep_->nodes[a]->out.Size()) {
std::swap(a, b);
}
Node* nb = rep_->nodes[b];
OrderedNodeSet out = std::move(nb->out);
OrderedNodeSet in = std::move(nb->in);
for (int32_t y : out.GetSequence()) {
rep_->nodes[y]->in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->nodes[y]->out.Erase(b);
}
rep_->freeNodes.push_back(b);
rep_->nodes[a]->out.Reserve(rep_->nodes[a]->out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->nodes[a]->in.Reserve(rep_->nodes[a]->in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
return rep_->nodes[node]->out.GetSequence();
}
namespace {
void sortInPostOrder(const Vec<Node*>& nodes, std::vector<int32_t>* toSort) {
std::sort(toSort->begin(), toSort->end(), [&](int32_t a, int32_t b) {
return nodes[a]->rank > nodes[b]->rank;
});
}
}
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
llvm::DenseSet<int32_t> freeNodesSet;
for (int32_t n : rep_->freeNodes) freeNodesSet.insert(n);
std::vector<int32_t> allNodes;
allNodes.reserve(rep_->nodes.size() - freeNodesSet.size());
for (size_t i = 0, e = rep_->nodes.size(); i < e; i++) {
if (!freeNodesSet.count(i)) {
allNodes.push_back(i);
}
}
sortInPostOrder(rep_->nodes, &allNodes);
return allNodes;
}
} | #include "utils/cycle_detector.h"
#include "xla/test.h"
class GraphCyclesTest : public ::testing::Test {
public:
GraphCyclesTest() : g_(100) {}
bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
}
}
mlir::GraphCycles g_;
};
TEST_F(GraphCyclesTest, NoCycle) { AddMultiples(); }
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
EXPECT_FALSE(AddEdge(9, 2));
}
TEST_F(GraphCyclesTest, RemoveEdge) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
g_.RemoveEdge(2, 3);
EXPECT_FALSE(g_.HasEdge(2, 3));
}
TEST_F(GraphCyclesTest, IsReachable) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
EXPECT_TRUE(g_.IsReachable(1, 5));
EXPECT_FALSE(g_.IsReachable(5, 1));
}
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(*g_.ContractEdge(1, 2), 2);
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(*g_.ContractEdge(2, 3), 2);
EXPECT_TRUE(g_.HasEdge(2, 4));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir_hlo/utils/cycle_detector.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir_hlo/utils/cycle_detector_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4ad9eac-84ab-45ce-8e5f-fd72109f0f90 | cpp | tensorflow/tensorflow | hlo_utils | third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils.cc | third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils_test.cc | #include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/ValueRange.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using mlir::AffineMap;
using mlir::Builder;
using mlir::DenseElementsAttr;
using mlir::ShapedType;
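// Converts an XLA literal to a DenseElementsAttr. Sub-byte integer types
// (e.g. s4/u4) are widened to one byte per element first, which matches the
// raw-buffer layout DenseElementsAttr::getFromRawBuffer expects for them.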
template <typename CppType>
::mlir::DenseElementsAttr CreateDenseAttrFromLiteral(
const ShapedType& type, const LiteralBase& literal) {
if constexpr (is_intN_v<CppType>) {
auto data_span = literal.data<CppType>();
std::vector<char> packed_padded_data;
packed_padded_data.reserve(literal.element_count());
for (size_t i = 0; i < literal.element_count(); i++) {
packed_padded_data.push_back(static_cast<char>(data_span[i]));
}
return ::mlir::DenseElementsAttr::getFromRawBuffer(type,
packed_padded_data);
} else {
auto data_span = literal.data<CppType>();
return ::mlir::DenseElementsAttr::get(
type, llvm::ArrayRef(data_span.data(), data_span.size()));
}
}
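// Derives a strided affine map for shapes with a non-default layout. An empty
// AffineMap means the default (dim 0 major) layout suffices; tiled layouts
// and dynamic shapes are rejected as unsupported.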
absl::StatusOr<AffineMap> GetPermutationIfAvailable(const Shape& shape,
mlir::Builder builder) {
if (!shape.layout().tiles().empty()) {
return Internal("Tiled layouts are not yet supported");
}
if (!shape.has_layout() ||
LayoutUtil::IsMonotonicWithDim0Major(shape.layout())) {
return AffineMap();
}
if (!shape.is_static()) {
return Internal("Permutations for dynamic shapes are not yet supported");
}
int64_t accumulated_stride = 1;
llvm::SmallVector<int64_t, 4> strides(shape.rank(), 1);
for (int64_t dim : LayoutUtil::MinorToMajor(shape)) {
strides[dim] = accumulated_stride;
accumulated_stride *= shape.dimensions(dim);
}
if (accumulated_stride == 0) {
return AffineMap();
}
  return makeStridedLinearLayoutMap(strides, /*offset=*/0,
                                    builder.getContext());
}
}
absl::StatusOr<mlir::MemRefType> ConvertTensorShapeToMemRefType(
const Shape& shape, mlir::Builder builder) {
auto element_type_or =
ConvertPrimitiveTypeToMlirType(shape.element_type(), builder);
if (!element_type_or.ok()) return element_type_or.status();
using mlir::MemRefType;
auto dimensions = shape.dimensions();
llvm::SmallVector<int64_t, 4> array(dimensions.begin(), dimensions.end());
auto permutation_or = GetPermutationIfAvailable(shape, builder);
if (!permutation_or.ok()) return permutation_or.status();
return MemRefType::get(array, element_type_or.value(),
permutation_or.value());
}
absl::StatusOr<mlir::DenseElementsAttr> CreateDenseElementsAttrFromLiteral(
const LiteralBase& literal, Builder builder) {
TF_ASSIGN_OR_RETURN(auto type,
ConvertTensorShapeToType<mlir::RankedTensorType>(
literal.shape(), builder));
auto element_type = literal.shape().element_type();
return primitive_util::PrimitiveTypeSwitch<
absl::StatusOr<mlir::DenseElementsAttr>>(
[&](auto primitive_type_constant)
-> absl::StatusOr<mlir::DenseElementsAttr> {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return CreateDenseAttrFromLiteral<
primitive_util::NativeTypeOf<primitive_type_constant>>(type,
literal);
}
return Internal("Unsupported type: %s",
PrimitiveType_Name(element_type));
},
element_type);
}
mlir::DenseIntElementsAttr CreateDenseIntElementsAttrFromVector(
const llvm::ArrayRef<int64_t> vector, mlir::Builder builder,
llvm::ArrayRef<int64_t> shape) {
return mlir::DenseIntElementsAttr::get(
mlir::RankedTensorType::get(shape.empty() ? vector.size() : shape,
builder.getIntegerType(64)),
vector);
}
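// Recursively assembles (possibly nested) mhlo::TupleOps from a flat value
// list, consuming `flatten_values` from the front; for a non-tuple type the
// next value is simply popped and returned.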
mlir::Value CreateTupleValue(mlir::OpBuilder* func_builder, mlir::Location loc,
mlir::ValueRange& flatten_values,
mlir::Type type) {
  auto tuple_type = llvm::dyn_cast<mlir::TupleType>(type);
if (!tuple_type) {
assert(!flatten_values.empty());
auto retval = flatten_values.front();
flatten_values = flatten_values.drop_front();
return retval;
}
llvm::SmallVector<mlir::Value> flatten_sub_values;
for (auto child_type : tuple_type.getTypes())
flatten_sub_values.push_back(
CreateTupleValue(func_builder, loc, flatten_values, child_type));
return func_builder->create<mlir::mhlo::TupleOp>(loc, flatten_sub_values)
.getResult();
}
mlir::Operation* CreateTupleFromOpResults(mlir::OpBuilder* func_builder,
mlir::Location loc,
mlir::Operation* op,
mlir::Type type) {
  if (!llvm::isa<mlir::TupleType>(type)) return op;
mlir::ValueRange flattened_results_ref(op->getResults());
auto result =
CreateTupleValue(func_builder, loc, flattened_results_ref, type);
auto defining_tuple_op = result.getDefiningOp<mlir::mhlo::TupleOp>();
assert(defining_tuple_op && "builder didn't return the right type");
auto tupleOp = defining_tuple_op.getOperation();
return tupleOp;
}
mlir::TypeRange Untuple(const mlir::Type& type) {
if (llvm::isa<mlir::TupleType>(type)) {
    return llvm::cast<mlir::TupleType>(type).getTypes();
}
return type;
}
} | #include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h"
#include <cstdint>
#include <cstring>
#include <vector>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/DebugStringHelper.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
TEST(ConvertTensorShapeToType, Simple) {
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect>();
mlir::Builder builder(&context);
{
auto shape = ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128});
TF_ASSERT_OK_AND_ASSIGN(
auto type,
ConvertTensorShapeToType<mlir::RankedTensorType>(shape, builder));
auto expected = mlir::RankedTensorType::get({8, 128}, builder.getI32Type());
EXPECT_TRUE(type == expected)
<< " Expected: " << mlir::debugString(expected)
<< " Computed: " << mlir::debugString(type);
}
{
auto shape =
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128}, {true, false});
TF_ASSERT_OK_AND_ASSIGN(
auto type,
ConvertTensorShapeToType<mlir::RankedTensorType>(shape, builder));
int64_t bounds[] = {8, mlir::ShapedType::kDynamic};
auto extensions = mlir::mhlo::TypeExtensionsAttr::get(&context, bounds);
auto expected = mlir::RankedTensorType::get(
{mlir::ShapedType::kDynamic, 128}, builder.getI32Type(), extensions);
EXPECT_TRUE(type == expected)
<< " Expected: " << mlir::debugString(expected)
<< " Computed: " << mlir::debugString(type);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/hlo_to_mhlo/hlo_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02e32eea-6457-4f34-9a05-e15a3b90742b | cpp | tensorflow/tensorflow | func | tensorflow/compiler/mlir/quantization/common/func.cc | tensorflow/compiler/mlir/quantization/common/func_test.cc | #include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/cc/saved_model/signature_constants.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
namespace mlir::quant {
namespace {
using ::tensorflow::kDefaultServingSignatureDefKey;
using ::tensorflow::kImportModelDefaultGraphFuncName;
bool IsPublicFuncOp(func::FuncOp func_op) {
return SymbolTable::getSymbolVisibility(&*func_op) ==
SymbolTable::Visibility::Public;
}
}
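// Returns the module's entry point: a public @main is preferred, with a public
// @serving_default as fallback; returns nullptr when neither exists.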
func::FuncOp FindMainFuncOp(ModuleOp module_op) {
if (const auto main_func_op = module_op.lookupSymbol<func::FuncOp>(
kImportModelDefaultGraphFuncName);
main_func_op != nullptr && IsPublicFuncOp(main_func_op)) {
return main_func_op;
}
if (const auto serving_default_func_op =
module_op.lookupSymbol<func::FuncOp>(kDefaultServingSignatureDefKey);
serving_default_func_op != nullptr &&
IsPublicFuncOp(serving_default_func_op)) {
return serving_default_func_op;
}
return nullptr;
}
} | #include "tensorflow/compiler/mlir/quantization/common/func.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir::quant {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
using FindMainFuncOpTest = ::mlir::quant::QuantizationTestBase;
TEST_F(FindMainFuncOpTest, ReturnsMainFuncOp) {
constexpr absl::string_view kModuleWithMainFunc = R"mlir(
module {
func.func @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithMainFunc);
EXPECT_THAT(*module_op, NotNull());
func::FuncOp main_func_op = FindMainFuncOp(*module_op);
EXPECT_THAT(main_func_op, NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateMainFunc = R"mlir(
module {
func.func private @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsServingDefaultFuncOp) {
constexpr absl::string_view kModuleWithServingDefaultFunc = R"mlir(
module {
func.func @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenServingDefaultFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateServingDefaultFunc = R"mlir(
module {
func.func private @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncNotFound) {
constexpr absl::string_view kModuleWithNoMainFunc = R"mlir(
module {
func.func @foo() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithNoMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/func.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/func_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
64e73bd8-48f9-4cb1-8ac8-ead261a6781a | cpp | tensorflow/tensorflow | interpreter | tensorflow/lite/core/interpreter.cc | tensorflow/lite/interpreter_test.cc | #include "tensorflow/lite/core/interpreter.h"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "ruy/denormal.h"
#include "tensorflow/compiler/mlir/lite/allocation.h"
#include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/signature_runner.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/internal/signature_def.h"
#include "tensorflow/lite/interpreter_options.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/root_profiler.h"
#include "tensorflow/lite/profiling/telemetry/c/telemetry_setting.h"
#include "tensorflow/lite/profiling/telemetry/telemetry.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/util.h"
#if defined(__ANDROID__)
#define TFLITE_IS_MOBILE_PLATFORM
#endif
#if defined(__APPLE__)
#include "TargetConditionals.h"
#if TARGET_IPHONE_SIMULATOR
#define TFLITE_IS_MOBILE_PLATFORM
#elif TARGET_OS_IPHONE
#define TFLITE_IS_MOBILE_PLATFORM
#endif
#endif
static_assert(sizeof(TfLiteFloat16) == sizeof(uint16_t),
"Float 16 type must be 16 bits.");
namespace tflite {
namespace {
TfLiteQuantization GetQuantizationFromLegacy(
const TfLiteQuantizationParams& legacy_quantization) {
TfLiteQuantization quantization;
quantization.type = kTfLiteAffineQuantization;
auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(
calloc(1, sizeof(TfLiteAffineQuantization)));
affine_quantization->scale = TfLiteFloatArrayCreate(1);
affine_quantization->zero_point = TfLiteIntArrayCreate(1);
affine_quantization->scale->data[0] = legacy_quantization.scale;
affine_quantization->zero_point->data[0] = legacy_quantization.zero_point;
quantization.params = affine_quantization;
return quantization;
}
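// Records the TfLiteStatus of expression `a` on the runtime instrumentation
// event, then propagates the status to the caller on error.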
#define TF_LITE_ENSURE_STATUS_WITH_SCOPED_INSTRUMENTATION(runtime_event, a) \
do { \
TfLiteStatus status = (a); \
    runtime_event.set_runtime_status(/*delegate_status=*/0,                \
                                     /*interpreter_status=*/               \
                                     static_cast<int64_t>(status));        \
TF_LITE_ENSURE_STATUS(status); \
} while (0)
}
Interpreter::Interpreter(ErrorReporter* error_reporter)
: error_reporter_(error_reporter ? error_reporter
: DefaultErrorReporter()) {
#if defined(TFLITE_IS_MOBILE_PLATFORM)
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO, "Initialized TensorFlow Lite runtime.");
#else
TFLITE_LOG_ONCE(TFLITE_LOG_INFO, "Initialized TensorFlow Lite runtime.");
#endif
AddSubgraphs(1);
context_ = primary_subgraph().context();
for (int i = 0; i < kTfLiteMaxExternalContexts; ++i) {
external_contexts_[i] = nullptr;
}
own_external_cpu_backend_context_ =
std::make_unique<ExternalCpuBackendContext>();
external_contexts_[kTfLiteCpuBackendContext] =
own_external_cpu_backend_context_.get();
}
Interpreter::~Interpreter() {
if (external_contexts_[kTfLiteCpuBackendContext] &&
(external_contexts_[kTfLiteCpuBackendContext] !=
own_external_cpu_backend_context_.get())) {
ExternalCpuBackendContext* external_context =
static_cast<ExternalCpuBackendContext*>(
external_contexts_[kTfLiteCpuBackendContext]);
TfLiteInternalBackendContext* internal_context =
external_context->internal_backend_context();
if (internal_context) {
internal_context->ClearCaches();
}
}
}
void Interpreter::SetExternalContext(TfLiteExternalContextType type,
TfLiteExternalContext* ctx) {
if (ctx == own_external_cpu_backend_context_.get()) {
error_reporter_->Report(
"WARNING: The passed external context is identical to the internally "
"owned one.");
return;
}
if (kTfLiteCpuBackendContext == type &&
external_contexts_[kTfLiteCpuBackendContext] ==
own_external_cpu_backend_context_.get()) {
own_external_cpu_backend_context_.reset();
}
primary_subgraph().SetExternalContext(type, ctx);
}
TfLiteStatus Interpreter::SetInputs(std::vector<int> inputs) {
return primary_subgraph().SetInputs(std::move(inputs));
}
TfLiteStatus Interpreter::SetOutputs(std::vector<int> outputs) {
return primary_subgraph().SetOutputs(std::move(outputs));
}
TfLiteStatus Interpreter::SetVariables(std::vector<int> variables) {
return primary_subgraph().SetVariables(std::move(variables));
}
TfLiteStatus Interpreter::AllocateTensors() {
if (ApplyLazyDelegateProviders() == kTfLiteError) return kTfLiteError;
return primary_subgraph().AllocateTensors();
}
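// Appends `subgraphs_to_add` empty subgraphs sharing this interpreter's error
// reporter, external contexts, and resource maps.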
void Interpreter::AddSubgraphs(int subgraphs_to_add,
int* first_new_subgraph_index) {
const size_t base_index = subgraphs_.size();
if (first_new_subgraph_index) *first_new_subgraph_index = base_index;
subgraphs_.reserve(base_index + subgraphs_to_add);
for (int i = 0; i < subgraphs_to_add; ++i) {
Subgraph* subgraph = new Subgraph(
error_reporter_, external_contexts_, &subgraphs_, &resources_,
&resource_ids_, &initialization_status_map_, subgraphs_.size());
subgraphs_.emplace_back(subgraph);
}
}
TfLiteStatus Interpreter::AddNodeWithParameters(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const char* init_data, size_t init_data_size, void* builtin_data,
const TfLiteRegistration* registration, int* node_index) {
return primary_subgraph().AddNodeWithParameters(
inputs, outputs, {}, init_data, init_data_size, builtin_data,
registration, node_index);
}
TfLiteStatus Interpreter::ResizeInputTensor(int tensor_index,
const std::vector<int>& dims) {
return primary_subgraph().ResizeInputTensor(tensor_index, dims);
}
TfLiteStatus Interpreter::ResizeInputTensorStrict(
int tensor_index, const std::vector<int>& dims) {
return primary_subgraph().ResizeInputTensorStrict(tensor_index, dims);
}
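// Runs the primary subgraph with denormals suppressed, re-arming the
// cancellation flag if enabled, and makes output tensor data readable unless
// buffer-handle output is explicitly allowed.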
TfLiteStatus Interpreter::Invoke() {
ScopedRuntimeInstrumentationProfile scoped_runtime_event(root_profiler_.get(),
"invoke");
if (cancellation_enabled_) (void)continue_invocation_.test_and_set();
ruy::ScopedSuppressDenormals suppress_denormals;
TF_LITE_ENSURE_STATUS_WITH_SCOPED_INSTRUMENTATION(
scoped_runtime_event, primary_subgraph().Invoke());
if (!allow_buffer_handle_output_) {
for (int tensor_index : outputs()) {
TF_LITE_ENSURE_STATUS_WITH_SCOPED_INSTRUMENTATION(
scoped_runtime_event,
primary_subgraph().EnsureTensorDataIsReadable(tensor_index));
}
}
return kTfLiteOk;
}
TfLiteStatus Interpreter::AddTensors(int tensors_to_add,
int* first_new_tensor_index) {
return primary_subgraph().AddTensors(tensors_to_add, first_new_tensor_index);
}
TfLiteStatus Interpreter::SetTensorParametersReadOnly(
int tensor_index, TfLiteType type, const char* name,
const std::vector<int>& dims, TfLiteQuantization quantization,
const char* buffer, size_t bytes, const Allocation* allocation) {
return primary_subgraph().SetTensorParametersReadOnly(
tensor_index, type, name, dims.size(), dims.data(), quantization, buffer,
bytes, allocation);
}
TfLiteStatus Interpreter::SetTensorParametersReadWrite(
int tensor_index, TfLiteType type, const char* name,
const std::vector<int>& dims, TfLiteQuantization quantization,
bool is_variable) {
return primary_subgraph().SetTensorParametersReadWrite(
tensor_index, type, name, dims.size(), dims.data(), quantization,
is_variable);
}
TfLiteStatus Interpreter::SetTensorParametersReadOnly(
int tensor_index, TfLiteType type, const char* name, size_t rank,
const int* dims, TfLiteQuantizationParams quantization, const char* buffer,
size_t bytes, const Allocation* allocation) {
TfLiteQuantization new_quantization = GetQuantizationFromLegacy(quantization);
return primary_subgraph().SetTensorParametersReadOnly(
tensor_index, type, name, rank, dims, new_quantization, buffer, bytes,
allocation);
}
TfLiteStatus Interpreter::SetTensorParametersReadWrite(
int tensor_index, TfLiteType type, const char* name, size_t rank,
const int* dims, TfLiteQuantizationParams quantization, bool is_variable,
size_t rank_dims_signature, const int* dims_signature) {
TfLiteQuantization new_quantization = GetQuantizationFromLegacy(quantization);
return primary_subgraph().SetTensorParametersReadWrite(
tensor_index, type, name, rank, dims, new_quantization, is_variable,
rank_dims_signature, dims_signature);
}
TfLiteStatus Interpreter::SetExecutionPlan(const std::vector<int>& new_plan) {
return primary_subgraph().SetExecutionPlan(new_plan);
}
TfLiteStatus Interpreter::SetNumThreads(int num_threads) {
if (num_threads < -1) {
context_->ReportError(context_,
"num_threads should be >=0 or just -1 to let TFLite "
"runtime set the value.");
return kTfLiteError;
}
num_threads = num_threads == 0 ? 1 : num_threads;
for (auto& subgraph : subgraphs_) {
subgraph->context()->recommended_num_threads = num_threads;
}
for (int i = 0; i < kTfLiteMaxExternalContexts; ++i) {
auto* c = external_contexts_[i];
if (c && c->Refresh) {
c->Refresh(context_);
}
}
return kTfLiteOk;
}
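// Drains the queued delegate creators and applies each resulting delegate in
// turn; any status other than kTfLiteOk stops the process and is returned.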
TfLiteStatus Interpreter::ApplyLazyDelegateProviders() {
if (lazy_delegate_providers_.empty() || IsFullyDelegated()) return kTfLiteOk;
TfLiteDelegateCreators delegate_providers;
delegate_providers.swap(lazy_delegate_providers_);
TFLITE_LOG(TFLITE_LOG_INFO,
"Applying %zu TensorFlow Lite delegate(s) lazily.",
delegate_providers.size());
for (size_t i = 0; i < delegate_providers.size(); ++i) {
auto delegate_ptr = delegate_providers[i](context_);
if (delegate_ptr == nullptr) continue;
auto status = ModifyGraphWithDelegateImpl(std::move(delegate_ptr));
switch (status) {
case kTfLiteOk:
TFLITE_LOG(
TFLITE_LOG_INFO,
"Successfully applied the default TensorFlow Lite "
"delegate indexed at %zu.\n *NOTE*: because a delegate has been "
"applied, the precision of computations should be unchanged, but "
"the exact output tensor values may have changed. If such output "
"values are checked in your code, like in your tests etc., please "
"consider increasing error tolerance for the check.",
i);
break;
case kTfLiteError:
TF_LITE_REPORT_ERROR(error_reporter_,
"Failed to apply the default TensorFlow Lite "
"delegate indexed at %zu.",
i);
return kTfLiteError;
case kTfLiteDelegateError:
TFLITE_LOG(
TFLITE_LOG_INFO,
"Error in applying the default TensorFlow Lite delegate indexed "
"at %zu, and all previously applied delegates are reverted.",
i);
return kTfLiteDelegateError;
case kTfLiteApplicationError:
TFLITE_LOG(
TFLITE_LOG_INFO,
"Failed to apply the default TensorFlow Lite delegate indexed at "
"%zu because of incompatibility between runtime and delegate. "
"Ignoring the error, and continuing anyway.",
i);
return kTfLiteApplicationError;
case kTfLiteUnresolvedOps:
TFLITE_LOG(
TFLITE_LOG_INFO,
"Failed to apply the default TensorFlow Lite delegate indexed at "
"%zu because of unresolved ops (which could be resolved by "
"another delegate). Ignoring the error, and continuing anyway.",
i);
return kTfLiteUnresolvedOps;
default:
TF_LITE_REPORT_ERROR(error_reporter_,
"Unknown status (%d) after applying the default "
"TensorFlow Lite delegate indexed at %zu.",
status, i);
return kTfLiteError;
}
}
return kTfLiteOk;
}
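// Applies the delegate to every subgraph except validation subgraphs and
// those marked delegation-skippable; on kTfLiteDelegateError all previously
// applied delegates are removed to restore the original execution plan.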
TfLiteStatus Interpreter::ModifyGraphWithDelegateImpl(
TfLiteDelegate* delegate) {
TfLiteStatus status = kTfLiteOk;
for (auto& subgraph : subgraphs_) {
if (IsValidationSubgraph(subgraph->GetName().c_str()) ||
subgraph->IsDelegationSkippable()) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Skipping calling ModifyGraphWithDelegate on Subgraph %i: %s",
subgraph->GetSubgraphIndex(), subgraph->GetName().c_str());
continue;
}
status = subgraph->ModifyGraphWithDelegate(delegate);
if (status != kTfLiteOk) {
break;
}
}
if (status == kTfLiteDelegateError) {
TF_LITE_ENSURE_STATUS(RemoveAllDelegates());
}
return status;
}
TfLiteStatus Interpreter::RemoveAllDelegates() {
for (auto& subgraph : subgraphs_) {
TF_LITE_ENSURE_STATUS(subgraph->RemoveAllDelegates());
}
return kTfLiteOk;
}
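// Stores model metadata and, when control-dependency metadata is present,
// parses it so each subgraph receives its own dependency slice.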
TfLiteStatus Interpreter::SetMetadata(
const std::map<std::string, std::string>& metadata) {
metadata_ = metadata;
const auto maybe_model_control_dependencies =
metadata_.find(kModelControlDependenciesMetadataKey);
if (maybe_model_control_dependencies == metadata_.end() ||
!ParseModelControlDependencies(
maybe_model_control_dependencies->second.data(),
maybe_model_control_dependencies->second.size(),
&model_control_dependencies_)) {
model_control_dependencies_.clear();
}
for (int subgraph_index = 0; subgraph_index < subgraphs_.size();
++subgraph_index) {
TF_LITE_ENSURE_STATUS(subgraphs_[subgraph_index]->SetMetadata(
&metadata_, model_control_dependencies_.empty()
? nullptr
: &model_control_dependencies_[subgraph_index]));
}
return kTfLiteOk;
}
TfLiteStatus Interpreter::SetTelemetrySettings(
std::unique_ptr<TfLiteTelemetryInterpreterSettings> settings) {
telemetry_data_ = std::move(settings);
return kTfLiteOk;
}
TfLiteStatus Interpreter::ReportTelemetrySettings(const char* setting_name) {
telemetry::TelemetryReportSettings(context_, setting_name,
telemetry_data_.get());
return kTfLiteOk;
}
bool Interpreter::IsFullyDelegated() const {
return primary_subgraph().IsFullyDelegated();
}
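// Replaces any installed profilers with the given one, funneled through a
// single RootProfiler that is then propagated to every subgraph.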
void Interpreter::SetProfilerImpl(std::unique_ptr<Profiler> profiler) {
if (profiler == nullptr) {
root_profiler_ = nullptr;
return;
}
if (root_profiler_ == nullptr) {
root_profiler_ = std::make_unique<profiling::RootProfiler>();
} else {
root_profiler_->RemoveChildProfilers();
}
root_profiler_->AddProfiler(std::move(profiler));
SetSubgraphProfiler();
}
void Interpreter::SetSubgraphProfiler() {
for (int subgraph_index = 0; subgraph_index < subgraphs_.size();
++subgraph_index) {
subgraphs_[subgraph_index]->SetProfiler(root_profiler_.get(),
subgraph_index);
}
}
TfLiteStatus Interpreter::ApplyOptionsImpl(InterpreterOptions* options) {
if (options == nullptr) {
return kTfLiteOk;
}
options_ = std::make_unique<InterpreterOptions>(*options);
for (auto& subgraph : subgraphs_) {
subgraph->SetOptions(options_.get());
}
return kTfLiteOk;
}
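// Enables cooperative cancellation on every subgraph via the shared
// continue_invocation_ flag, which Invoke() re-arms at the start of each run.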
TfLiteStatus Interpreter::EnableCancellation() {
cancellation_enabled_ = true;
for (auto& subgraph : subgraphs_) {
TF_LITE_ENSURE_STATUS(subgraph->EnableCancellation(&continue_invocation_));
}
return kTfLiteOk;
}
TfLiteStatus Interpreter::Cancel() { return primary_subgraph().Cancel(); }
void Interpreter::AddProfiler(std::unique_ptr<Profiler> profiler) {
if (profiler == nullptr) return;
if (root_profiler_ == nullptr) {
root_profiler_ = std::make_unique<profiling::RootProfiler>();
}
root_profiler_->AddProfiler(std::move(profiler));
SetSubgraphProfiler();
}
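// Returns a cached SignatureRunner for the given key, creating one on first
// use; a null key resolves to the primary subgraph's signature, or to a
// synthesized placeholder when the model defines no signatures at all.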
impl::SignatureRunner* Interpreter::GetSignatureRunner(
const char* signature_key_) {
auto [signature_key, empty_signature_fallback] =
ReplaceWithPlaceholderSignatureKeyIfNeeded(signature_key_);
if (!signature_key) {
return nullptr;
}
auto iter = signature_runner_map_.find(signature_key);
if (iter != signature_runner_map_.end()) {
return &(iter->second);
}
if (ApplyLazyDelegateProviders() == kTfLiteError) {
return nullptr;
}
if (empty_signature_fallback) {
placeholder_signature_def_ = CreatePlaceholderSignatureDef();
auto status = signature_runner_map_.insert(
{signature_key, SignatureRunner(placeholder_signature_def_.get(),
&primary_subgraph())});
return &(status.first->second);
}
for (const auto& signature : signature_defs_) {
if (signature.signature_key == signature_key) {
auto status = signature_runner_map_.insert(
{signature_key,
SignatureRunner(&signature, subgraph(signature.subgraph_index))});
return &(status.first->second);
}
}
return nullptr;
}
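// Builds a synthetic SignatureDef over the primary subgraph's named inputs
// and outputs, for models that ship without signature definitions.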
std::unique_ptr<internal::SignatureDef>
Interpreter::CreatePlaceholderSignatureDef() {
auto placeholder_signature_def = std::make_unique<internal::SignatureDef>();
for (auto i = 0; i < inputs().size(); ++i) {
auto* name = GetInputName(i);
placeholder_signature_def->inputs[name] = inputs()[i];
}
for (auto i = 0; i < outputs().size(); ++i) {
auto* name = GetOutputName(i);
placeholder_signature_def->outputs[name] = outputs()[i];
}
placeholder_signature_def->signature_key = kPlaceholderSignatureDefKey;
placeholder_signature_def->subgraph_index = 0;
return placeholder_signature_def;
}
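// Resolves a null signature key to the primary subgraph's signature or to the
// placeholder key; the returned bool reports whether the placeholder fallback
// was taken.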
std::pair<const char*, bool>
Interpreter::ReplaceWithPlaceholderSignatureKeyIfNeeded(
const char* signature_key) {
bool empty_signature_fallback = false;
if (signature_key == nullptr) {
if (signature_defs_.empty()) {
signature_key = kPlaceholderSignatureDefKey;
empty_signature_fallback = true;
} else {
for (const auto& signature : signature_defs_) {
if (signature.subgraph_index == 0) {
signature_key = signature.signature_key.c_str();
break;
}
}
}
}
if (signature_key == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter_,
"The model has signature def but none of those points "
"to primary subgraph.");
return {nullptr, empty_signature_fallback};
} else {
return {signature_key, empty_signature_fallback};
}
}
} | #include "tensorflow/lite/core/interpreter.h"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <map>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/utils/simple_delegate.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/interpreter_options.h"
#include "tensorflow/lite/interpreter_test_util.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/util.h"
#ifdef __APPLE__
#include "TargetConditionals.h"
#endif
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
TEST(BasicInterpreter, ZeroInterpreter) {
testing::internal::CaptureStderr();
Interpreter interpreter;
#if (!defined(NDEBUG)) || defined(__ANDROID__) || \
(defined(__APPLE__) && (TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE))
const char* kExpectedLog = "INFO: Initialized TensorFlow Lite runtime";
EXPECT_THAT(testing::internal::GetCapturedStderr(),
testing::HasSubstr(kExpectedLog));
#else
EXPECT_THAT(testing::internal::GetCapturedStderr(), testing::IsEmpty());
#endif
interpreter.SetInputs({});
interpreter.SetOutputs({});
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
testing::internal::CaptureStderr();
Interpreter interpreter2;
EXPECT_THAT(testing::internal::GetCapturedStderr(), IsEmpty());
}
TEST(BasicInterpreter, InvokeInvalidModel) {
Interpreter interpreter;
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}
TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensorsFloatAndHybrid) {
Interpreter interpreter;
int tensor_index;
ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
constexpr int kTensorSize = 16;
TfLiteQuantizationParams quant;
interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteFloat32, "",
{kTensorSize}, quant,
true);
interpreter.SetVariables({tensor_index});
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TfLiteTensor* tensor = interpreter.tensor(tensor_index);
for (int i = 0; i < kTensorSize; ++i) {
ASSERT_EQ(tensor->data.f[i], 0.0f);
}
}
TEST(BasicInterpreter, TestAllocateTensorsResetVariableTensorsInt8) {
Interpreter interpreter;
int tensor_index;
ASSERT_EQ(interpreter.AddTensors(1, &tensor_index), kTfLiteOk);
constexpr int kTensorSize = 16;
TfLiteQuantizationParams quant;
quant.scale = 0.15;
quant.zero_point = -3;
interpreter.SetTensorParametersReadWrite(tensor_index, kTfLiteInt8, "",
{kTensorSize}, quant,
true);
interpreter.SetVariables({tensor_index});
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TfLiteTensor* tensor = interpreter.tensor(tensor_index);
for (int i = 0; i < kTensorSize; ++i) {
ASSERT_EQ(tensor->data.int8[i], -3);
}
}
TEST(BasicInterpreter, TestSizeFunctions) {
Interpreter interpreter;
int base_index;
ASSERT_EQ(interpreter.nodes_size(), 0);
ASSERT_EQ(interpreter.tensors_size(), 0);
ASSERT_EQ(interpreter.AddTensors(2, &base_index), kTfLiteOk);
ASSERT_EQ(interpreter.tensors_size(), 2);
ASSERT_EQ(base_index, 0);
ASSERT_EQ(interpreter.AddTensors(3, &base_index), kTfLiteOk);
ASSERT_EQ(interpreter.tensors_size(), 5);
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.tensors_size(), 6);
ASSERT_EQ(base_index, 2);
}
TEST(BasicInterpreter, InconsistentModel) {
{
Interpreter interpreter;
ASSERT_NE(interpreter.SetInputs({5}), kTfLiteOk);
ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.inputs(), std::vector<int>());
}
{
Interpreter interpreter;
ASSERT_NE(interpreter.SetOutputs({5}), kTfLiteOk);
ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.outputs(), std::vector<int>());
}
{
Interpreter interpreter;
TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
ASSERT_NE(interpreter.AddNodeWithParameters({3}, {0}, nullptr, 0, nullptr,
                                            &registration),
kTfLiteOk);
ASSERT_NE(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
}
{
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                            &registration),
kTfLiteOk);
}
}
TEST(BasicInterpreter, CheckAllocate) {
struct {
TfLiteType type;
size_t size;
} cases[] = {
{kTfLiteFloat32, sizeof(float)},
{kTfLiteInt32, sizeof(int32_t)},
{kTfLiteUInt32, sizeof(uint32_t)},
{kTfLiteUInt8, sizeof(uint8_t)},
{kTfLiteInt64, sizeof(int64_t)},
{kTfLiteInt16, sizeof(int16_t)},
{kTfLiteUInt16, sizeof(uint16_t)},
{kTfLiteFloat16, sizeof(TfLiteFloat16)},
};
for (auto test : cases) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({});
TfLiteQuantizationParams quant;
interpreter.SetTensorParametersReadWrite(0, test.type, "", {3}, quant);
interpreter.SetTensorParametersReadWrite(1, test.type, "", {4}, quant);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(0)->bytes, 3 * test.size);
ASSERT_NE(interpreter.tensor(0)->data.raw, nullptr);
ASSERT_EQ(interpreter.tensor(1)->bytes, 4 * test.size);
ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
}
}
TEST(BasicInterpreter, CheckQuantization) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({});
TfLiteType tensor_type = kTfLiteInt8;
const uint8_t int8s[] = {3, 4};
float scale = 0.5f;
int32_t zero_point = 12;
TfLiteQuantization rw_quantization;
rw_quantization.type = kTfLiteAffineQuantization;
auto* rw_affine_quantization = static_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
rw_affine_quantization->scale = TfLiteFloatArrayCreate(1);
rw_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
rw_affine_quantization->scale->data[0] = scale;
rw_affine_quantization->zero_point->data[0] = zero_point;
rw_quantization.params = rw_affine_quantization;
TfLiteQuantization ro_quantization;
ro_quantization.type = kTfLiteAffineQuantization;
auto* ro_affine_quantization = static_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
ro_affine_quantization->scale = TfLiteFloatArrayCreate(1);
ro_affine_quantization->zero_point = TfLiteIntArrayCreate(1);
ro_affine_quantization->scale->data[0] = scale;
ro_affine_quantization->zero_point->data[0] = zero_point;
ro_quantization.params = ro_affine_quantization;
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, tensor_type, "", {3},
rw_quantization),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadOnly(
1, tensor_type, "", {2}, ro_quantization,
reinterpret_cast<const char*>(int8s), 2),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(0)->params.scale, scale);
ASSERT_EQ(interpreter.tensor(0)->params.zero_point, zero_point);
ASSERT_EQ(interpreter.tensor(0)->quantization.type, rw_quantization.type);
ASSERT_EQ(interpreter.tensor(1)->params.scale, scale);
ASSERT_EQ(interpreter.tensor(1)->params.zero_point, zero_point);
ASSERT_EQ(interpreter.tensor(1)->quantization.type, ro_quantization.type);
}
TEST(BasicInterpreter, CheckResize) {
const float floats[] = {-3., -4.};
const int32_t int32s[] = {-3, -4};
const uint32_t uint32s[] = {3, 4};
const uint8_t uint8s[] = {3, 4};
const int64_t int64s[] = {6, -7};
const int16_t int16s[] = {8, -9};
const Eigen::half float16s[] = {Eigen::half(-3.f), Eigen::half(-4.f)};
struct {
TfLiteType type;
size_t size;
const char* array;
} cases[] = {
{kTfLiteFloat32, sizeof(float), reinterpret_cast<const char*>(floats)},
{kTfLiteInt32, sizeof(int32_t), reinterpret_cast<const char*>(int32s)},
{kTfLiteUInt32, sizeof(uint32_t), reinterpret_cast<const char*>(uint32s)},
{kTfLiteUInt8, sizeof(uint8_t), reinterpret_cast<const char*>(uint8s)},
{kTfLiteInt64, sizeof(int64_t), reinterpret_cast<const char*>(int64s)},
{kTfLiteInt16, sizeof(int16_t), reinterpret_cast<const char*>(int16s)},
{kTfLiteFloat16, sizeof(TfLiteFloat16),
reinterpret_cast<const char*>(float16s)},
};
for (auto test : cases) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({});
TfLiteQuantizationParams quant;
ASSERT_EQ(
interpreter.SetTensorParametersReadWrite(0, test.type, "", {3}, quant),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadOnly(
1, test.type, "", {2}, quant, test.array, 2 * test.size),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {1, 2}), kTfLiteOk);
ASSERT_NE(interpreter.ResizeInputTensor(1, {3}), kTfLiteOk);
ASSERT_NE(interpreter.SetTensorParametersReadOnly(
1, test.type, "", {2}, quant, test.array, 1 * test.size),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}
}
TEST(BasicInterpreter, CheckAlignment) {
struct {
TfLiteType type;
} cases[] = {{kTfLiteFloat32}, {kTfLiteInt32}, {kTfLiteUInt32},
{kTfLiteUInt8}, {kTfLiteInt64}, {kTfLiteInt16},
{kTfLiteFloat16}};
for (auto test : cases) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(4), kTfLiteOk);
for (int i = 0; i < 4; i++) {
TfLiteQuantizationParams quant;
interpreter.SetTensorParametersReadWrite(i, test.type, "", {2 * i + 1},
quant);
}
interpreter.AllocateTensors();
for (int i = 0; i < 4; i++) {
const TfLiteTensor& tensor = *interpreter.tensor(i);
ASSERT_EQ(reinterpret_cast<intptr_t>(tensor.data.raw) % 4, 0);
}
}
}
TEST(BasicInterpreter, CheckArenaAllocation) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(10), kTfLiteOk);
TfLiteQuantizationParams quant;
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
std::vector<int> sizes{2048, 4096, 1023, 2047, 1021,
2047, 1023, 2046, 0, 2048};
for (size_t i = 0; i < sizes.size(); ++i) {
interpreter.SetTensorParametersReadWrite(static_cast<int>(i), kTfLiteUInt8,
"", {sizes[i]}, quant);
}
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({9, 4});
interpreter.AddNodeWithParameters({0, 1}, {2, 3}, nullptr, 0, nullptr, &reg);
interpreter.AddNodeWithParameters({2, 1}, {4, 5}, nullptr, 0, nullptr, &reg);
interpreter.AddNodeWithParameters({4, 3}, {6, 7}, nullptr, 0, nullptr, &reg);
interpreter.AddNodeWithParameters({6, 5}, {8}, nullptr, 0, nullptr, &reg);
interpreter.AddNodeWithParameters({8, 7}, {9}, nullptr, 0, nullptr, &reg);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
#ifndef TFLITE_USE_SIMPLE_MEMORY_PLANNER
ASSERT_LT(interpreter.tensor(0)->data.raw, interpreter.tensor(1)->data.raw);
ASSERT_LT(interpreter.tensor(1)->data.raw, interpreter.tensor(3)->data.raw);
ASSERT_EQ(interpreter.tensor(3)->data.raw, interpreter.tensor(9)->data.raw);
ASSERT_LT(interpreter.tensor(3)->data.raw, interpreter.tensor(5)->data.raw);
ASSERT_LT(interpreter.tensor(5)->data.raw, interpreter.tensor(2)->data.raw);
ASSERT_EQ(interpreter.tensor(2)->data.raw, interpreter.tensor(7)->data.raw);
ASSERT_LT(interpreter.tensor(2)->data.raw, interpreter.tensor(4)->data.raw);
#endif
ASSERT_EQ(interpreter.tensor(8)->data.raw, nullptr);
}
TEST(BasicInterpreter, BufferAccess) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter.typed_tensor<float>(0), nullptr);
ASSERT_EQ(interpreter.typed_tensor<int>(0), nullptr);
ASSERT_EQ(interpreter.typed_tensor<float>(0), interpreter.tensor(0)->data.f);
}
TEST(BasicInterpreter, NoOpInterpreter) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}
TEST(BasicInterpreter, RedundantAllocateTensors) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
const auto data_raw = interpreter.tensor(0)->data.raw;
ASSERT_NE(data_raw, nullptr);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(0)->data.raw, data_raw);
}
TEST(BasicInterpreter, RedundantAllocateTensorsWithDynamicInputs) {
Interpreter interpreter;
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
interpreter.SetInputs({0});
interpreter.SetOutputs({1});
interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
interpreter.tensor(0)->data.raw = nullptr;
interpreter.tensor(0)->allocation_type = kTfLiteDynamic;
ASSERT_EQ(interpreter.ResizeInputTensor(interpreter.inputs()[0], {1, 2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
interpreter.tensor(1)->data.raw = nullptr;
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
}
TEST(BasicInterpreter, ResizingTensors) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
int t = interpreter.inputs()[0];
TfLiteTensor* tensor = interpreter.tensor(t);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
tensor->data.f[5] = 0.123f;
tensor->data.raw = nullptr;
tensor->allocation_type = kTfLiteDynamic;
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 4}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 1 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {0}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 0);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 0}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 0);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TfLiteTensorRealloc(9 * sizeof(float), tensor);
tensor->data.f[7] = 0.123f;
ASSERT_EQ(interpreter.ResizeInputTensor(t, {2, 2, 4}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 16 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
TfLiteTensorRealloc(17 * sizeof(float), tensor);
tensor->data.f[15] = 0.123f;
}
TEST(BasicInterpreter, NoopResizingTensors) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
int t = interpreter.inputs()[0];
TfLiteTensor* tensor = interpreter.tensor(t);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
tensor->data.f[5] = 0.123f;
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_NE(tensor->data.raw, nullptr);
ASSERT_EQ(tensor->data.f[5], 0.123f);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_NE(tensor->data.raw, nullptr);
ASSERT_EQ(tensor->data.f[5], 0.123f);
}
TEST(BasicInterpreter, ResizingTensorsStrictInvalid) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {1, 1, 3}, TfLiteQuantizationParams()),
kTfLiteOk);
int t = interpreter.inputs()[0];
TfLiteTensor* tensor = interpreter.tensor(t);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 1, 3}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 3 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 3}), kTfLiteError);
EXPECT_EQ(tensor->bytes, 3 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 3}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}
TEST(BasicInterpreter, ResizingTensorsStrict) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
std::vector<int> dims_signature = {-1, -1, 3};
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {1, 1, 3}, TfLiteQuantizationParams(),
false, &dims_signature),
kTfLiteOk);
int t = interpreter.inputs()[0];
TfLiteTensor* tensor = interpreter.tensor(t);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 3}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 4}), kTfLiteError);
EXPECT_EQ(tensor->bytes, 6 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(t, {1, 2, 4}), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {8}), kTfLiteError);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1}), kTfLiteError);
EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensorStrict(t, {1, 2, 4, 1}), kTfLiteError);
EXPECT_EQ(tensor->bytes, 8 * sizeof(float));
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}
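// Registration for a pass-through op: init allocates two scratch tensors,
// prepare resizes the output and both temporaries to the input's shape, and
// invoke copies the input into the output and the temporaries.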
TfLiteRegistration GetPassthroughOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.init = [](TfLiteContext* context, const char*, size_t) -> void* {
auto* first_new_tensor = new int;
context->AddTensors(context, 2, first_new_tensor);
return first_new_tensor;
};
reg.free = [](TfLiteContext* context, void* buffer) {
delete static_cast<int*>(buffer);
};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
auto* first_new_tensor = static_cast<int*>(node->user_data);
const TfLiteTensor* tensor0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &tensor0));
TfLiteTensor* tensor1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &tensor1));
TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, tensor1, newSize));
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(2);
for (int i = 0; i < 2; ++i) {
node->temporaries->data[i] = *(first_new_tensor) + i;
}
auto setup_temporary = [&](int id) {
TfLiteTensor* tmp = &context->tensors[id];
tmp->type = kTfLiteFloat32;
tmp->allocation_type = kTfLiteArenaRw;
return context->ResizeTensor(context, tmp,
TfLiteIntArrayCopy(tensor0->dims));
};
TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[0]));
TF_LITE_ENSURE_STATUS(setup_temporary(node->temporaries->data[1]));
return kTfLiteOk;
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* a0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
auto populate = [&](int id) {
TfLiteTensor* t = &context->tensors[id];
int num = a0->dims->data[0];
for (int i = 0; i < num; i++) {
t->data.f[i] = a0->data.f[i];
}
};
populate(node->outputs->data[0]);
populate(node->temporaries->data[0]);
populate(node->temporaries->data[1]);
return kTfLiteOk;
};
return reg;
}
TEST(BasicInterpreter, OneOpInterpreter) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
TfLiteQuantizationParams quantized;
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "in1",
{3}, quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "out0",
{3}, quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.GetInputName(0), "in1");
ASSERT_EQ(interpreter.GetOutputName(0), "out0");
TfLiteRegistration reg = GetPassthroughOpRegistration();
ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}
TEST(BasicInterpreter, ReleaseNonPersistentMemory) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
TfLiteQuantizationParams quantized;
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "in1",
{3}, quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "out0",
{3}, quantized),
kTfLiteOk);
TfLiteRegistration reg = GetPassthroughOpRegistration();
ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.ReleaseNonPersistentMemory(), kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {4}), kTfLiteOk);
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
}
TEST(BasicInterpreter, ThreeStepAllocate) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(5), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({4}), kTfLiteOk);
TfLiteQuantizationParams quantized;
union {
char raw_bytes[15];
struct {
int32_t num_strs;
int32_t offsets[2];
char str_data[3];
} tensor_data;
} data;
data.tensor_data = {1, {12, 15}, {'A', 'B', 'C'}};
ASSERT_EQ(interpreter.SetTensorParametersReadOnly(0, kTfLiteString, "", {1},
quantized, data.raw_bytes,
sizeof(data.raw_bytes)),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteString, "", {1},
quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(2, kTfLiteInt32, "", {1},
quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(3, kTfLiteString, "", {1},
quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(4, kTfLiteInt32, "", {1},
quantized),
kTfLiteOk);
TfLiteRegistration reg_copy = {nullptr, nullptr, nullptr, nullptr};
reg_copy.invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
DynamicBuffer buf;
StringRef str_ref = GetString(input, 0);
buf.AddString(str_ref);
buf.WriteToTensorAsVector(output);
return kTfLiteOk;
};
TfLiteRegistration reg_len = {nullptr, nullptr, nullptr, nullptr};
reg_len.prepare = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1);
outputSize->data[0] = 1;
return context->ResizeTensor(context, output, outputSize);
};
reg_len.invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* a0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
TfLiteTensor* a1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &a1));
a1->data.i32[0] = a0->bytes;
return kTfLiteOk;
};
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                            &reg_copy),
kTfLiteOk);
ASSERT_EQ(interpreter.AddNodeWithParameters({1}, {2}, nullptr, 0, nullptr,
                                            &reg_len),
kTfLiteOk);
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {3}, nullptr, 0, nullptr,
                                            &reg_copy),
kTfLiteOk);
ASSERT_EQ(interpreter.AddNodeWithParameters({3}, {4}, nullptr, 0, nullptr,
                                            &reg_len),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(0)->bytes, 15);
ASSERT_NE(interpreter.tensor(0)->data.raw, nullptr);
ASSERT_EQ(interpreter.tensor(1)->bytes, 15);
ASSERT_NE(interpreter.tensor(1)->data.raw, nullptr);
ASSERT_EQ(interpreter.tensor(3)->bytes, 15);
ASSERT_NE(interpreter.tensor(4)->data.raw, nullptr);
ASSERT_EQ(interpreter.tensor(2)->bytes, 4);
ASSERT_EQ(interpreter.tensor(2)->data.i32[0], 15);
ASSERT_EQ(interpreter.tensor(4)->bytes, 4);
ASSERT_EQ(interpreter.tensor(4)->data.i32[0], 15);
}
TEST(BasicInterpreter, AllocateTwice) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
TfLiteQuantizationParams quantized;
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quantized),
kTfLiteOk);
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quantized),
kTfLiteOk);
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* tensor0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &tensor0));
TfLiteTensor* tensor1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &tensor1));
TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
return context->ResizeTensor(context, tensor1, newSize);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* a0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
TfLiteTensor* a1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &a1));
int num = a0->dims->data[0];
for (int i = 0; i < num; i++) {
a1->data.f[i] = a0->data.f[i];
}
return kTfLiteOk;
};
ASSERT_EQ(
      interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg),
kTfLiteOk);
ASSERT_EQ(interpreter.ResizeInputTensor(0, {3}), kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
char* old_tensor0_ptr = interpreter.tensor(0)->data.raw;
char* old_tensor1_ptr = interpreter.tensor(1)->data.raw;
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(old_tensor0_ptr, interpreter.tensor(0)->data.raw);
ASSERT_EQ(old_tensor1_ptr, interpreter.tensor(1)->data.raw);
}
TEST(BasicInterpreter, TestNullErrorReporter) {
TestErrorReporter reporter;
Interpreter interpreter;
}
TEST(BasicInterpreter, TestCustomErrorReporter) {
TestErrorReporter reporter;
Interpreter interpreter(&reporter);
ASSERT_NE(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(reporter.error_messages(),
"Invoke called on model that is not ready.");
ASSERT_EQ(reporter.num_calls(), 1);
}
TEST(BasicInterpreter, TestOverflow) {
TestErrorReporter reporter;
Interpreter interpreter(&reporter);
TfLiteQuantizationParams quantized;
ASSERT_EQ(interpreter.AddTensors(1), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
if (sizeof(size_t) == 8) {
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30}, quantized),
kTfLiteOk);
ASSERT_NE(
interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30, 1 << 2}, quantized),
kTfLiteOk);
EXPECT_THAT(
reporter.error_messages(),
testing::EndsWith("BytesRequired number of bytes overflowed.\n"));
reporter.Reset();
ASSERT_NE(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "in1", {1 << 30, 1 << 30, 1 << 2, 1 << 4},
quantized),
kTfLiteOk);
EXPECT_THAT(
reporter.error_messages(),
testing::EndsWith("BytesRequired number of elements overflowed.\n"));
} else if (sizeof(size_t) == 4) {
ASSERT_EQ(interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14}, quantized),
kTfLiteOk);
ASSERT_NE(
interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14, 1 << 3}, quantized),
kTfLiteOk);
EXPECT_THAT(
reporter.error_messages(),
testing::EndsWith("BytesRequired number of bytes overflowed.\n"));
reporter.Reset();
ASSERT_NE(
interpreter.SetTensorParametersReadWrite(
0, kTfLiteFloat32, "in1", {1 << 14, 1 << 14, 1 << 4}, quantized),
kTfLiteOk);
EXPECT_THAT(
reporter.error_messages(),
testing::EndsWith("BytesRequired number of elements overflowed.\n"));
} else {
ASSERT_TRUE(false);
}
}
TEST(BasicInterpreter, TestUnsupportedDelegateFunctions) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
{
TfLiteIntArray* execution_plan;
EXPECT_EQ(context->GetExecutionPlan(context, &execution_plan),
kTfLiteError);
}
{
TfLiteNode* node;
TfLiteRegistration* registration;
EXPECT_EQ(
          context->GetNodeAndRegistration(context, 0, &node, &registration),
kTfLiteError);
}
{
TfLiteRegistration delegate_registration = {nullptr, nullptr, nullptr,
nullptr};
TfLiteIntArray nodes_to_replace;
nodes_to_replace.size = 0;
EXPECT_EQ(context->ReplaceNodeSubsetsWithDelegateKernels(
context, delegate_registration, &nodes_to_replace, nullptr),
kTfLiteError);
}
return kTfLiteError;
};
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                            &registration),
kTfLiteOk);
EXPECT_EQ(interpreter.AllocateTensors(), kTfLiteError);
}
TEST(BasicInterpreter, DynamicTensorsResizeDescendants) {
Interpreter interpreter;
interpreter.AddTensors(4);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({3});
TfLiteQuantizationParams quant;
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {2, 2, 1, 1},
quant);
interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "", {4, 2}, quant);
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {}, quant);
interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {}, quant);
TfLiteRegistration* pad_op = tflite::ops::builtin::Register_PADV2();
TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, nullptr, pad_op);
interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, neg_op);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
interpreter.typed_tensor<int>(1)[0] = 2;
interpreter.typed_tensor<int>(1)[1] = 2;
interpreter.typed_tensor<int>(1)[2] = 2;
interpreter.typed_tensor<int>(1)[3] = 2;
interpreter.typed_tensor<int>(1)[4] = 0;
interpreter.typed_tensor<int>(1)[5] = 0;
interpreter.typed_tensor<int>(1)[6] = 0;
interpreter.typed_tensor<int>(1)[7] = 0;
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(2)->bytes, sizeof(float) * 6 * 6);
ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 6 * 6);
interpreter.typed_tensor<int>(1)[0] = 4;
interpreter.typed_tensor<int>(1)[1] = 4;
interpreter.typed_tensor<int>(1)[2] = 6;
interpreter.typed_tensor<int>(1)[3] = 6;
interpreter.typed_tensor<int>(1)[4] = 0;
interpreter.typed_tensor<int>(1)[5] = 0;
interpreter.typed_tensor<int>(1)[6] = 0;
interpreter.typed_tensor<int>(1)[7] = 0;
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(2)->bytes, sizeof(float) * 10 * 14);
ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 10 * 14);
}
TEST(BasicInterpreter, ReleaseDynamicTensors) {
Interpreter interpreter;
interpreter.AddTensors(4);
interpreter.SetInputs({0, 1});
interpreter.SetOutputs({3});
TfLiteQuantizationParams quant;
interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {2, 2, 1, 1},
                                         quant);
interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "", {4, 2}, quant);
interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {}, quant);
interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {}, quant);
TfLiteRegistration* pad_op = tflite::ops::builtin::Register_PADV2();
TfLiteRegistration* neg_op = tflite::ops::builtin::Register_NEG();
interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0, nullptr, pad_op);
interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, neg_op);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
const std::vector<int> padding = {2, 2, 2, 2, 0, 0, 0, 0};
int* tensor_value = interpreter.typed_tensor<int>(1);
for (int i = 0; i < padding.size(); ++i) {
tensor_value[i] = padding[i];
}
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_NE(interpreter.tensor(2)->data.raw, nullptr);
InterpreterOptions options;
options.SetEnsureDynamicTensorsAreReleased();
interpreter.ApplyOptions(&options);
ASSERT_EQ(interpreter.Invoke(), kTfLiteOk);
ASSERT_EQ(interpreter.tensor(2)->data.raw, nullptr);
ASSERT_EQ(interpreter.tensor(3)->bytes, sizeof(float) * 6 * 6);
}
TEST(InterpreterTensorsCapacityTest, TestWithinHeadroom) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(Interpreter::kTensorsReservedCapacity),
kTfLiteOk);
TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* first_tensor = context->tensors;
int new_tensor_index;
context->AddTensors(context, Interpreter::kTensorsCapacityHeadroom,
&new_tensor_index);
EXPECT_EQ(first_tensor, context->tensors);
return kTfLiteOk;
};
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                            &registration),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}
TEST(InterpreterTensorsCapacityTest, TestExceedHeadroom) {
Interpreter interpreter;
ASSERT_EQ(interpreter.AddTensors(Interpreter::kTensorsReservedCapacity),
kTfLiteOk);
TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
registration.prepare = [](TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* first_tensor = context->tensors;
int new_tensor_index;
context->AddTensors(
context,
(context->tensors_size + Interpreter::kTensorsCapacityHeadroom + 1) * 2,
&new_tensor_index);
EXPECT_NE(first_tensor, context->tensors);
return kTfLiteOk;
};
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                            &registration),
kTfLiteOk);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
}
TEST_F(InterpreterTest, SubgraphNumbering) {
EXPECT_THAT(interpreter_->subgraph(0)->GetSubgraphIndex(), 0);
AddSubgraphs(2);
AddSubgraphs(3);
std::vector<int> subgraph_indices;
for (int i = 0; i < interpreter_->subgraphs_size(); ++i) {
subgraph_indices.push_back(interpreter_->subgraph(i)->GetSubgraphIndex());
}
EXPECT_THAT(subgraph_indices, ElementsAre(0, 1, 2, 3, 4, 5));
}
struct TestExternalContext : public TfLiteExternalContext {
static constexpr TfLiteExternalContextType kType = kTfLiteGemmLowpContext;
static TestExternalContext* Get(TfLiteContext* context) {
return reinterpret_cast<TestExternalContext*>(
context->GetExternalContext(context, kType));
}
static void Set(TfLiteContext* context, TestExternalContext* value) {
context->SetExternalContext(context, kType, value);
}
int num_refreshes = 0;
};
TEST_F(InterpreterTest, GetSetResetExternalContexts) {
auto* context = GetInterpreterContext();
TestExternalContext external_context;
external_context.Refresh = [](TfLiteContext* context) {
auto* ptr = TestExternalContext::Get(context);
if (ptr != nullptr) {
++ptr->num_refreshes;
}
return kTfLiteOk;
};
EXPECT_EQ(TestExternalContext::Get(context), nullptr);
ASSERT_EQ(interpreter_->SetNumThreads(4), kTfLiteOk);
TestExternalContext::Set(context, &external_context);
EXPECT_EQ(TestExternalContext::Get(context), &external_context);
ASSERT_EQ(interpreter_->SetNumThreads(4), kTfLiteOk);
ASSERT_EQ(interpreter_->SetNumThreads(5), kTfLiteOk);
EXPECT_EQ(external_context.num_refreshes, 2);
external_context.num_refreshes = 0;
ASSERT_EQ(interpreter_->SetNumThreads(-2), kTfLiteError);
EXPECT_EQ(external_context.num_refreshes, 0);
ASSERT_EQ(interpreter_->SetNumThreads(-1), kTfLiteOk);
EXPECT_EQ(external_context.num_refreshes, 1);
TestExternalContext::Set(context, nullptr);
EXPECT_EQ(TestExternalContext::Get(context), nullptr);
ASSERT_EQ(interpreter_->SetNumThreads(4), kTfLiteOk);
}
TEST_F(InterpreterTest, SetNumThreadsSucceedsWithZero) {
ASSERT_EQ(interpreter_->SetNumThreads(0), kTfLiteOk);
EXPECT_EQ(interpreter_->subgraph(0)->context()->recommended_num_threads, 1);
}
struct TestCpuBackendContext : public TfLiteInternalBackendContext {
void ClearCaches() override { ++num_calls; }
void SetMaxNumThreads(int num_threads) override {}
int num_calls = 0;
};
TEST_F(InterpreterTest, ExternalBackendContextClearsCachesOnDelete) {
ExternalCpuBackendContext external_cpu_context;
TestCpuBackendContext* cpu_backend_context = new TestCpuBackendContext();
external_cpu_context.set_internal_backend_context(
std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));
{
Interpreter interpreter;
interpreter.SetExternalContext(kTfLiteCpuBackendContext,
&external_cpu_context);
EXPECT_EQ(cpu_backend_context->num_calls, 0);
}
EXPECT_EQ(cpu_backend_context->num_calls, 1);
}
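// Fixture that wires up two independent copy nodes (tensor 0 -> 2 and
// tensor 1 -> 3) whose invocations record their node id in run_order_, making
// the effective execution plan observable.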
class TestExecutionPlan : public InterpreterTest {
class CallReporting {
public:
CallReporting(int node_id, std::vector<int>* run_order)
: node_id_(node_id), run_order_(run_order) {}
void Record() { run_order_->push_back(node_id_); }
private:
int node_id_;
std::vector<int>* run_order_;
};
TfLiteRegistration CopyOpRegistration() {
TfLiteRegistration reg = {nullptr, nullptr, nullptr, nullptr};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* tensor0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &tensor0));
TfLiteTensor* tensor1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &tensor1));
TfLiteIntArray* newSize = TfLiteIntArrayCopy(tensor0->dims);
return context->ResizeTensor(context, tensor1, newSize);
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
CallReporting* call_reporting =
static_cast<CallReporting*>(node->builtin_data);
const TfLiteTensor* a0;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &a0));
TfLiteTensor* a1;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &a1));
int num = a0->dims->data[0];
for (int i = 0; i < num; i++) {
a1->data.f[i] = a0->data.f[i];
}
call_reporting->Record();
return kTfLiteOk;
};
return reg;
}
void MakeCopyNode(int input, int output) {
TfLiteRegistration copy_op = CopyOpRegistration();
CallReporting* call_reporting_1 =
static_cast<CallReporting*>(malloc(sizeof(CallReporting)));
new (call_reporting_1) CallReporting(input, &run_order_);
    ASSERT_EQ(interpreter_->AddNodeWithParameters(
                  {input}, {output}, nullptr, 0,
                  static_cast<void*>(call_reporting_1), &copy_op),
              kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(input, {3}), kTfLiteOk);
}
void SetUp() final {
ASSERT_EQ(interpreter_->AddTensors(4), kTfLiteOk);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({2, 3});
TfLiteQuantizationParams quantized;
for (int tensor_index = 0; tensor_index < 4; tensor_index++) {
ASSERT_EQ(interpreter_->SetTensorParametersReadWrite(
tensor_index, kTfLiteFloat32, "", {3}, quantized),
kTfLiteOk);
}
MakeCopyNode(0, 2);
MakeCopyNode(1, 3);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
}
protected:
std::vector<int> run_order_;
};
TEST_F(TestExecutionPlan, DefaultExecutionPlan) {
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(run_order_, std::vector<int>({0, 1}));
}
TEST_F(TestExecutionPlan, ReversedExecutionPlan) {
SetExecutionPlan({1, 0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(run_order_, std::vector<int>({1, 0}));
}
TEST_F(TestExecutionPlan, SubsetExecutionPlan) {
SetExecutionPlan({1});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(run_order_, std::vector<int>({1}));
}
TEST_F(TestExecutionPlan, NullExecutionPlan) {
SetExecutionPlan({});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
ASSERT_EQ(run_order_, std::vector<int>());
}
TEST(TestDelegateOwnership, ProperlyDisposed) {
struct TfLiteInterpreterOwnedDelegate : public TfLiteDelegate {
TfLiteInterpreterOwnedDelegate(bool* destroyed, bool* prepared)
: TfLiteDelegate(TfLiteDelegateCreate()),
destroyed(destroyed),
prepared(prepared) {
flags = kTfLiteDelegateFlagsNone;
Prepare = [](TfLiteContext*, TfLiteDelegate* delegate) -> TfLiteStatus {
*static_cast<TfLiteInterpreterOwnedDelegate*>(delegate)->prepared =
true;
return kTfLiteOk;
};
}
~TfLiteInterpreterOwnedDelegate() { *destroyed = true; }
bool* destroyed;
bool* prepared;
};
bool destroyed = false;
bool prepared = false;
std::unique_ptr<TfLiteInterpreterOwnedDelegate> delegate(
new TfLiteInterpreterOwnedDelegate(&destroyed, &prepared));
{
Interpreter interpreter;
TfLiteRegistration registration = {nullptr, nullptr, nullptr, nullptr};
ASSERT_EQ(interpreter.AddTensors(2), kTfLiteOk);
ASSERT_EQ(interpreter.SetInputs({0}), kTfLiteOk);
ASSERT_EQ(interpreter.SetOutputs({1}), kTfLiteOk);
ASSERT_EQ(interpreter.AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr,
                                                &registration),
kTfLiteOk);
ASSERT_EQ(InterpreterTest::ModifyGraphWithDelegate(&interpreter,
std::move(delegate)),
kTfLiteOk);
EXPECT_TRUE(prepared);
EXPECT_FALSE(destroyed);
interpreter.AllocateTensors();
interpreter.Invoke();
EXPECT_FALSE(destroyed);
}
EXPECT_TRUE(destroyed);
}
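// Shared state for the cancellation tests below: the cancel ops spawn a
// thread that calls Interpreter::Cancel() while Invoke() is still running.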
struct CancellationData {
bool is_cancelled = false;
Interpreter* interpreter = nullptr;
TfLiteStatus cancellation_status = kTfLiteError;
  unsigned int cancel_count = 1;
};
bool CheckCancellation(void* data) {
CancellationData* cancellation_data =
static_cast<struct CancellationData*>(data);
return cancellation_data->is_cancelled;
}
CancellationData& GetCancellationData() {
static CancellationData* data = []() -> CancellationData* {
return new CancellationData();
}();
return *data;
}
auto GetOpPrepare() {
return [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* in_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &in_tensor));
TfLiteTensor* out_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
return context->ResizeTensor(context, out_tensor, new_size);
};
}
class CancelTest : public InterpreterTest {
protected:
TfLiteRegistration CancelOpRegistration() {
TfLiteRegistration reg{};
reg.prepare = GetOpPrepare();
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
if (GetCancellationData().cancel_count--) {
std::thread([]() {
GetCancellationData().cancellation_status =
GetCancellationData().interpreter->Cancel();
}).join();
}
return kTfLiteOk;
};
return reg;
}
TfLiteRegistration OkOpRegistration() {
TfLiteRegistration reg{};
reg.prepare = GetOpPrepare();
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
};
return reg;
}
void MakeCancelNode(int input, int output) {
TfLiteRegistration op = CancelOpRegistration();
ASSERT_EQ(interpreter_->AddNodeWithParameters({input}, {output}, nullptr, 0,
nullptr, &op),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(input, {3}), kTfLiteOk);
}
  TfLiteRegistration CancelAndCallOpRegistration() {
TfLiteRegistration reg{};
reg.prepare = [](TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* in_tensor;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &in_tensor));
TfLiteTensor* out_tensor;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));
TfLiteIntArray* new_size = TfLiteIntArrayCopy(in_tensor->dims);
TF_LITE_ENSURE_STATUS(
context->ResizeTensor(context, out_tensor, new_size));
auto* subgraphs =
reinterpret_cast<Subgraph*>(context->impl_)->GetSubgraphs();
Subgraph* callee_subgraph = (*subgraphs)[1].get();
return callee_subgraph->AllocateTensors();
};
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
if (GetCancellationData().cancel_count--) {
std::thread([]() {
GetCancellationData().cancellation_status =
GetCancellationData().interpreter->Cancel();
}).join();
}
auto* subgraphs =
reinterpret_cast<Subgraph*>(context->impl_)->GetSubgraphs();
Subgraph* callee_subgraph = (*subgraphs)[1].get();
return callee_subgraph->Invoke();
};
return reg;
}
void MakeCancelAndCallNode(int input, int output) {
TfLiteRegistration op = CancelAndCallOpRegistration();
ASSERT_EQ(interpreter_->AddNodeWithParameters({input}, {output}, nullptr, 0,
nullptr, &op),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(input, {3}), kTfLiteOk);
}
void SetUpCalleeSubgraph() {
TfLiteRegistration op = OkOpRegistration();
auto* subgraph = interpreter_->subgraph(1);
ASSERT_EQ(
subgraph->AddNodeWithParameters({0}, {1}, {}, nullptr, 0, nullptr, &op),
kTfLiteOk);
ASSERT_EQ(subgraph->ResizeInputTensor(0, {3}), kTfLiteOk);
}
void MakeOkNode(int input, int output) {
TfLiteRegistration op = OkOpRegistration();
ASSERT_EQ(interpreter_->AddNodeWithParameters({input}, {output}, nullptr, 0,
nullptr, &op),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(input, {3}), kTfLiteOk);
}
void SetUp() final {
int num_tensors = 3;
ASSERT_EQ(interpreter_->AddTensors(num_tensors), kTfLiteOk);
interpreter_->SetInputs({0});
interpreter_->SetOutputs({2});
TfLiteQuantizationParams quantized;
for (int tensor_index = 0; tensor_index < num_tensors; tensor_index++) {
ASSERT_EQ(interpreter_->SetTensorParametersReadWrite(
tensor_index, kTfLiteFloat32, "", {3}, quantized),
kTfLiteOk);
}
AddSubgraphs(1);
auto* subgraph = interpreter_->subgraph(1);
num_tensors = 2;
ASSERT_EQ(subgraph->AddTensors(num_tensors), kTfLiteOk);
subgraph->SetInputs({0});
subgraph->SetOutputs({1});
TfLiteQuantization quant{kTfLiteNoQuantization, nullptr};
for (int tensor_index = 0; tensor_index < num_tensors; tensor_index++) {
ASSERT_EQ(subgraph->SetTensorParametersReadWrite(
tensor_index, kTfLiteFloat32, "", {3}, quant),
kTfLiteOk);
}
GetCancellationData().interpreter = interpreter_.get();
GetCancellationData().cancellation_status = kTfLiteError;
GetCancellationData().cancel_count = 1;
}
};
TEST_F(CancelTest, CancellationNotSupported) {
EXPECT_EQ(kTfLiteError, interpreter_->Cancel());
}
TEST_F(CancelTest, CancellationSupported) {
MakeCancelNode(0, 1);
MakeOkNode(1, 2);
interpreter_->EnableCancellation();
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
EXPECT_EQ(kTfLiteCancelled, interpreter_->Invoke());
ASSERT_EQ(kTfLiteOk, GetCancellationData().cancellation_status);
}
TEST_F(CancelTest, CancelBeforeInvoke) {
MakeOkNode(0, 1);
MakeOkNode(1, 2);
interpreter_->EnableCancellation();
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(kTfLiteOk, interpreter_->Cancel());
EXPECT_EQ(kTfLiteOk, interpreter_->Invoke());
}
TEST_F(CancelTest, CancelOnlyAffectsOngoingInvoke) {
MakeCancelNode(0, 1);
MakeOkNode(1, 2);
interpreter_->EnableCancellation();
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
EXPECT_EQ(kTfLiteCancelled, interpreter_->Invoke());
ASSERT_EQ(kTfLiteOk, GetCancellationData().cancellation_status);
EXPECT_EQ(kTfLiteOk, interpreter_->Invoke());
}
TEST_F(CancelTest, CancellationAffectsOtherSubgraphs) {
MakeCancelAndCallNode(0, 1);
MakeOkNode(1, 2);
SetUpCalleeSubgraph();
interpreter_->EnableCancellation();
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
EXPECT_EQ(kTfLiteCancelled, interpreter_->Invoke());
ASSERT_EQ(kTfLiteOk, GetCancellationData().cancellation_status);
EXPECT_EQ(kTfLiteOk, interpreter_->Invoke());
}
class SetCancellationFunctionTest : public InterpreterTest {
public:
TfLiteStatus Invoke() { return interpreter_->Invoke(); }
void Cancel() { GetCancellationData().is_cancelled = true; }
void MakeCancelNode(int input, int output) {
TfLiteRegistration op = CancelOpRegistration();
ASSERT_EQ(interpreter_->AddNodeWithParameters({input}, {output}, nullptr, 0,
nullptr, &op),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(input, {3}), kTfLiteOk);
}
void MakeOkNode(int input, int output) {
TfLiteRegistration op = OkOpRegistration();
ASSERT_EQ(interpreter_->AddNodeWithParameters({input}, {output}, nullptr, 0,
nullptr, &op),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(input, {3}), kTfLiteOk);
}
private:
TfLiteRegistration CancelOpRegistration() {
TfLiteRegistration reg{};
reg.prepare = GetOpPrepare();
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
GetCancellationData().is_cancelled = true;
return kTfLiteOk;
};
return reg;
}
TfLiteRegistration OkOpRegistration() {
TfLiteRegistration reg{};
reg.prepare = GetOpPrepare();
reg.invoke = [](TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
};
return reg;
}
void SetUp() final {
GetCancellationData().is_cancelled = false;
int num_tensors = 3;
ASSERT_EQ(interpreter_->AddTensors(num_tensors), kTfLiteOk);
interpreter_->SetInputs({0});
interpreter_->SetOutputs({2});
TfLiteQuantizationParams quantized;
for (int tensor_index = 0; tensor_index < num_tensors; tensor_index++) {
ASSERT_EQ(interpreter_->SetTensorParametersReadWrite(
tensor_index, kTfLiteFloat32, "", {3}, quantized),
kTfLiteOk);
}
interpreter_->SetCancellationFunction(&GetCancellationData(),
&CheckCancellation);
}
};
TEST_F(SetCancellationFunctionTest, CancelBeforeInvoke) {
SetCancellationFunctionTest::MakeOkNode(1, 2);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
SetCancellationFunctionTest::Cancel();
TfLiteStatus invoke_error_code = SetCancellationFunctionTest::Invoke();
ASSERT_EQ(invoke_error_code, kTfLiteError);
}
TEST_F(SetCancellationFunctionTest, CancelDuringInvoke) {
SetCancellationFunctionTest::MakeCancelNode(0, 1);
SetCancellationFunctionTest::MakeOkNode(1, 2);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
TfLiteStatus invoke_error_code = SetCancellationFunctionTest::Invoke();
ASSERT_EQ(invoke_error_code, kTfLiteError);
}
class TestCustomAllocation : public InterpreterTest {
protected:
void SetUp() override {
interpreter_ = std::make_unique<Interpreter>();
interpreter_->AddTensors(7);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({3, 4, 6});
TfLiteQuantizationParams quant;
interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(5, kTfLiteFloat32, "", {3},
quant, true);
interpreter_->SetTensorParametersReadWrite(6, kTfLiteFloat32, "", {3},
quant);
auto* add_reg = ops::builtin::Register_ADD();
TfLiteAddParams* builtin_data0 =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
TfLiteAddParams* builtin_data1 =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
TfLiteAddParams* builtin_data2 =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
TfLiteAddParams* builtin_data3 =
reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
builtin_data0->activation = kTfLiteActNone;
builtin_data1->activation = kTfLiteActNone;
builtin_data2->activation = kTfLiteActNone;
builtin_data3->activation = kTfLiteActNone;
interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data0,
add_reg);
interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data1,
add_reg);
interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data2,
add_reg);
interpreter_->AddNodeWithParameters({0, 5}, {6}, nullptr, 0, builtin_data3,
add_reg);
interpreter_->SetVariables({5});
}
void AssignCustomAllocForTensor(int tensor_idx, int required_alignment) {
const TfLiteTensor* tensor = interpreter_->tensor(tensor_idx);
auto tensor_alloc = NewCustomAlloc(tensor->bytes, required_alignment);
ASSERT_EQ(
interpreter_->SetCustomAllocationForTensor(tensor_idx, tensor_alloc),
kTfLiteOk);
}
void VerifyInvoke() {
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> variable = {0.0f, 1.0f, 2.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
memcpy(interpreter_->typed_tensor<float>(interpreter_->variables()[0]),
variable.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(0), input.data(),
3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(),
3 * sizeof(float));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output_tensor =
interpreter_->tensor(interpreter_->outputs()[0]);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
}
}
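// Over-allocates by `required_alignment` extra bytes so the returned pointer
// can be rounded up to an aligned address; the raw buffer is kept alive in
// `custom_alloc_buffers_` and released in TearDown().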
TfLiteCustomAllocation NewCustomAlloc(size_t num_bytes,
int required_alignment) {
char* new_alloc = new char[num_bytes + required_alignment];
char* new_underlying_buffer_aligned_ptr = reinterpret_cast<char*>(
AlignTo(required_alignment, reinterpret_cast<intptr_t>(new_alloc)));
custom_alloc_buffers_.emplace_back(new_alloc);
return TfLiteCustomAllocation(
{new_underlying_buffer_aligned_ptr, num_bytes});
}
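// Rounds `offset` up to the next multiple of `alignment`. Worked example:
// AlignTo(64, 100) == 128, since 100 % 64 == 36 and 100 + (64 - 36) == 128.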
intptr_t AlignTo(size_t alignment, intptr_t offset) {
return offset % alignment == 0 ? offset
: offset + (alignment - offset % alignment);
}
void TearDown() override {
interpreter_.reset();
custom_alloc_buffers_.clear();
}
protected:
TfLiteAddParams add_params_;
std::vector<std::unique_ptr<char[]>> custom_alloc_buffers_;
};
TEST_F(TestCustomAllocation, InvalidAlignment) {
const TfLiteTensor* input_tensor =
interpreter_->tensor(interpreter_->inputs()[0]);
intptr_t dummy_ptr = kDefaultTensorAlignment - 1;
TfLiteCustomAllocation input_alloc{reinterpret_cast<void*>(dummy_ptr),
input_tensor->bytes};
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[0], input_alloc),
kTfLiteError);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
}
TEST_F(TestCustomAllocation, InvalidAlignment_SkipCheck) {
const TfLiteTensor* input_tensor =
interpreter_->tensor(interpreter_->inputs()[0]);
const int required_alignment = kDefaultTensorAlignment - 1;
auto tensor_alloc = NewCustomAlloc(input_tensor->bytes, required_alignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[0], tensor_alloc,
kTfLiteCustomAllocationFlagsSkipAlignCheck),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
}
TEST_F(TestCustomAllocation, InsufficientBytes) {
auto input_alloc = NewCustomAlloc(4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[0], input_alloc),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteError);
}
TEST_F(TestCustomAllocation, CustomInputAlloc) {
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
}
TEST_F(TestCustomAllocation, CustomInputAlloc_MultipleAssigns) {
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
}
TEST_F(TestCustomAllocation, CustomInputAlloc_AllocateTensorsBefore) {
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
VerifyInvoke();
}
TEST_F(TestCustomAllocation, CustomInputAndOutputAllocs) {
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->inputs()[1],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->outputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->outputs()[1],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
}
TEST_F(TestCustomAllocation, CustomAlloc_VariableTensor) {
AssignCustomAllocForTensor(interpreter_->variables()[0],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
AssignCustomAllocForTensor(interpreter_->variables()[0],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
std::vector<float> input = {2.0f, 3.0f, 4.0f};
std::vector<float> variable = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {3.0f, 5.0f, 7.0f};
memcpy(interpreter_->typed_tensor<float>(interpreter_->variables()[0]),
variable.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output_tensor =
interpreter_->tensor(interpreter_->outputs()[2]);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_F(TestCustomAllocation, ResizeInputsWithoutEnoughMemory) {
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->inputs()[1],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteError);
ASSERT_EQ(interpreter_->Invoke(), kTfLiteError);
}
TEST_F(TestCustomAllocation, ResizeInputsWithEnoughMemory) {
const TfLiteTensor* input0_tensor =
interpreter_->tensor(interpreter_->inputs()[0]);
auto input0_alloc =
NewCustomAlloc(2 * input0_tensor->bytes, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[0], input0_alloc),
kTfLiteOk);
const TfLiteTensor* input1_tensor =
interpreter_->tensor(interpreter_->inputs()[1]);
auto input1_alloc =
NewCustomAlloc(2 * input1_tensor->bytes, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[1], input1_alloc),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {6, 1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {6, 1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
std::vector<float> input = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f};
TfLiteTensor* tensor = interpreter_->tensor(interpreter_->outputs()[0]);
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 6 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 6 * sizeof(float));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {3, 1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {3, 1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
}
TEST_F(TestCustomAllocation, ResizeAndAllocateForEveryInvoke) {
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->inputs()[1],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->outputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->outputs()[1],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1, 1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 1}),
kTfLiteOk);
auto input0_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[0], input0_alloc),
kTfLiteOk);
auto input1_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[1], input1_alloc),
kTfLiteOk);
auto output0_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->outputs()[0], output0_alloc),
kTfLiteOk);
auto output1_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->outputs()[1], output1_alloc),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
std::vector<float> input = {2.0f};
std::vector<float> expected_output = {4.0f};
TfLiteTensor* tensor = interpreter_->tensor(interpreter_->outputs()[0]);
memcpy(interpreter_->typed_tensor<float>(0), input.data(), sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), sizeof(float));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_EQ(tensor->data.f[0], expected_output[0]);
}
TEST_F(TestCustomAllocation, ResizeAndAllocate_InvalidAllocAfterInvokable) {
AssignCustomAllocForTensor(interpreter_->inputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->inputs()[1],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->outputs()[0],
kDefaultTensorAlignment);
AssignCustomAllocForTensor(interpreter_->outputs()[1],
kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
auto invalid_output_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->outputs()[0], invalid_output_alloc),
kTfLiteOk);
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
}
TEST_F(TestCustomAllocation, ResizeAndAllocate_WithDynamicTensor) {
TfLiteTensor* intermediate_tensor = interpreter_->tensor(2);
intermediate_tensor->allocation_type = kTfLiteDynamic;
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
VerifyInvoke();
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1, 1}),
kTfLiteOk);
ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 1}),
kTfLiteOk);
auto input0_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[0], input0_alloc),
kTfLiteOk);
auto input1_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->inputs()[1], input1_alloc),
kTfLiteOk);
auto output0_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->outputs()[0], output0_alloc),
kTfLiteOk);
auto output1_alloc =
NewCustomAlloc(/*num_bytes=*/4, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->outputs()[1], output1_alloc),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
std::vector<float> input = {2.0f};
std::vector<float> expected_output = {4.0f};
TfLiteTensor* tensor = interpreter_->tensor(interpreter_->outputs()[0]);
memcpy(interpreter_->typed_tensor<float>(0), input.data(), sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), sizeof(float));
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_EQ(tensor->data.f[0], expected_output[0]);
intermediate_tensor = interpreter_->tensor(2);
intermediate_tensor->allocation_type = kTfLiteDynamic;
auto invalid_output0_alloc =
NewCustomAlloc(/*num_bytes=*/2, kDefaultTensorAlignment);
ASSERT_EQ(interpreter_->SetCustomAllocationForTensor(
interpreter_->outputs()[0], invalid_output0_alloc),
kTfLiteOk);
ASSERT_NE(interpreter_->AllocateTensors(), kTfLiteOk);
}
class TestLazyDelegateProvider : public InterpreterTest {
protected:
class DummyLazyDelegateKernel : public SimpleDelegateKernelInterface {
public:
explicit DummyLazyDelegateKernel(bool prepare_error)
: prepare_error_(prepare_error) {}
TfLiteStatus Init(TfLiteContext* context,
const TfLiteDelegateParams* params) override {
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) override {
return prepare_error_ ? kTfLiteError : kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) override {
return kTfLiteOk;
}
private:
const bool prepare_error_;
};
class DummyLazyDelegate : public SimpleDelegateInterface {
public:
explicit DummyLazyDelegate(bool return_error)
: return_error_(return_error) {}
bool IsNodeSupportedByDelegate(const TfLiteRegistration* registration,
const TfLiteNode* node,
TfLiteContext* context) const override {
return true;
}
TfLiteStatus Initialize(TfLiteContext* context) override {
return kTfLiteOk;
}
const char* Name() const override { return "DummyLazyDelegateForTest"; }
std::unique_ptr<SimpleDelegateKernelInterface>
CreateDelegateKernelInterface() override {
return std::unique_ptr<SimpleDelegateKernelInterface>(
new DummyLazyDelegateKernel(return_error_));
}
SimpleDelegateInterface::Options DelegateOptions() const override {
return SimpleDelegateInterface::Options();
}
private:
bool return_error_;
};
void InitWithLazyDelegate(bool create_dynamic_tensor = false,
                          bool return_error = false) {
TfLiteRegistration reg = {nullptr};
ASSERT_EQ(interpreter_->AddTensors(2), kTfLiteOk);
interpreter_->SetInputs({0});
interpreter_->SetOutputs({1});
interpreter_->AddNodeWithParameters({0}, {1}, nullptr, 0, nullptr, &reg);
Interpreter::TfLiteDelegatePtr delegate(
TfLiteDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleDelegateInterface>(
new DummyLazyDelegate(return_error))),
TfLiteDelegateFactory::DeleteSimpleDelegate);
mutable_lazy_delegate_providers()->push_back(
[=](TfLiteContext*) {
return Interpreter::TfLiteDelegatePtr(
TfLiteDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleDelegateInterface>(
new DummyLazyDelegate(return_error))),
TfLiteDelegateFactory::DeleteSimpleDelegate);
});
if (create_dynamic_tensor) {
interpreter_->tensor(1)->data.raw = nullptr;
interpreter_->tensor(1)->allocation_type = kTfLiteDynamic;
}
}
};
TEST_F(TestLazyDelegateProvider, ApplicationSuccess) {
InitWithLazyDelegate();
EXPECT_EQ(kTfLiteOk, interpreter_->AllocateTensors());
EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
EXPECT_TRUE(HasDelegates());
EXPECT_TRUE(IsFullyDelegated());
}
TEST_F(TestLazyDelegateProvider, ApplicationFailure) {
InitWithLazyDelegate(/*create_dynamic_tensor=*/false,
                     /*return_error=*/true);
EXPECT_EQ(kTfLiteDelegateError, ApplyLazyDelegateProviders());
EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
EXPECT_EQ(kTfLiteOk, interpreter_->AllocateTensors());
EXPECT_FALSE(HasDelegates());
EXPECT_FALSE(IsFullyDelegated());
}
TEST_F(TestLazyDelegateProvider, ApplicationSkipped) {
InitWithLazyDelegate(/*create_dynamic_tensor=*/true);
EXPECT_EQ(kTfLiteOk, interpreter_->AllocateTensors());
EXPECT_TRUE(mutable_lazy_delegate_providers()->empty());
EXPECT_FALSE(HasDelegates());
EXPECT_FALSE(IsFullyDelegated());
}
TEST_F(InterpreterTest, SingleSignature_get_signatures) {
const char kSignatureKey[] = "test_method";
BuildSignature(kSignatureKey, {{"Input1", 0}, {"Input2", 1}},
{{"Output1", 5}});
auto results = interpreter_->signature_keys();
ASSERT_EQ(1, results.size());
EXPECT_EQ(kSignatureKey, *results[0]);
}
TEST_F(InterpreterTest, SingleSignature_get_inputs) {
const char kSignatureKey[] = "test_method";
const std::map<std::string, uint32_t> inputs = {{"Input1", 0}, {"Input2", 1}};
const std::map<std::string, uint32_t> outputs = {{"Output1", 5}};
BuildSignature(kSignatureKey, inputs, outputs);
EXPECT_THAT(interpreter_->signature_inputs(kSignatureKey),
testing::Eq(inputs));
EXPECT_THAT(interpreter_->signature_outputs(kSignatureKey),
testing::Eq(outputs));
}
TEST_F(InterpreterTest, SingleSignature_validate_get_tensor) {
const char kSignatureKey[] = "test_method";
const std::map<std::string, uint32_t> inputs = {{"Input1", 0}, {"Input2", 1}};
const std::map<std::string, uint32_t> outputs = {{"Output1", 5}};
BuildSignature(kSignatureKey, inputs, outputs);
ASSERT_EQ(interpreter_->AddTensors(6), kTfLiteOk);
ASSERT_EQ(interpreter_->SetInputs({0, 1}), kTfLiteOk);
ASSERT_EQ(interpreter_->SetOutputs({5}), kTfLiteOk);
ASSERT_EQ(interpreter_->SetTensorParametersReadWrite(
0, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
ASSERT_EQ(interpreter_->SetTensorParametersReadWrite(
1, kTfLiteFloat32, "", {3}, TfLiteQuantizationParams()),
kTfLiteOk);
ASSERT_EQ(
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1, 2, 3}),
kTfLiteOk);
ASSERT_EQ(
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1, 2, 3}),
kTfLiteOk);
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
EXPECT_TRUE(interpreter_->input_tensor_by_signature(
"Input1", kSignatureKey) != nullptr);
EXPECT_TRUE(interpreter_->input_tensor_by_signature(
"Input2", kSignatureKey) != nullptr);
EXPECT_TRUE(interpreter_->output_tensor_by_signature(
"Output1", kSignatureKey) != nullptr);
EXPECT_EQ(interpreter_->input_tensor_by_signature("Input3", kSignatureKey),
nullptr);
EXPECT_EQ(interpreter_->output_tensor_by_signature("Input3", kSignatureKey),
nullptr);
EXPECT_EQ(interpreter_->input_tensor_by_signature("Input1", "InvalidMethod"),
nullptr);
EXPECT_EQ(
interpreter_->output_tensor_by_signature("Output1", "InvalidMethod"),
nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/interpreter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/interpreter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
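The custom-allocation tests above hinge on one trick: over-allocate, then round the pointer up. The following is a minimal standalone sketch of that trick; it assumes nothing from TensorFlow Lite, and every name in it is illustrative.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <memory>

// Same arithmetic as the fixture's AlignTo(): round `offset` up to the next
// multiple of `alignment`.
static intptr_t AlignTo(size_t alignment, intptr_t offset) {
  return offset % alignment == 0 ? offset
                                 : offset + (alignment - offset % alignment);
}

int main() {
  constexpr size_t kNumBytes = 12;   // e.g. three floats
  constexpr size_t kAlignment = 64;  // stand-in for kDefaultTensorAlignment
  // Over-allocate so an aligned sub-span of kNumBytes is guaranteed to fit.
  std::unique_ptr<char[]> backing(new char[kNumBytes + kAlignment]);
  char* aligned = reinterpret_cast<char*>(
      AlignTo(kAlignment, reinterpret_cast<intptr_t>(backing.get())));
  assert(reinterpret_cast<uintptr_t>(aligned) % kAlignment == 0);
  std::printf("aligned buffer at %p\n", static_cast<void*>(aligned));
  return 0;
}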
57f1cd51-8a49-41d2-8617-e678c4a4dc31 | cpp | tensorflow/tensorflow | tensor_or_memref | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.cc | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tests/tensor_or_memref_test.cc | #include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
namespace mlir {
namespace interpreter {
std::optional<int64_t> BufferView::GetPhysicalIndex(
llvm::ArrayRef<int64_t> view_indices) const {
int64_t result = offset;
if (!InBounds(view_indices)) {
return std::nullopt;
}
for (int64_t i = 0; i < view_indices.size(); ++i) {
result += view_indices[i] * strides[i];
}
return result;
}
bool BufferView::InBounds(llvm::ArrayRef<int64_t> view_indices) const {
if (view_indices.size() > sizes.size()) {
return false;
}
for (auto [index, size] : llvm::zip(view_indices, sizes)) {
if (index < 0 || index >= size) {
return false;
}
}
return true;
}
SmallVector<int64_t> BufferView::GetDefaultStrides(ArrayRef<int64_t> sizes) {
SmallVector<int64_t> result(sizes.size());
int64_t stride = 1;
for (int64_t i = result.size() - 1; i >= 0; --i) {
result[i] = stride;
stride *= sizes[i];
}
return result;
}
SmallVector<int64_t> BufferView::GetStridesForLayout(ArrayRef<int64_t> sizes,
ArrayRef<int64_t> layout) {
if (layout.empty()) return GetDefaultStrides(sizes);
auto inverse_layout = invertPermutationVector(layout);
SmallVector<int64_t> result(sizes.size());
int64_t stride = 1;
for (int64_t i = 0; i < layout.size(); ++i) {
result[inverse_layout[i]] = stride;
stride *= sizes[inverse_layout[i]];
}
return result;
}
LogicalResult BufferView::Slice(int64_t dim_index, int64_t dim_offset) {
llvm::SmallVector<int64_t> offsets(Rank(), 0);
offsets[dim_index] = dim_offset;
if (auto new_offset = GetPhysicalIndex(offsets)) {
offset = *new_offset;
} else {
return failure();
}
if (dim_index >= Rank()) --*num_vector_dims;
strides.erase(strides.begin() + dim_index);
sizes.erase(sizes.begin() + dim_index);
return success();
}
LogicalResult BufferView::Slice(int64_t dim_index, int64_t dim_offset,
int64_t dim_size, int64_t dim_stride) {
llvm::SmallVector<int64_t> offsets(Rank(), 0);
offsets[dim_index] = dim_offset;
if (dim_size == 0) {
offset = 0;
} else if (auto new_offset = GetPhysicalIndex(offsets)) {
offset = *new_offset;
} else {
return failure();
}
sizes[dim_index] = dim_size;
strides[dim_index] *= dim_stride;
return success();
}
LogicalResult BufferView::Subview(ArrayRef<int64_t> subview_offsets,
ArrayRef<int64_t> subview_sizes,
ArrayRef<int64_t> subview_strides) {
if (auto new_offset = GetPhysicalIndex(subview_offsets)) {
offset = *new_offset;
} else {
return failure();
}
for (auto [in_size, subview_offset, subview_size, subview_stride] :
llvm::zip(sizes, subview_offsets, subview_sizes, subview_strides)) {
int64_t limit_index = subview_offset + (subview_size - 1) * subview_stride;
if (subview_offset < 0 || subview_offset >= in_size || limit_index < 0 ||
limit_index >= in_size) {
return failure();
}
}
for (auto [in_stride, subview_stride] : llvm::zip(strides, subview_strides)) {
in_stride *= subview_stride;
}
sizes = llvm::to_vector(subview_sizes);
return success();
}
int64_t BufferView::GetNumElements(bool include_vector_dims) const {
size_t n = 1;
for (auto size : ArrayRef<int64_t>(sizes).drop_back(
include_vector_dims ? 0 : num_vector_dims.value_or(0))) {
n *= size;
}
return n;
}
std::optional<int64_t> BufferView::GetCollapsedStride(
llvm::ArrayRef<int64_t> dims) const {
using StrideAndDim = std::pair<int64_t, int64_t>;
llvm::SmallVector<StrideAndDim> strides_and_dims;
for (auto dim : dims) {
if (sizes[dim] != 1) {
strides_and_dims.emplace_back(strides[dim], dim);
}
}
if (strides_and_dims.empty()) {
return 0;
}
llvm::sort(strides_and_dims);
int64_t next_stride = strides_and_dims.front().first;
for (auto [stride, dim] : strides_and_dims) {
if (stride != next_stride) {
return std::nullopt;
}
next_stride *= sizes[dim];
}
return strides_and_dims.front().first;
}
}
} | #include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_join.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace interpreter {
namespace {
using ::testing::ElementsAre;
TEST(TensorOrMemrefTest, DefaultStrides) {
EXPECT_THAT(BufferView::GetDefaultStrides({1, 2, 3}), ElementsAre(6, 3, 1));
}
TEST(TensorOrMemrefTest, StridesForLayout) {
EXPECT_THAT(BufferView::GetStridesForLayout({1, 2, 3}, {2, 1, 0}),
ElementsAre(6, 3, 1));
EXPECT_THAT(BufferView::GetStridesForLayout({1, 2, 3}, {0, 1, 2}),
ElementsAre(1, 1, 2));
EXPECT_THAT(BufferView::GetStridesForLayout({3, 3, 3, 3}, {3, 0, 1, 2}),
ElementsAre(27, 1, 3, 9));
}
std::optional<int64_t> GetCollapsedStrideNaive(llvm::ArrayRef<int64_t> dims,
const BufferView& view) {
BufferView f;
for (int64_t dim : dims) {
f.sizes.push_back(view.sizes[dim]);
}
llvm::SmallBitVector v(view.GetNumElements());
for (const auto& indices : f.Indices()) {
SmallVector<int64_t> view_indices(view.Rank());
for (auto [dim, index] : llvm::zip(dims, indices)) {
view_indices[dim] = index;
}
v[*view.GetPhysicalIndex(view_indices)] = true;
}
if (v.count() != f.GetNumElements()) return std::nullopt;
if (f.GetNumElements() <= 1) return 0;
int64_t min = v.find_first();
int64_t expected_stride = (v.find_last() - min) / (f.GetNumElements() - 1);
for (int64_t i = 0; i < f.GetNumElements(); ++i) {
if (!v[i * expected_stride + min]) {
return std::nullopt;
}
}
return expected_stride;
}
TEST(TensorOrMemrefTest, CollapsedStride) {
BufferView view{.sizes = {1, 2, 3, 1, 5},
.strides = BufferView::GetDefaultStrides({1, 2, 3, 1, 5})};
auto check_all = [&]() {
for (int64_t i = 0; i < (1 << view.Rank()); ++i) {
SmallVector<int64_t> dims;
for (int64_t dim = 0; dim < view.Rank(); ++dim) {
if (i & (1 << dim)) dims.push_back(dim);
}
do {
auto v = view.GetCollapsedStride(dims);
auto n = GetCollapsedStrideNaive(dims, view);
EXPECT_EQ(n, v) << "checking " << absl::StrJoin(dims, ", ");
} while (std::next_permutation(dims.begin(), dims.end()));
}
};
check_all();
ASSERT_TRUE(view.Slice(3, 0).succeeded());
check_all();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tests/tensor_or_memref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
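For reference, the stride arithmetic that GetDefaultStrides() and GetPhysicalIndex() implement above can be restated in a few lines of plain C++. This is a sketch using std:: containers, not the interpreter API; the expected values match the unit test above.

#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<int64_t> DefaultStrides(const std::vector<int64_t>& sizes) {
  std::vector<int64_t> strides(sizes.size());
  int64_t stride = 1;
  for (int64_t i = static_cast<int64_t>(sizes.size()) - 1; i >= 0; --i) {
    strides[i] = stride;  // innermost dimension is contiguous
    stride *= sizes[i];
  }
  return strides;
}

int main() {
  std::vector<int64_t> sizes = {1, 2, 3};
  std::vector<int64_t> strides = DefaultStrides(sizes);
  assert(strides == (std::vector<int64_t>{6, 3, 1}));
  // Physical index of element (0, 1, 2): 0*6 + 1*3 + 2*1 = 5.
  int64_t offset = 0;
  std::vector<int64_t> indices = {0, 1, 2};
  for (size_t d = 0; d < indices.size(); ++d) offset += indices[d] * strides[d];
  assert(offset == 5);
  return 0;
}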
d2594947-6568-4ee6-a346-5acce0f5b176 | cpp | tensorflow/tensorflow | registration | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/registration.cc | tensorflow/core/framework/registration/registration_test.cc | #include "xla/mlir/tools/mlir_interpreter/framework/registration.h"
#include <cassert>
#include <functional>
#include <utility>
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
namespace mlir {
namespace interpreter {
namespace detail {
namespace {
DenseMap<llvm::StringRef, llvm::StringRef>& GetOpAliases() {
static DenseMap<llvm::StringRef, llvm::StringRef>* aliases = nullptr;
if (!aliases) {
aliases = new DenseMap<llvm::StringRef, llvm::StringRef>();
}
return *aliases;
}
DenseMap<llvm::StringRef, InterpreterFunction>& GetFunctions() {
static DenseMap<llvm::StringRef, InterpreterFunction>* functions = nullptr;
if (!functions) {
functions = new DenseMap<llvm::StringRef, InterpreterFunction>();
}
return *functions;
}
}
InterpreterFunction GetFunction(llvm::StringRef name) {
const auto& fns = GetFunctions();
auto fn = fns.find(name);
if (fn != fns.end()) {
return fn->second;
}
const auto& aliases = GetOpAliases();
auto alias = aliases.find(name);
if (alias != aliases.end()) {
return fns.find(alias->second)->second;
}
return nullptr;
}
void RegisterInterpreterOp(llvm::StringRef name,
InterpreterValue (*fn)(const InterpreterValue&)) {
RegisterInterpreterOp(
name,
[fn](MutableArrayRef<InterpreterValue> operands, mlir::Operation*,
InterpreterState&) -> SmallVector<InterpreterValue> {
assert(operands.size() == 1 && "unexpected number of operands");
return {fn(operands[0])};
});
}
void RegisterInterpreterOp(llvm::StringRef name,
InterpreterValue (*fn)(const InterpreterValue&,
const InterpreterValue&)) {
RegisterInterpreterOp(
name,
[fn](MutableArrayRef<InterpreterValue> operands, mlir::Operation*,
InterpreterState&) -> SmallVector<InterpreterValue> {
assert(operands.size() == 2 && "unexpected number of operands");
return {fn(operands[0], operands[1])};
});
}
void RegisterInterpreterOp(
llvm::StringRef name,
InterpreterValue (*fn)(MutableArrayRef<InterpreterValue>)) {
RegisterInterpreterOp(
name,
[fn](MutableArrayRef<InterpreterValue> operands, mlir::Operation*,
InterpreterState&) -> SmallVector<InterpreterValue> {
return {fn(operands)};
});
}
void RegisterInterpreterOp(
llvm::StringRef name,
std::function<llvm::SmallVector<InterpreterValue>(
MutableArrayRef<InterpreterValue>, mlir::Operation*, InterpreterState&)>
fn) {
GetFunctions()[name] = std::move(fn);
}
void RegisterInterpreterOp(llvm::StringRef name, llvm::StringRef original) {
GetOpAliases()[name] = original;
}
}
}
} | #include "tensorflow/core/framework/registration/registration.h"
#include <gmock/gmock.h>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
#define STORE_NEXT_ID_IMPL(id, name) constexpr int name = id
#define STORE_NEXT_ID(name) TF_NEW_ID_FOR_INIT(STORE_NEXT_ID_IMPL, name)
STORE_NEXT_ID(kBaseId);
STORE_NEXT_ID(kNextId1);
STORE_NEXT_ID(kNextId2);
TEST(NewIdForInitTest, SequentialIds) {
static_assert(kBaseId >= 0, "kBaseId < 0");
static_assert(kNextId1 == kBaseId + 1, "kNextId1 != kBaseId+1");
static_assert(kNextId2 == kBaseId + 2, "kNextId2 != kBaseId+2");
}
int observed_unconditional_init;
InitOnStartupMarker const kUnconditionalInitMarker =
InitOnStartupMarker{} << []() {
observed_unconditional_init++;
return InitOnStartupMarker{};
};
TEST(InitOnStartupTest, Unconditional) {
EXPECT_THAT(observed_unconditional_init, Eq(1));
}
template <bool Enable>
int observed_conditional_init;
template <bool Enable>
InitOnStartupMarker const kConditionalInitMarker =
TF_INIT_ON_STARTUP_IF(Enable) << []() {
(observed_conditional_init<Enable>)++;
return InitOnStartupMarker{};
};
template InitOnStartupMarker const kConditionalInitMarker<true>;
template InitOnStartupMarker const kConditionalInitMarker<false>;
TEST(InitOnStartupTest, DISABLED_Conditional) {
EXPECT_THAT(observed_conditional_init<true>, Eq(1));
EXPECT_THAT(observed_conditional_init<false>, Eq(0));
}
template <bool Enable>
int observed_conditional_init_immediate;
template <bool Enable>
InitOnStartupMarker const kConditionalInitImmediateMarker =
TF_INIT_ON_STARTUP_IF(Enable) << ([]() {
(observed_conditional_init_immediate<Enable>)++;
return InitOnStartupMarker{};
})();
template InitOnStartupMarker const kConditionalInitImmediateMarker<true>;
template InitOnStartupMarker const kConditionalInitImmediateMarker<false>;
TEST(InitOnStartupTest, DISABLED_ConditionalImmediate) {
EXPECT_THAT(observed_conditional_init_immediate<true>, Eq(1));
EXPECT_THAT(observed_conditional_init_immediate<false>, Eq(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/registration/registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
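The `marker << lambda` idiom that the test above exercises is easy to re-create in isolation. The sketch below mirrors the pattern only; it is not the TensorFlow implementation, and all names are mine.

#include <cstdio>

struct Marker {};

// operator<< evaluates the callable for its side effect at static
// initialization time and yields the Marker it returns, so registrations can
// be chained into a single static initializer.
template <typename F>
Marker operator<<(Marker, F&& f) {
  return f();
}

static int g_init_count = 0;
static const Marker kInit = Marker{} << [] {
  ++g_init_count;  // runs before main()
  return Marker{};
};

int main() { std::printf("init ran %d time(s)\n", g_init_count); }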
21dfe8dd-d12e-423a-9396-e9855bfff713 | cpp | tensorflow/tensorflow | interpreter_value | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/interpreter_value.cc | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tests/interpreter_value_test.cc | #include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include <cassert>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <variant>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
namespace mlir {
namespace interpreter {
namespace {
struct TypeStr {
static std::string_view Get(bool) { return "i1"; }
static std::string_view Get(int64_t) { return "i64"; }
static std::string_view Get(int32_t) { return "i32"; }
static std::string_view Get(int16_t) { return "i16"; }
static std::string_view Get(int8_t) { return "i8"; }
static std::string_view Get(uint64_t) { return "ui64"; }
static std::string_view Get(uint32_t) { return "ui32"; }
static std::string_view Get(uint16_t) { return "ui16"; }
static std::string_view Get(uint8_t) { return "ui8"; }
static std::string_view Get(float) { return "f32"; }
static std::string_view Get(double) { return "f64"; }
static std::string_view Get(std::complex<float>) { return "complex<f32>"; }
static std::string_view Get(std::complex<double>) { return "complex<f64>"; }
};
struct InterpreterValuePrinter {
llvm::raw_ostream& os;
template <typename T>
void operator()(const TensorOrMemref<T>& t) {
if (!t.buffer) {
os << "Memref: null";
return;
}
if (t.view.is_vector) {
os << "vector<";
} else {
os << "TensorOrMemref<";
}
ArrayRef<int64_t> sizes = t.view.sizes;
for (int64_t size : sizes.drop_back(t.view.num_vector_dims.value_or(0))) {
os << size << "x";
}
if (t.view.num_vector_dims) {
os << "vector<";
for (int64_t size : sizes.take_back(*t.view.num_vector_dims)) {
os << size << "x";
}
os << TypeStr::Get(T{}) << ">>: ";
} else {
os << TypeStr::Get(T{}) << ">: ";
}
SmallVector<int64_t> indices(t.view.Rank() +
t.view.num_vector_dims.value_or(0));
std::function<void(int64_t)> print;
print = [&](int64_t dim) {
if (dim == indices.size()) {
PrintScalar(t.at(indices));
} else {
os << "[";
for (int64_t i = 0; i < t.view.sizes[dim]; ++i) {
if (i > 0) os << ", ";
indices[dim] = i;
print(dim + 1);
}
os << "]";
}
};
if (t.buffer->Deallocated()) {
os << "<<deallocated>>";
} else {
print(0);
}
}
void operator()(const Tuple& t) {
os << "(";
bool first = true;
for (const auto& v : t.values) {
if (!first) os << ", ";
first = false;
v->Print(os);
}
os << ")";
}
template <typename T>
void operator()(const T& t) {
os << TypeStr::Get(t) << ": ";
PrintScalar(t);
}
template <typename T>
void PrintScalar(const T& v) {
os << v;
}
template <typename T>
void PrintScalar(const std::complex<T>& v) {
os << v.real() << (v.imag() >= 0 ? "+" : "") << v.imag() << "i";
}
void PrintScalar(bool v) { os << (v ? "true" : "false"); }
void PrintScalar(int8_t v) { os << (int)v; }
void PrintScalar(uint8_t v) { os << (int)v; }
};
}
void InterpreterValue::Print(llvm::raw_ostream& os) const {
std::visit(InterpreterValuePrinter{os}, storage);
}
std::string InterpreterValue::ToString() const {
std::string buf;
llvm::raw_string_ostream os(buf);
Print(os);
return buf;
}
InterpreterValue InterpreterValue::ExtractElement(
llvm::ArrayRef<int64_t> indices) const {
return std::visit(
[&](auto& it) -> InterpreterValue {
using T = std::decay_t<decltype(it)>;
if constexpr (is_tensor_or_memref_v<T>) {
if (it.view.num_vector_dims) {
return {it.VectorAt(indices)};
} else {
return {it.at(indices)};
}
} else if constexpr (std::is_same_v<T, Tuple>) {
llvm_unreachable("extracting from tuples is unsupported");
} else {
return {it};
}
},
storage);
}
void InterpreterValue::InsertElement(llvm::ArrayRef<int64_t> indices,
const InterpreterValue& value) {
std::visit(
[&](auto& it) {
using T = std::decay_t<decltype(it)>;
if constexpr (is_tensor_or_memref_v<T>) {
if (it.view.num_vector_dims) {
auto subview = it.VectorAt(indices);
const auto& values = std::get<T>(value.storage);
assert(values.view.sizes == subview.view.sizes &&
"mismatched sizes");
for (const auto& index : subview.view.Indices()) {
subview.at(index) = values.at(index);
}
} else {
it.at(indices) = std::get<typename T::element_type>(value.storage);
}
} else if constexpr (std::is_same_v<T, Tuple>) {
llvm_unreachable("inserting into tuples is unsupported");
} else {
it = std::get<T>(value.storage);
}
},
storage);
}
void InterpreterValue::Fill(
const std::function<InterpreterValue(llvm::ArrayRef<int64_t> indices)>& f) {
std::visit(
[&](auto& it) {
using T = std::decay_t<decltype(it)>;
if constexpr (is_tensor_or_memref_v<T>) {
for (const auto& indices : it.view.Indices()) {
if (it.view.num_vector_dims) {
auto subview = it.VectorAt(indices);
auto value = std::get<T>(f(indices).storage);
for (const auto& index : subview.view.Indices()) {
subview.at(index) = value.at(index);
}
} else {
it.at(indices) =
std::get<typename T::element_type>(f(indices).storage);
}
}
} else if constexpr (std::is_same_v<T, Tuple>) {
llvm_unreachable("Filling tuples is unsupported");
} else {
it = std::get<T>(f({}).storage);
}
},
storage);
}
InterpreterValue InterpreterValue::Clone(ArrayRef<int64_t> layout) const {
return std::visit(
[&](const auto& it) -> InterpreterValue {
using T = std::decay_t<decltype(it)>;
if constexpr (is_tensor_or_memref_v<T>) {
return {it.Clone(layout)};
} else if constexpr (std::is_same_v<T, Tuple>) {
llvm_unreachable("cloning tuples is unsupported");
} else {
return {it};
}
},
storage);
}
InterpreterValue InterpreterValue::CoerceLayout(
ArrayRef<int64_t> layout) const {
const auto& view = this->View();
if (view.strides == BufferView::GetStridesForLayout(view.sizes, layout)) {
return *this;
}
return Clone(layout);
}
InterpreterValue InterpreterValue::TypedAlike(
llvm::ArrayRef<int64_t> shape) const {
return std::visit(
[&](const auto& it) -> InterpreterValue {
using T = std::decay_t<decltype(it)>;
if constexpr (is_tensor_or_memref_v<T>) {
return {T::Empty(shape)};
} else if constexpr (std::is_same_v<T, Tuple>) {
llvm_unreachable("TypedAlike for tuples is unsupported");
} else {
return {TensorOrMemref<T>::Empty(shape)};
}
},
storage);
}
InterpreterValue InterpreterValue::MakeTensor(mlir::Type element_type,
SmallVector<int64_t> shape) {
auto vector_ty = llvm::dyn_cast<VectorType>(element_type);
if (vector_ty) {
llvm::copy(vector_ty.getShape(), std::back_inserter(shape));
}
return DispatchScalarType(element_type, [&](auto dummy) -> InterpreterValue {
auto tensor = TensorOrMemref<decltype(dummy)>::Empty(shape);
if (vector_ty) {
tensor.view.num_vector_dims = vector_ty.getRank();
}
return {tensor};
});
}
BufferView& InterpreterValue::View() {
return std::visit(
[](auto& it) -> BufferView& {
if constexpr (is_tensor_or_memref_v<decltype(it)>) {
return it.view;
}
llvm_unreachable("view is only supported for tensors");
},
storage);
}
const BufferView& InterpreterValue::View() const {
return std::visit(
[](const auto& it) -> const BufferView& {
if constexpr (is_tensor_or_memref_v<decltype(it)>) {
return it.view;
}
llvm_unreachable("view is only supported for tensors");
},
storage);
}
bool InterpreterValue::IsTensor() const {
return std::visit(
[](const auto& it) { return is_tensor_or_memref_v<decltype(it)>; },
storage);
}
InterpreterValue InterpreterValue::AsUnitTensor(bool is_vector) const {
auto result = TypedAlike({});
result.InsertElement({}, *this);
result.View().is_vector = is_vector;
return result;
}
bool Tuple::operator==(const Tuple& other) const {
if (other.values.size() != values.size()) return false;
for (const auto& [lhs, rhs] : llvm::zip(values, other.values)) {
if (!(*lhs == *rhs)) return false;
}
return true;
}
std::shared_ptr<Buffer> InterpreterValue::GetBuffer() const {
return std::visit(
[](const auto& it) -> std::shared_ptr<interpreter::Buffer> {
if constexpr (is_tensor_or_memref_v<decltype(it)>) {
return it.buffer;
} else {
llvm_unreachable("buffer() is only supported for tensors");
}
},
storage);
}
int64_t InterpreterValue::AsInt() const {
auto visit = [](auto value) -> int64_t {
if constexpr (std::is_integral_v<decltype(value)>) {
return static_cast<int64_t>(value);
} else {
llvm_unreachable("only integral types can be converted to ints");
}
};
return std::visit(visit, storage);
}
uint64_t InterpreterValue::AsUInt() const {
auto visit = [](auto value) -> uint64_t {
if constexpr (std::is_integral_v<decltype(value)>) {
if constexpr (std::is_signed_v<decltype(value)>) {
return static_cast<uint64_t>(
static_cast<std::make_unsigned_t<decltype(value)>>(value));
} else {
return static_cast<uint64_t>(value);
}
} else {
llvm_unreachable("only integral types can be converted to ints");
}
};
return std::visit(visit, storage);
}
double InterpreterValue::AsDouble() const {
auto visit = [](auto value) -> double {
if constexpr (std::is_floating_point_v<decltype(value)>) {
return static_cast<double>(value);
} else {
llvm_unreachable("only float types can be converted to ints");
}
};
return std::visit(visit, storage);
}
int64_t InterpreterValue::GetByteSizeOfElement() const {
return std::visit(
[](const auto& it) -> int64_t {
using T = std::decay_t<decltype(it)>;
if constexpr (is_tensor_or_memref_v<T>) {
return sizeof(typename T::element_type);
} else {
llvm_unreachable("scalars have no element sizes");
}
},
storage);
}
}
} | #include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include <complex>
#include <cstdint>
#include <optional>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/ArrayRef.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
namespace mlir {
namespace interpreter {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
TEST(InterpreterValueTest, FillUnitTensor) {
auto t = TensorOrMemref<int64_t>::Empty({});
t.at({}) = 42;
InterpreterValue v{t};
v.Fill([](llvm::ArrayRef<int64_t>) { return InterpreterValue{int64_t{43}}; });
ASSERT_EQ(t.at({}), 43);
}
TEST(InterpreterValueTest, Fill1DTensor) {
auto t = TensorOrMemref<int64_t>::Empty({3});
InterpreterValue v{t};
v.Fill([](llvm::ArrayRef<int64_t> indices) {
return InterpreterValue{indices[0]};
});
ASSERT_EQ(t.at(0), 0);
ASSERT_EQ(t.at(1), 1);
ASSERT_EQ(t.at(2), 2);
}
TEST(InterpreterValueTest, FillTensorOfVector) {
auto t = TensorOrMemref<int64_t>::Empty({4, 2});
t.view.num_vector_dims = 1;
InterpreterValue v{t};
v.Fill([](llvm::ArrayRef<int64_t> indices) -> InterpreterValue {
EXPECT_EQ(indices.size(), 1);
auto r = TensorOrMemref<int64_t>::Empty({2});
r.view.is_vector = true;
r.at(0) = indices[0];
r.at(1) = indices[0] * 10;
return {r};
});
ASSERT_EQ(
v.ToString(),
"TensorOrMemref<4xvector<2xi64>>: [[0, 0], [1, 10], [2, 20], [3, 30]]");
}
TEST(InterpreterValueTest, FillZeroSizedTensor) {
auto t = TensorOrMemref<int64_t>::Empty({0, 1});
InterpreterValue v{t};
bool was_called = false;
v.Fill([&](llvm::ArrayRef<int64_t> indices) {
was_called = true;
return InterpreterValue{indices[0]};
});
EXPECT_FALSE(was_called);
}
TEST(InterpreterValueTest, TypedAlike) {
InterpreterValue v{TensorOrMemref<int32_t>::Empty({})};
auto TypedAlike = v.TypedAlike({1, 2, 3});
ASSERT_TRUE(
std::holds_alternative<TensorOrMemref<int32_t>>(TypedAlike.storage));
ASSERT_THAT(TypedAlike.View().sizes, ElementsAre(1, 2, 3));
}
TEST(InterpreterValueTest, AsUnitTensor) {
InterpreterValue v{42};
InterpreterValue wrapped = v.AsUnitTensor();
ASSERT_THAT(wrapped.View().sizes, IsEmpty());
ASSERT_EQ(std::get<TensorOrMemref<int32_t>>(wrapped.storage).at({}), 42);
}
TEST(InterpreterValueTest, IsTensor) {
ASSERT_FALSE(InterpreterValue{42}.IsTensor());
ASSERT_TRUE(InterpreterValue{TensorOrMemref<int32_t>::Empty({})}.IsTensor());
}
TEST(InterpreterValueTest, AsInt) {
ASSERT_EQ(InterpreterValue{int64_t{42}}.AsInt(), 42);
ASSERT_EQ(InterpreterValue{int32_t{42}}.AsInt(), 42);
ASSERT_EQ(InterpreterValue{int16_t{42}}.AsInt(), 42);
ASSERT_EQ(InterpreterValue{int8_t{42}}.AsInt(), 42);
ASSERT_EQ(InterpreterValue{int8_t{-1}}.AsInt(), -1);
}
TEST(InterpreterValueTest, AsUInt) {
ASSERT_EQ(InterpreterValue{int16_t{-1}}.AsUInt(), 65535);
ASSERT_EQ(InterpreterValue{int8_t{-1}}.AsUInt(), 255);
}
TEST(InterpreterValueTest, CloneTensor) {
auto tensor = TensorOrMemref<int64_t>::Empty({3});
tensor.at(0) = 1;
tensor.at(1) = 2;
tensor.at(2) = 3;
InterpreterValue wrapped{tensor};
auto clone = wrapped.Clone();
tensor.at(0) = 4;
auto& cloned_tensor = std::get<TensorOrMemref<int64_t>>(clone.storage);
ASSERT_EQ(cloned_tensor.at(0), 1);
ASSERT_EQ(cloned_tensor.at(1), 2);
ASSERT_EQ(cloned_tensor.at(2), 3);
}
TEST(InterpreterValueTest, CloneWithLayouts) {
auto tensor = TensorOrMemref<int64_t>::Empty({3, 5}, {0, 1});
tensor.at({2, 4}) = 42;
InterpreterValue wrapped{tensor};
auto clone = wrapped.Clone();
ASSERT_EQ(clone.View().strides,
BufferView::GetStridesForLayout({3, 5}, {1, 0}));
ASSERT_EQ(clone.ExtractElement({2, 4}).AsInt(), 42);
}
TEST(InterpreterValueTest, CoerceLayoutNoop) {
auto tensor = TensorOrMemref<int64_t>::Empty({3, 5}, {0, 1});
tensor.at({2, 4}) = 42;
InterpreterValue wrapped{tensor};
auto coerced = wrapped.CoerceLayout({0, 1});
ASSERT_EQ(tensor.buffer,
std::get<TensorOrMemref<int64_t>>(coerced.storage).buffer);
}
TEST(InterpreterValueTest, CoerceLayout) {
auto tensor = TensorOrMemref<int64_t>::Empty({3, 5});
tensor.at({2, 4}) = 42;
InterpreterValue wrapped{tensor};
auto clone = wrapped.CoerceLayout({0, 1});
ASSERT_EQ(clone.View().strides,
BufferView::GetStridesForLayout({3, 5}, {0, 1}));
ASSERT_EQ(clone.ExtractElement({2, 4}).AsInt(), 42);
}
TEST(InterpreterValueTest, CoerceLayoutSquare) {
auto tensor = TensorOrMemref<float>::Empty({2, 2});
tensor.at({0, 0}) = 1;
tensor.at({0, 1}) = 2;
tensor.at({1, 0}) = 3;
tensor.at({1, 1}) = 4;
InterpreterValue wrapped{tensor};
auto clone = wrapped.CoerceLayout({0, 1});
auto& cloned_tensor = std::get<TensorOrMemref<float>>(clone.storage);
EXPECT_EQ(
*reinterpret_cast<float*>(cloned_tensor.buffer->at(0, sizeof(float))), 1);
EXPECT_EQ(
*reinterpret_cast<float*>(cloned_tensor.buffer->at(1, sizeof(float))), 3);
EXPECT_EQ(
*reinterpret_cast<float*>(cloned_tensor.buffer->at(2, sizeof(float))), 2);
EXPECT_EQ(
*reinterpret_cast<float*>(cloned_tensor.buffer->at(3, sizeof(float))), 4);
}
TEST(InterpreterValueTest, CloneScalar) {
InterpreterValue value{42};
auto clone = value.Clone();
ASSERT_THAT(std::get<int32_t>(clone.storage), 42);
}
TEST(InterpreterValueTest, ToString) {
InterpreterValue value{TensorOrMemref<int64_t>::Empty({3})};
ASSERT_EQ(value.ToString(), "TensorOrMemref<3xi64>: [0, 0, 0]");
}
TEST(InterpreterValueTest, ToString2d) {
InterpreterValue value{TensorOrMemref<int64_t>::Empty({3, 2})};
ASSERT_EQ(value.ToString(),
"TensorOrMemref<3x2xi64>: [[0, 0], [0, 0], [0, 0]]");
}
TEST(InterpreterValueTest, ToString0d) {
InterpreterValue value{TensorOrMemref<int64_t>::Empty({})};
ASSERT_EQ(value.ToString(), "TensorOrMemref<i64>: 0");
}
TEST(InterpreterValueTest, ToStringComplex) {
InterpreterValue value{std::complex<float>{}};
ASSERT_EQ(value.ToString(), "complex<f32>: 0.000000e+00+0.000000e+00i");
}
TEST(CastTest, UnpackTensor) {
InterpreterValue value{TensorOrMemref<int8_t>::Empty({1, 1})};
value.InsertElement({0, 0}, {int8_t{1}});
ASSERT_EQ(InterpreterValueCast<int64_t>(value), 1);
ASSERT_EQ(InterpreterValueCast<uint8_t>(value), 1);
ASSERT_EQ(InterpreterValueCast<float>(value), 1.0f);
ASSERT_EQ(InterpreterValueCast<double>(value), 1.0);
InterpreterValue non_unit{TensorOrMemref<int8_t>::Empty({2, 2})};
ASSERT_EQ(InterpreterValueDynCast<int64_t>(non_unit), std::nullopt);
}
TEST(CastTest, IdentityCast) {
InterpreterValue value{TensorOrMemref<float>::Empty({1, 1})};
ASSERT_EQ(InterpreterValueCast<InterpreterValue>(value), value);
}
TEST(CastTest, CastToUnsigned) {
InterpreterValue value{int8_t{-1}};
ASSERT_EQ(InterpreterValueCast<uint8_t>(value), 255);
ASSERT_EQ(InterpreterValueCast<uint16_t>(value), 65535);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/interpreter_value.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/tests/interpreter_value_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
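Condensing the Fill/ExtractElement flow from the tests above into one helper gives the sketch below. It uses only calls shown in the source and tests above, but it assumes the XLA interpreter headers and build environment, so it is illustrative rather than standalone.

#include <cstdint>

#include "llvm/ADT/ArrayRef.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"

int64_t SumOfDoubledIota3() {
  using ::mlir::interpreter::InterpreterValue;
  using ::mlir::interpreter::TensorOrMemref;
  auto t = TensorOrMemref<int64_t>::Empty({3});
  InterpreterValue v{t};
  // Fill writes through the shared buffer, so `t` observes the values too
  // (the FillUnitTensor test above relies on the same behavior).
  v.Fill([](llvm::ArrayRef<int64_t> indices) {
    return InterpreterValue{indices[0] * 2};
  });
  int64_t sum = 0;
  for (int64_t i = 0; i < 3; ++i) sum += v.ExtractElement({i}).AsInt();
  return sum;  // 0 + 2 + 4 = 6
}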
52916fe4-bde7-4bfd-a579-9931b007d93f | cpp | tensorflow/tensorflow | math | third_party/xla/xla/hlo/builder/lib/math.cc | third_party/xla/xla/hlo/builder/lib/math_test.cc | #include "xla/hlo/builder/lib/math.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <limits>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/math_impl.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
template <typename FP>
XlaOp EvaluatePolynomial(XlaOp x, absl::Span<const FP> coefficients) {
static_assert(std::is_floating_point<FP>::value,
"Template-argument 'FP' must be a floating-point type");
if (coefficients.empty()) {
return ScalarLike(x, FP(0.0));
}
XlaOp poly = ScalarLike(x, coefficients[0]);
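// Horner's scheme; coefficients are ordered highest-degree first, so
// {c0, c1, c2} evaluates to ((c0 * x) + c1) * x + c2 = c0*x^2 + c1*x + c2.
// Worked example: {2, -3, 1} at x = 4 gives ((2*4) - 3) * 4 + 1 = 21.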
for (int i = 1; i < coefficients.size(); ++i) {
FP c = coefficients[i];
poly = poly * x + ScalarLike(x, c);
}
return poly;
}
template <typename FP>
XlaOp EvaluateChebyshevPolynomial(XlaOp x, absl::Span<const FP> coefficients) {
static_assert(std::is_floating_point<FP>::value,
"Template-argument 'FP' must be a floating-point type");
XlaOp b0 = ScalarLike(x, 0.0);
XlaOp b1 = ScalarLike(x, 0.0);
XlaOp b2 = ScalarLike(x, 0.0);
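// Clenshaw-style recurrence in the Cephes chbevl convention: the textbook
// update would be b0 = 2*x*b1 - b2 + c, so callers are expected to fold the
// factor of two into the argument; the series value is recovered below as
// (b0 - b2) / 2.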
for (FP c : coefficients) {
b2 = b1;
b1 = b0;
b0 = x * b1 - b2 + ScalarLike(x, c);
}
return ScalarLike(x, 0.5) * (b0 - b2);
}
}
static XlaOp DoWithUpcastToF32(XlaOp operand,
absl::Span<const PrimitiveType> upcast_types,
const std::function<XlaOp(XlaOp)>& operation) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
PrimitiveType elem_ty = shape.element_type();
bool needs_upcast =
upcast_types.empty()
? primitive_util::BitWidth(shape.element_type()) <= 16
: absl::c_linear_search(upcast_types, elem_ty);
if (needs_upcast) {
operand = ConvertElementType(operand, F32);
}
XlaOp result = operation(operand);
if (needs_upcast) {
result = ConvertElementType(result, elem_ty);
}
return result;
});
}
static absl::Status EnsureOperandIsRealFp(absl::string_view op_name,
XlaOp operand) {
auto& b = *operand.builder();
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
auto elem_ty = shape.element_type();
if (!primitive_util::IsFloatingPointType(elem_ty)) {
return InvalidArgument(
"Operands to %s must be real-valued floating-point, but got %s",
op_name, PrimitiveType_Name(elem_ty));
}
return absl::OkStatus();
}
XlaOp IsPosInf(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsPosInf", operand));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
return Eq(operand, MaxValue(&b, shape.element_type()));
});
}
XlaOp IsNegInf(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegInf", operand));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
return Eq(operand, MinValue(&b, shape.element_type()));
});
}
XlaOp IsInf(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsInf", operand));
return IsPosInf(Abs(operand));
});
}
XlaOp IsNan(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNan", operand));
return Ne(operand, operand);
});
}
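// Detects -0.0 by bitcasting to the same-width unsigned integer and comparing
// against the sign-bit-only pattern; narrow float types are widened to F32
// first.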
XlaOp IsNegZero(XlaOp operand) {
auto& b = *operand.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IsNegZero", operand));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(operand));
switch (shape.element_type()) {
case F64:
return Eq(BitcastConvertType(operand, U64),
ConstantR0WithType(&b, U64, uint64_t{1} << 63));
case F32:
return Eq(BitcastConvertType(operand, U32),
ConstantR0WithType(&b, U32, uint32_t{1} << 31));
case F8E3M4:
case F8E4M3:
case F8E5M2:
case F8E4M3FN:
case F8E4M3B11FNUZ:
case F8E5M2FNUZ:
case F8E4M3FNUZ:
case F16:
case BF16:
return Eq(BitcastConvertType(ConvertElementType(operand, F32), U32),
ConstantR0WithType(&b, U32, uint32_t{1} << 31));
default:
LOG(FATAL) << "Expected real fp type.";
}
});
}
XlaOp Square(XlaOp operand) { return operand * operand; }
XlaOp Reciprocal(XlaOp operand) { return ScalarLike(operand, 1.0) / operand; }
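// Cephes-style single-precision erfc for |x| > 1: computes
// exp(-x^2) * P(1/x^2) / |x| with separate polynomials for |x| < 2 and
// |x| >= 2, guards against underflow, and uses erfc(-x) = 2 - erfc(x) for
// negative inputs.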
static XlaOp ErfcImpl32(XlaOp x) {
const double kMaxlog = 88.72283905206835;
static const std::array<float, 9> kErfcPCoefficient{
+2.326819970068386E-2, -1.387039388740657E-1, +3.687424674597105E-1,
-5.824733027278666E-1, +6.210004621745983E-1, -4.944515323274145E-1,
+3.404879937665872E-1, -2.741127028184656E-1, +5.638259427386472E-1,
};
static const std::array<float, 8> kErfcRCoefficient{
-1.047766399936249E+1, +1.297719955372516E+1, -7.495518717768503E+0,
+2.921019019210786E+0, -1.015265279202700E+0, +4.218463358204948E-1,
-2.820767439740514E-1, +5.641895067754075E-1,
};
XlaOp abs_x = Abs(x);
XlaOp z = Exp(-x * x);
XlaOp q = ScalarLike(x, 1) / abs_x;
XlaOp y = q * q;
XlaOp p = Select(Lt(abs_x, ScalarLike(x, 2.0)),
EvaluatePolynomial<float>(y, kErfcPCoefficient),
EvaluatePolynomial<float>(y, kErfcRCoefficient));
y = z * q * p;
XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y);
return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp);
}
static XlaOp ErfImpl32Cephes(XlaOp x) {
static const std::array<float, 7> kErfTCoefficient{
+7.853861353153693E-5, -8.010193625184903E-4, +5.188327685732524E-3,
-2.685381193529856E-2, +1.128358514861418E-1, -3.761262582423300E-1,
+1.128379165726710E+0,
};
return x * EvaluatePolynomial<float>(x * x, kErfTCoefficient);
}
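// Cephes-style double-precision erfc for |x| > 1: two rational approximations
// in |x|, scaled by exp(-x^2) and split at |x| = 8, with the same underflow
// guard and negative-argument reflection as the F32 variant.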
static XlaOp ErfcImpl64(XlaOp x) {
const double kMaxlog = 7.09782712893383996843E2;
static const std::array<double, 9> kErfcPCoefficient{
2.46196981473530512524E-10, 5.64189564831068821977E-1,
7.46321056442269912687E0, 4.86371970985681366614E1,
1.96520832956077098242E2, 5.26445194995477358631E2,
9.34528527171957607540E2, 1.02755188689515710272E3,
5.57535335369399327526E2};
static const std::array<double, 9> kErfcQCoefficient{
1.00000000000000000000E0, 1.32281951154744992508E1,
8.67072140885989742329E1, 3.54937778887819891062E2,
9.75708501743205489753E2, 1.82390916687909736289E3,
2.24633760818710981792E3, 1.65666309194161350182E3,
5.57535340817727675546E2};
static const std::array<double, 6> kErfcRCoefficient{
5.64189583547755073984E-1, 1.27536670759978104416E0,
5.01905042251180477414E0, 6.16021097993053585195E0,
7.40974269950448939160E0, 2.97886665372100240670E0};
static const std::array<double, 7> kErfcSCoefficient{
1.00000000000000000000E0, 2.26052863220117276590E0,
9.39603524938001434673E0, 1.20489539808096656605E1,
1.70814450747565897222E1, 9.60896809063285878198E0,
3.36907645100081516050E0};
XlaOp z = -x * x;
XlaOp abs_x = Abs(x);
XlaOp y =
Select(Lt(abs_x, ScalarLike(x, 8.0)),
Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcPCoefficient) /
EvaluatePolynomial<double>(abs_x, kErfcQCoefficient),
Exp(z) * EvaluatePolynomial<double>(abs_x, kErfcRCoefficient) /
EvaluatePolynomial<double>(abs_x, kErfcSCoefficient));
XlaOp y_clamp = Select(Lt(z, ScalarLike(x, -kMaxlog)), ScalarLike(x, 0), y);
return Select(Lt(x, ScalarLike(x, 0)), ScalarLike(x, 2.0) - y_clamp, y_clamp);
}
static XlaOp ErfImpl64(XlaOp x) {
static std::array<double, 5> kErfTCoefficient{
9.60497373987051638749E0, 9.00260197203842689217E1,
2.23200534594684319226E3, 7.00332514112805075473E3,
5.55923013010394962768E4};
static std::array<double, 6> kErfUCoefficient{
1.00000000000000000000E0, 3.35617141647503099647E1,
5.21357949780152679795E2, 4.59432382970980127987E3,
2.26290000613890934246E4, 4.92673942608635921086E4};
XlaOp z = x * x;
return x * EvaluatePolynomial<double>(z, kErfTCoefficient) /
EvaluatePolynomial<double>(z, kErfUCoefficient);
}
XlaOp Erfc(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Erfc", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl64(x),
ScalarLike(x, 1) - ErfImpl64(x));
}
return DoWithUpcastToF32(x, {}, [](XlaOp x) {
return Select(Gt(Abs(x), ScalarLike(x, 1)), ErfcImpl32(x),
ScalarLike(x, 1) - ErfImpl32Cephes(x));
});
});
}
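// Rational approximation of erf as x * P(x^2) / Q(x^2) in single precision;
// the input is clamped to the point beyond which erf rounds to +/-1 in F32.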
static XlaOp ErfImpl32(XlaOp x) {
static const std::array<float, 5> kAlpha{
0.00022905065861350646f, 0.0034082910107109506f, 0.050955695062380861f,
0.18520832239976145f, 1.128379143519084f};
static const std::array<float, 7> kBeta{-1.1791602954361697e-7,
0.000023547966471313185f,
0.0010179625278914885f,
0.014070470171167667f,
0.11098505178285362f,
0.49746925110067538f,
1.0f};
constexpr float kErfInvOneMinusHalfULP = 3.7439211627767994f;
x = Clamp(ScalarLike(x, -kErfInvOneMinusHalfULP), x,
ScalarLike(x, kErfInvOneMinusHalfULP));
auto x2 = x * x;
return (x * EvaluatePolynomial<float>(x2, kAlpha)) /
EvaluatePolynomial<float>(x2, kBeta);
}
namespace {
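// Single-precision inverse error function in the style of Giles,
// "Approximating the erfinv function": a degree-9 polynomial in
// w = -log(1 - x^2) with separate coefficient sets for w < 5 and w >= 5,
// returning +/-infinity at x = +/-1.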
XlaOp ErfInv32(XlaOp x) {
constexpr int kDegree = 9;
constexpr std::array<float, 9> w_less_than_5_constants = {
2.81022636e-08f, 3.43273939e-07f, -3.5233877e-06f,
-4.39150654e-06f, 0.00021858087f, -0.00125372503f,
-0.00417768164f, 0.246640727f, 1.50140941f};
constexpr std::array<float, 9> w_greater_than_5_constants = {
-0.000200214257f, 0.000100950558f, 0.00134934322f,
-0.00367342844f, 0.00573950773f, -0.0076224613f,
0.00943887047f, 1.00167406f, 2.83297682f};
auto w = -Log1p(-x * x);
auto lt = Lt(w, ScalarLike(x, 5.0));
auto coefficient = [&](int i) {
return Select(lt, FullLike(x, w_less_than_5_constants[i]),
FullLike(x, w_greater_than_5_constants[i]));
};
w = Select(lt, w - ScalarLike(x, 2.5), Sqrt(w) - ScalarLike(x, 3.0));
auto p = coefficient(0);
for (int i = 1; i < kDegree; ++i) {
p = coefficient(i) + p * w;
}
XlaOp result = p * x;
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x));
return Select(Eq(Abs(x), ScalarLike(x, 1)),
x * MaxValue(&b, shape.element_type()), result);
});
}
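// Double-precision inverse error function using the same approach with three
// coefficient sets, split at w = 6.25 and w = 16.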
XlaOp ErfInv64(XlaOp x) {
constexpr std::array<double, 23> w_less_than_6_25_constants = {
-3.6444120640178196996e-21, -1.685059138182016589e-19,
1.2858480715256400167e-18, 1.115787767802518096e-17,
-1.333171662854620906e-16, 2.0972767875968561637e-17,
6.6376381343583238325e-15, -4.0545662729752068639e-14,
-8.1519341976054721522e-14, 2.6335093153082322977e-12,
-1.2975133253453532498e-11, -5.4154120542946279317e-11,
1.051212273321532285e-09, -4.1126339803469836976e-09,
-2.9070369957882005086e-08, 4.2347877827932403518e-07,
-1.3654692000834678645e-06, -1.3882523362786468719e-05,
0.0001867342080340571352, -0.00074070253416626697512,
-0.0060336708714301490533, 0.24015818242558961693,
1.6536545626831027356};
constexpr std::array<double, 19> w_less_than_16_constants = {
2.2137376921775787049e-09, 9.0756561938885390979e-08,
-2.7517406297064545428e-07, 1.8239629214389227755e-08,
1.5027403968909827627e-06, -4.013867526981545969e-06,
2.9234449089955446044e-06, 1.2475304481671778723e-05,
-4.7318229009055733981e-05, 6.8284851459573175448e-05,
2.4031110387097893999e-05, -0.0003550375203628474796,
0.00095328937973738049703, -0.0016882755560235047313,
0.0024914420961078508066, -0.0037512085075692412107,
0.005370914553590063617, 1.0052589676941592334,
3.0838856104922207635,
};
constexpr std::array<double, 17> w_greater_than_16_constants = {
-2.7109920616438573243e-11, -2.5556418169965252055e-10,
1.5076572693500548083e-09, -3.7894654401267369937e-09,
7.6157012080783393804e-09, -1.4960026627149240478e-08,
2.9147953450901080826e-08, -6.7711997758452339498e-08,
2.2900482228026654717e-07, -9.9298272942317002539e-07,
4.5260625972231537039e-06, -1.9681778105531670567e-05,
7.5995277030017761139e-05, -0.00021503011930044477347,
-0.00013871931833623122026, 1.0103004648645343977,
4.8499064014085844221,
};
auto w = -Log1p(-x * x);
auto lt_6_25 = Lt(w, ScalarLike(x, 6.25));
auto lt_16 = Lt(w, ScalarLike(x, 16));
auto coefficient = [&](int i) {
auto c = FullLike(x, w_less_than_6_25_constants[i]);
if (i < 19) {
c = Select(lt_6_25, c, FullLike(x, w_less_than_16_constants[i]));
}
if (i < 17) {
c = Select(lt_16, c, FullLike(x, w_greater_than_16_constants[i]));
}
return c;
};
auto sqrt_w = Sqrt(w);
w = Select(lt_6_25, w - ScalarLike(x, 3.125),
sqrt_w - Select(lt_16, ScalarLike(x, 3.25), ScalarLike(x, 5.0)));
auto p = coefficient(0);
for (int i = 1; i < 17; ++i) {
p = coefficient(i) + p * w;
}
for (int i = 17; i < 19; ++i) {
p = Select(lt_16, coefficient(i) + p * w, p);
}
for (int i = 19; i < 23; ++i) {
p = Select(lt_6_25, coefficient(i) + p * w, p);
}
XlaOp result = p * x;
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, b.GetShape(x));
return Select(Eq(Abs(x), ScalarLike(x, 1)),
x * MaxValue(&b, shape.element_type()), result);
});
}
}
XlaOp ErfInv(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("ErfInv", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return ErfInv64(x);
}
return DoWithUpcastToF32(x, {}, [](XlaOp x) { return ErfInv32(x); });
});
}
namespace {
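// Coefficients for the Lanczos approximation of the gamma function, with
// g = kLanczosGamma = 7 and n = kLanczosCoefficients.size() + 1 = 9.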
static constexpr double kLanczosGamma = 7;
static constexpr double kBaseLanczosCoeff = 0.99999999999980993227684700473478;
static constexpr std::array<double, 8> kLanczosCoefficients = {
676.520368121885098567009190444019, -1259.13921672240287047156078755283,
771.3234287776530788486528258894, -176.61502916214059906584551354,
12.507343278686904814458936853, -0.13857109526572011689554707,
9.984369578019570859563e-6, 1.50563273514931155834e-7};
}
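// Computes log(|Gamma(input)|) via the Lanczos approximation
//   lgamma(z + 1) ~= log(sqrt(2*pi)) + (z + 1/2) * log(t) - t + log(a(z)),
// where t = z + kLanczosGamma + 1/2, combined with the reflection formula
// lgamma(x) = log(pi / |sin(pi*x)|) - lgamma(1 - x) for x < 1/2. The
// fractional part of |input| is reduced before evaluating sin(pi * .) so the
// reflection stays accurate for inputs of large magnitude.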
XlaOp Lgamma(XlaOp input) {
auto do_it = [](XlaOp input) {
XlaOp one_half = ScalarLike(input, 0.5);
XlaOp one = ScalarLike(input, 1);
XlaOp pi = ScalarLike(input, M_PI);
XlaOp log_pi = ScalarLike(input, std::log(M_PI));
XlaOp log_sqrt_two_pi =
ScalarLike(input, (std::log(2) + std::log(M_PI)) / 2);
XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
XlaOp log_lanczos_gamma_plus_one_half =
ScalarLike(input, std::log(kLanczosGamma + 0.5));
XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
XlaOp need_to_reflect = Lt(input, one_half);
XlaOp z = Select(need_to_reflect, -input, input - one);
XlaOp x = base_lanczos_coeff;
for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
XlaOp index = ScalarLike(input, i);
x = x + lanczos_coefficient / (z + index + one);
}
XlaOp t = lanczos_gamma_plus_one_half + z;
XlaOp log_t = log_lanczos_gamma_plus_one_half +
Log1p(z / lanczos_gamma_plus_one_half);
XlaOp log_y = log_sqrt_two_pi + (z + one_half - t / log_t) * log_t + Log(x);
XlaOp abs_input = Abs(input);
XlaOp abs_frac_input = abs_input - Floor(abs_input);
XlaOp reduced_frac_input =
Select(Gt(abs_frac_input, ScalarLike(abs_frac_input, 0.5)),
ScalarLike(abs_frac_input, 1) - abs_frac_input, abs_frac_input);
XlaOp reflection_denom = Log(Sin(pi * reduced_frac_input));
XlaOp reflection =
Select(IsFinite(reflection_denom), log_pi - reflection_denom - log_y,
-reflection_denom);
XlaOp result = Select(need_to_reflect, reflection, log_y);
XlaOp inf_bcast = FullLike(input, std::numeric_limits<float>::infinity());
return Select(IsInf(input), inf_bcast, result);
};
auto& b = *input.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Lgamma", input));
return DoWithUpcastToF32(input, {}, do_it);
});
}
static XlaOp Lbeta(XlaOp a, XlaOp b) {
return Lgamma(a) + Lgamma(b) - Lgamma(a + b);
}
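// Computes the digamma function psi(x) = d/dx log(Gamma(x)) by
// differentiating the Lanczos series, using the reflection
// psi(x) = psi(1 - x) - pi / tan(pi*x) for x < 1/2 and returning NaN at the
// poles (non-positive integers).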
XlaOp Digamma(XlaOp input) {
auto do_it = [](XlaOp input) {
XlaOp zero = ScalarLike(input, 0);
XlaOp one_half = ScalarLike(input, 0.5);
XlaOp one = ScalarLike(input, 1);
XlaOp pi = ScalarLike(input, M_PI);
XlaOp lanczos_gamma = ScalarLike(input, kLanczosGamma);
XlaOp lanczos_gamma_plus_one_half = ScalarLike(input, kLanczosGamma + 0.5);
XlaOp log_lanczos_gamma_plus_one_half =
ScalarLike(input, std::log(kLanczosGamma + 0.5));
XlaOp base_lanczos_coeff = ScalarLike(input, kBaseLanczosCoeff);
XlaOp need_to_reflect = Lt(input, one_half);
XlaOp z = Select(need_to_reflect, -input, input - one);
XlaOp num = zero;
XlaOp denom = base_lanczos_coeff;
for (int i = 0, end = kLanczosCoefficients.size(); i < end; ++i) {
XlaOp lanczos_coefficient = ScalarLike(input, kLanczosCoefficients[i]);
XlaOp index = ScalarLike(input, i);
num = num - lanczos_coefficient / ((z + index + one) * (z + index + one));
denom = denom + lanczos_coefficient / (z + index + one);
}
XlaOp t = lanczos_gamma_plus_one_half + z;
XlaOp log_t = log_lanczos_gamma_plus_one_half +
Log1p(z / lanczos_gamma_plus_one_half);
XlaOp y = log_t + num / denom - lanczos_gamma / t;
XlaOp reduced_input = input + Abs(Floor(input + ScalarLike(input, 0.5)));
XlaOp reflection =
y - pi * Cos(pi * reduced_input) / Sin(pi * reduced_input);
XlaOp real_result = Select(need_to_reflect, reflection, y);
return Select(And(Le(input, zero), Eq(input, Floor(input))),
FullLike(input, std::numeric_limits<float>::quiet_NaN()),
real_result);
};
auto& b = *input.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Digamma", input));
return DoWithUpcastToF32(input, {}, do_it);
});
}
namespace {
enum kIgammaMode { VALUE, DERIVATIVE, SAMPLE_DERIVATIVE };
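// Power-series evaluation of the regularized lower incomplete gamma function:
//   P(a, x) = (ax / a) * sum_k x^k / ((a+1) * ... * (a+k)),
// where ax = x^a * e^-x / Gamma(a) is precomputed by the caller. Depending on
// `mode`, the loop also carries the derivative of the series with respect to
// a (for IgammaGradA) or the gamma-sample gradient (for RandomGammaGrad), and
// runs until the relative increment falls below the epsilon of `type`.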
template <kIgammaMode mode>
XlaOp IgammaSeries(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled,
xla::PrimitiveType type) {
auto cond = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<XlaOp> {
XlaOp enabled = vals[0];
return Any(enabled);
};
auto body = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
XlaOp enabled = vals[0];
XlaOp r = vals[1];
XlaOp c = vals[2];
XlaOp ans = vals[3];
XlaOp x = vals[4];
XlaOp dc_da = vals[5];
XlaOp dans_da = vals[6];
r = r + ScalarLike(r, 1);
dc_da = dc_da * (x / r) + (ScalarLike(r, -1) * c * x) / (r * r);
dans_da = dans_da + dc_da;
c = c * (x / r);
ans = ans + c;
XlaOp conditional;
if (mode == VALUE) {
conditional = And(enabled, Gt(c / ans, Epsilon(builder, type)));
} else {
conditional =
And(enabled, Gt(Abs(dc_da / dans_da), Epsilon(builder, type)));
}
return std::vector<XlaOp>{
conditional,
Select(enabled, r, vals[1]),
Select(enabled, c, vals[2]),
Select(enabled, ans, vals[3]),
Select(enabled, x, vals[4]),
Select(enabled, dc_da, vals[5]),
Select(enabled, dans_da, vals[6]),
};
};
auto& b = *ax.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
std::vector<XlaOp> vals = {
enabled, a, FullLike(a, 1), FullLike(a, 1), x, FullLike(a, 0),
FullLike(a, 0),
};
TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igamma", &b));
XlaOp ans = vals[3];
XlaOp dans_da = vals[6];
if (mode == VALUE) {
return (ans * ax) / a;
}
XlaOp dlogax_da = Log(x) - Digamma(a + ScalarLike(a, 1));
switch (mode) {
case DERIVATIVE:
return ax * (ans * dlogax_da + dans_da) / a;
case SAMPLE_DERIVATIVE:
default:
return -(dans_da + ans * dlogax_da) * x / a;
}
});
}
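// Continued-fraction evaluation of the regularized upper incomplete gamma
// function (Cephes-style modified Lentz iteration), capped at 2000 steps and
// rescaling the partial numerators and denominators by epsilon when they grow
// too large; like IgammaSeries it can also propagate derivatives w.r.t. a.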
template <kIgammaMode mode>
XlaOp IgammacContinuedFraction(XlaOp ax, XlaOp x, XlaOp a, XlaOp enabled,
xla::PrimitiveType type) {
auto cond = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<XlaOp> {
XlaOp enabled = vals[0];
XlaOp c = vals[5];
return And(Lt(c, ScalarLike(c, 2000)), Any(enabled));
};
auto body = [&](absl::Span<const XlaOp> vals,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
XlaOp enabled = vals[0];
XlaOp ans = vals[1];
XlaOp t = vals[2];
XlaOp y = vals[3];
XlaOp z = vals[4];
XlaOp c = vals[5];
XlaOp pkm1 = vals[6];
XlaOp qkm1 = vals[7];
XlaOp pkm2 = vals[8];
XlaOp qkm2 = vals[9];
XlaOp dpkm2_da = vals[10];
XlaOp dqkm2_da = vals[11];
XlaOp dpkm1_da = vals[12];
XlaOp dqkm1_da = vals[13];
XlaOp dans_da = vals[14];
c = c + ScalarLike(c, 1);
y = y + ScalarLike(y, 1);
z = z + ScalarLike(z, 2);
XlaOp yc = y * c;
XlaOp pk = pkm1 * z - pkm2 * yc;
XlaOp qk = qkm1 * z - qkm2 * yc;
XlaOp qk_is_nonzero = Ne(qk, ScalarLike(qk, 0));
XlaOp r = pk / qk;
t = Select(qk_is_nonzero, Abs((ans - r) / r), FullLike(t, 1));
ans = Select(qk_is_nonzero, r, ans);
XlaOp dpk_da = dpkm1_da * z - pkm1 - dpkm2_da * yc + pkm2 * c;
XlaOp dqk_da = dqkm1_da * z - qkm1 - dqkm2_da * yc + qkm2 * c;
XlaOp dans_da_new =
Select(qk_is_nonzero, (dpk_da - ans * dqk_da) / qk, dans_da);
XlaOp grad_conditional =
Select(qk_is_nonzero, Abs(dans_da_new - dans_da), FullLike(dans_da, 1));
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
dpkm2_da = dpkm1_da;
dqkm2_da = dqkm1_da;
dpkm1_da = dpk_da;
dqkm1_da = dqk_da;
XlaOp rescale = Gt(Abs(pk), Reciprocal(Epsilon(builder, type)));
pkm2 = Select(rescale, pkm2 * Epsilon(builder, type), pkm2);
pkm1 = Select(rescale, pkm1 * Epsilon(builder, type), pkm1);
qkm2 = Select(rescale, qkm2 * Epsilon(builder, type), qkm2);
qkm1 = Select(rescale, qkm1 * Epsilon(builder, type), qkm1);
dpkm2_da = Select(rescale, dpkm2_da * Epsilon(builder, type), dpkm2_da);
dqkm2_da = Select(rescale, dqkm2_da * Epsilon(builder, type), dqkm2_da);
dpkm1_da = Select(rescale, dpkm1_da * Epsilon(builder, type), dpkm1_da);
dqkm1_da = Select(rescale, dqkm1_da * Epsilon(builder, type), dqkm1_da);
XlaOp conditional;
if (mode == VALUE) {
conditional = And(enabled, Gt(t, Epsilon(builder, type)));
} else {
conditional = And(enabled, Gt(grad_conditional, Epsilon(builder, type)));
}
return std::vector<XlaOp>{conditional,
Select(enabled, ans, vals[1]),
Select(enabled, t, vals[2]),
Select(enabled, y, vals[3]),
Select(enabled, z, vals[4]),
c,
Select(enabled, pkm1, vals[6]),
Select(enabled, qkm1, vals[7]),
Select(enabled, pkm2, vals[8]),
Select(enabled, qkm2, vals[9]),
Select(enabled, dpkm2_da, vals[10]),
Select(enabled, dqkm2_da, vals[11]),
Select(enabled, dpkm1_da, vals[12]),
Select(enabled, dqkm1_da, vals[13]),
Select(enabled, dans_da_new, vals[14])};
};
auto& b = *ax.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
XlaOp y = ScalarLike(a, 1) - a;
XlaOp z = x + y + ScalarLike(x, 1);
XlaOp c = ScalarLike(x, 0);
XlaOp pkm2 = FullLike(x, 1);
XlaOp qkm2 = x;
XlaOp pkm1 = x + ScalarLike(x, 1);
XlaOp qkm1 = z * x;
XlaOp ans = pkm1 / qkm1;
XlaOp t = FullLike(x, 1);
XlaOp dpkm2_da = FullLike(x, 0);
XlaOp dqkm2_da = FullLike(x, 0);
XlaOp dpkm1_da = FullLike(x, 0);
XlaOp dqkm1_da = -x;
XlaOp dans_da = (dpkm1_da - ans * dqkm1_da) / qkm1;
std::vector<XlaOp> vals = {enabled, ans, t, y, z,
c, pkm1, qkm1, pkm2, qkm2,
dpkm2_da, dqkm2_da, dpkm1_da, dqkm1_da, dans_da};
TF_ASSIGN_OR_RETURN(vals, WhileLoopHelper(cond, body, vals, "igammac", &b));
ans = vals[1];
if (mode == VALUE) {
return ans * ax;
}
dans_da = vals[14];
XlaOp dlogax_da = Log(x) - Digamma(a);
switch (mode) {
case DERIVATIVE:
return ax * (ans * dlogax_da + dans_da);
case SAMPLE_DERIVATIVE:
default:
return -(dans_da + ans * dlogax_da) * x;
}
});
}
}
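// Regularized lower incomplete gamma function P(a, x): uses the power series
// unless x > 1 and x > a, in which case it returns 1 minus the continued
// fraction. Special cases: x = 0 -> 0, x = +inf -> 1, and NaN for NaN inputs
// or the domain errors x < 0 or a <= 0.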
XlaOp Igamma(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp is_nan = Or(IsNan(a), IsNan(x));
XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
XlaOp x_is_infinity =
Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity()));
XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
ax = Exp(ax);
XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(
use_igammac,
ScalarLike(a, 1) - IgammacContinuedFraction<VALUE>(
ax, x, a, And(enabled, use_igammac), type),
IgammaSeries<VALUE>(ax, x, a, And(enabled, Not(use_igammac)), type));
output = Select(x_is_zero, ZerosLike(output), output);
output = Select(x_is_infinity, FullLike(output, 1), output);
output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
return output;
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to Igamma must have equal shapes and types; got %s and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igamma", a));
PrimitiveType a_x_type = a_shape.element_type();
bool needs_upcast = false;
for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN,
F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
if (a_shape.element_type() == type) {
needs_upcast = true;
break;
}
}
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
a_x_type = F32;
}
XlaOp result = doit(a, x, a_x_type);
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
XlaOp IgammaGradA(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp is_nan = Or(IsNan(a), IsNan(x));
XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
ax = Exp(ax);
XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(use_igammac,
-IgammacContinuedFraction<DERIVATIVE>(
ax, x, a, And(enabled, use_igammac), type),
IgammaSeries<DERIVATIVE>(
ax, x, a, And(enabled, Not(use_igammac)), type));
output = Select(x_is_zero, ZerosLike(output), output);
output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
return output;
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to IgammaGradA must have equal shapes and types; got %s "
"and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("IgammaGradA", a));
bool needs_upcast = false;
for (PrimitiveType type : {BF16, F16, F8E3M4, F8E4M3, F8E5M2, F8E4M3FN,
F8E4M3B11FNUZ, F8E5M2FNUZ, F8E4M3FNUZ}) {
if (a_shape.element_type() == type) {
needs_upcast = true;
break;
}
}
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
}
XlaOp result = doit(a, x, a_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
XlaOp RandomGammaGrad(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp is_nan = Or(IsNan(a), IsNan(x));
XlaOp x_is_zero = Eq(x, ScalarLike(x, 0));
XlaOp domain_error = Or(Lt(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igammac = And(Gt(x, ScalarLike(x, 1)), Gt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
ax = Exp(ax);
XlaOp enabled = Not(Or(Or(Or(x_is_zero, domain_error), underflow), is_nan));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(use_igammac,
-IgammacContinuedFraction<SAMPLE_DERIVATIVE>(
ax, x, a, And(enabled, use_igammac), type),
IgammaSeries<SAMPLE_DERIVATIVE>(
ax, x, a, And(enabled, Not(use_igammac)), type));
output = Select(x_is_zero, ZerosLike(output), output);
output = Select(Or(domain_error, is_nan), FullLike(a, nan), output);
return output;
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to RandomGammaGrad must have equal shapes and types; got "
"%s and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RandomGammaGrad", a));
bool needs_upcast =
a_shape.element_type() == F16 || a_shape.element_type() == BF16;
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
}
XlaOp result = doit(a, x, a_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
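// Regularized upper incomplete gamma function Q(a, x) = 1 - P(a, x), computed
// directly from the continued fraction when x >= 1 and x >= a and as
// 1 - series otherwise; x = +inf yields 0, and out-of-range arguments
// (x <= 0 or a <= 0) yield 1.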
XlaOp Igammac(XlaOp a, XlaOp x) {
auto& b = *a.builder();
auto doit = [&b](XlaOp a, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp out_of_range = Or(Le(x, ScalarLike(x, 0)), Le(a, ScalarLike(a, 0)));
XlaOp use_igamma = Or(Lt(x, ScalarLike(x, 1)), Lt(x, a));
XlaOp ax = a * Log(x) - x - Lgamma(a);
XlaOp underflow = Lt(ax, -Log(MaxFiniteValue(&b, type)));
XlaOp enabled = Not(Or(out_of_range, underflow));
ax = Exp(ax);
XlaOp result =
Select(use_igamma,
ScalarLike(a, 1) - IgammaSeries<VALUE>(
ax, x, a, And(enabled, use_igamma), type),
IgammacContinuedFraction<VALUE>(
ax, x, a, And(enabled, Not(use_igamma)), type));
XlaOp x_is_infinity =
Eq(x, ScalarLike(x, std::numeric_limits<float>::infinity()));
result = Select(x_is_infinity, ZerosLike(result), result);
return Select(out_of_range, FullLike(a, 1), result);
};
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto a_shape, b.GetShape(a));
TF_ASSIGN_OR_RETURN(auto x_shape, b.GetShape(x));
if (a_shape != x_shape) {
return InvalidArgument(
"Arguments to Igammac must have equal shapes and types; "
"got %s and %s",
a_shape.ToString(), x_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Igammac", a));
PrimitiveType a_x_type = a_shape.element_type();
bool needs_upcast =
a_shape.element_type() == F16 || a_shape.element_type() == BF16;
if (needs_upcast) {
a = ConvertElementType(a, F32);
x = ConvertElementType(x, F32);
a_x_type = F32;
}
XlaOp result = doit(a, x, a_x_type);
if (needs_upcast) {
result = ConvertElementType(result, a_shape.element_type());
}
return result;
});
}
XlaOp RoundToEven(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("RoundToEven", x));
return RoundNearestEven(x);
});
}
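// acos(x) = 2 * atan2(sqrt(1 - x^2), 1 + x), which stays accurate near x = 1;
// the complex case uses -i * log(x + i * sqrt((1 + x) * (1 - x))).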
XlaOp Acos(XlaOp x) {
XlaBuilder* b = x.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
if (primitive_util::IsComplexType(shape.element_type())) {
auto one = ScalarLike(x, 1);
auto imag_one = Complex(
Zero(b, primitive_util::ComplexComponentType(shape.element_type())),
One(b, primitive_util::ComplexComponentType(shape.element_type())));
auto result =
Neg(imag_one * Log(x + imag_one * Sqrt((one + x) * (one - x))));
return result;
}
return Select(Ne(x, FullLike(x, -1)),
ScalarLike(x, 2.0) * Atan2(Sqrt(ScalarLike(x, 1.0) - x * x),
ScalarLike(x, 1.0) + x),
FullLike(x, M_PI));
});
}
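// asin dispatches to the real or complex implementation in math_impl.h based
// on the element type; narrow float types are upcast to F32 first.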
XlaOp Asin(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp z) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(z));
auto elem_ty = shape.element_type();
switch (elem_ty) {
case C128:
return math_impl::AsinComplex<double>(z);
case C64:
return math_impl::AsinComplex<float>(z);
case F64:
return math_impl::AsinReal<double>(z);
case F32:
return math_impl::AsinReal<float>(z);
default:
return InvalidArgument("Asin got unsupported element type %s",
PrimitiveType_Name(elem_ty));
}
};
return DoWithUpcastToF32(
x, {}, [&](XlaOp x) { return b->ReportErrorOrReturn(do_it(x)); });
}
XlaOp Atan(XlaOp x) { return Atan2(x, ScalarLike(x, 1.0)); }
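// acosh(x) = log(x + sqrt((x + 1) * (x - 1))), switching to log(x) + log(2)
// when x is large enough that x^2 would overflow; real x < -1 yields NaN.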
XlaOp Acosh(XlaOp x) {
XlaBuilder* b = x.builder();
return b->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto one = ScalarLike(x, 1);
auto neg_one = ScalarLike(x, -1);
auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN());
auto naive_result = Log(x + Sqrt((x + one) * (x - one)));
if (primitive_util::IsComplexType(shape.element_type())) {
return naive_result;
}
auto overflow_result = Log(x) + Log(ScalarLike(x, 2));
auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type()));
return Select(Lt(x, neg_one), nan,
Select(Ge(x, sqrt_max_value), overflow_result, naive_result));
});
}
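// asinh(x) = log(x + sqrt(x^2 + 1)), with a log1p-based formula for small |x|
// and an overflow-safe log(|x|) + log(2) form for large |x|; the complex case
// is derived from asin via asinh(x) = i * asin(-i * x) with a branch-cut
// correction.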
XlaOp Asinh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto one = ScalarLike(x, 1);
if (primitive_util::IsComplexType(shape.element_type())) {
auto x_re = Real(x);
auto x_im = Imag(x);
auto z = Asin(Complex(x_im, -x_re));
auto z_im = Imag(z);
auto on_branch_cut = And(Eq(x_re, ScalarLike(x_re, 0)),
Gt(Abs(x_im), ScalarLike(x_im, 1)));
return Complex(Select(on_branch_cut, z_im, -z_im), Real(z));
}
auto a = Abs(x);
auto small_result = Log1p(a + a * a / (one + Sqrt(a * a + one)));
auto naive_result = Log(a + Sqrt(a * a + one));
auto overflow_result = Log(Abs(a)) + Log(ScalarLike(a, 2));
auto sqrt_max_value = Sqrt(MaxFiniteValue(b, shape.element_type()));
return Sign(x) * Select(Ge(a, sqrt_max_value), overflow_result,
Select(Le(a, one), small_result, naive_result));
};
return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
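// atanh(x) = (log1p(x) - log1p(-x)) / 2; real inputs with |x| > 1 map to NaN.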
XlaOp Atanh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto naive_result = (Log1p(x) - Log1p(-x)) * ScalarLike(x, 0.5);
if (primitive_util::IsComplexType(shape.element_type())) {
return naive_result;
}
auto nan = FullLike(x, std::numeric_limits<float>::quiet_NaN());
return Select(Gt(Abs(x), ScalarLike(x, 1)), nan, naive_result);
};
return DoWithUpcastToF32(x, {BF16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
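// cosh(x) = e^(x + log(1/2)) + e^(-x + log(1/2)), with the 1/2 folded into
// the exponent so that e^x does not overflow prematurely; real results are
// clamped to >= 1.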
XlaOp Cosh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto log_one_half = Log(ScalarLike(x, 0.5));
auto result = Exp(x + log_one_half) + Exp(-x + log_one_half);
if (primitive_util::IsComplexType(shape.element_type())) {
return result;
}
return Max(result, ScalarLike(result, 1.0));
};
return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
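// sinh(x): for |x| >= 1 uses the overflow-safe e^(x + log(1/2)) form; for
// |x| < 1 uses an expm1-based formula that avoids cancellation near zero.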
XlaOp Sinh(XlaOp x) {
XlaBuilder* b = x.builder();
auto do_it = [&](XlaOp x) -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, b->GetShape(x));
auto one_half = ScalarLike(x, 0.5);
auto log_one_half = Log(ScalarLike(x, 0.5));
auto large_sinh_result = Exp(x + log_one_half) - Exp(-x + log_one_half);
if (primitive_util::IsComplexType(shape.element_type())) {
return large_sinh_result;
}
auto expm1 = Expm1(x);
auto one = ScalarLike(x, 1.);
auto small_sinh_result = one_half * (expm1 + expm1 / (expm1 + one));
return Select(Lt(Abs(x), one), small_sinh_result, large_sinh_result);
};
return DoWithUpcastToF32(x, {BF16, F16}, [&](XlaOp x) {
return b->ReportErrorOrReturn(do_it(x));
});
}
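// Returns Conj(x) when `conjugate` is set and x is complex; otherwise x.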
XlaOp MaybeConjugate(XlaOp x, bool conjugate) {
XlaBuilder* builder = x.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
auto perform_conj =
primitive_util::IsComplexType(shape.element_type()) && conjugate;
return perform_conj ? Conj(x) : x;
});
}
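// Bit-level implementation of std::nextafter: bitcasts to the same-width
// unsigned integer type and moves the magnitude one ULP toward `to`, with
// explicit handling of NaNs, equal inputs, and signed zeros.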
XlaOp NextAfter(XlaOp from, XlaOp to) {
auto builder = from.builder();
return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto shape, builder->GetShape(from));
int bitwidth = primitive_util::BitWidth(shape.element_type());
auto int_type = primitive_util::UnsignedIntegralTypeForBitWidth(bitwidth);
auto from_as_int = BitcastConvertType(from, int_type);
auto to_as_int = BitcastConvertType(to, int_type);
auto from_is_nan = Ne(from, from);
auto to_is_nan = Ne(to, to);
auto nan_input = Or(from_is_nan, to_is_nan);
auto result_for_nan =
Broadcast(ScalarLike(from, std::numeric_limits<double>::quiet_NaN()),
shape.dimensions());
result_for_nan = BitcastConvertType(result_for_nan, int_type);
const int64_t sign_mask = int64_t{1} << (bitwidth - 1);
auto from_abs = And(from_as_int, ScalarLike(from_as_int, ~sign_mask));
auto to_abs = And(to_as_int, ScalarLike(to_as_int, ~sign_mask));
auto from_and_to_are_equal = Eq(from_as_int, to_as_int);
auto result_for_equal = to_as_int;
auto from_is_zero = Eq(from_abs, ZerosLike(from_abs));
auto to_is_zero = Eq(to_abs, ZerosLike(to_abs));
auto result_for_both_zero = to_as_int;
auto from_sign = And(from_as_int, ScalarLike(from_as_int, sign_mask));
auto to_sign = And(to_as_int, ScalarLike(to_as_int, sign_mask));
auto result_for_from_zero_to_non_zero =
Or(to_sign, ScalarLike(from_as_int, 1));
auto signs_disagree = Ne(from_sign, to_sign);
auto from_magnitude_larger_than_to = Gt(from_abs, to_abs);
auto result_has_smaller_magnitude =
Or(from_magnitude_larger_than_to, signs_disagree);
auto magnitude_adjustment =
Select(result_has_smaller_magnitude,
Broadcast(ScalarLike(from_as_int, -1), shape.dimensions()),
Broadcast(ScalarLike(from_as_int, 1), shape.dimensions()));
auto result = Add(from_as_int, magnitude_adjustment);
result = Select(from_is_zero,
Select(to_is_zero, result_for_both_zero,
result_for_from_zero_to_non_zero),
result);
result = Select(from_and_to_are_equal, result_for_equal, result);
result = Select(nan_input, result_for_nan, result);
return BitcastConvertType(result, shape.element_type());
});
}
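// Exponentially scaled modified Bessel function of order zero,
// I0e(x) = exp(-|x|) * I0(x), via Cephes' Chebyshev expansions: one series on
// [0, 8] in (x/2 - 2) and one on (8, inf) in (32/x - 2) scaled by 1/sqrt(x).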
static XlaOp I0eImpl32(XlaOp x) {
static const std::array<float, 18> kI0eCoeffsA{
-1.30002500998624804212E-8f, 6.04699502254191894932E-8f,
-2.67079385394061173391E-7f, 1.11738753912010371815E-6f,
-4.41673835845875056359E-6f, 1.64484480707288970893E-5f,
-5.75419501008210370398E-5f, 1.88502885095841655729E-4f,
-5.76375574538582365885E-4f, 1.63947561694133579842E-3f,
-4.32430999505057594430E-3f, 1.05464603945949983183E-2f,
-2.37374148058994688156E-2f, 4.93052842396707084878E-2f,
-9.49010970480476444210E-2f, 1.71620901522208775349E-1f,
-3.04682672343198398683E-1f, 6.76795274409476084995E-1f};
static const std::array<float, 7> kI0eCoeffsB{
3.39623202570838634515E-9f, 2.26666899049817806459E-8f,
2.04891858946906374183E-7f, 2.89137052083475648297E-6f,
6.88975834691682398426E-5f, 3.36911647825569408990E-3f,
8.04490411014108831608E-1f};
x = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
EvaluateChebyshevPolynomial<float>(half * x - two, kI0eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<float>(thirty_two / x - two, kI0eCoeffsB) /
Sqrt(x);
return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
static XlaOp I0eImpl64(XlaOp x) {
static const std::array<double, 30> kI0eCoeffsA{
-4.41534164647933937950E-18, 3.33079451882223809783E-17,
-2.43127984654795469359E-16, 1.71539128555513303061E-15,
-1.16853328779934516808E-14, 7.67618549860493561688E-14,
-4.85644678311192946090E-13, 2.95505266312963983461E-12,
-1.72682629144155570723E-11, 9.67580903537323691224E-11,
-5.18979560163526290666E-10, 2.65982372468238665035E-9,
-1.30002500998624804212E-8, 6.04699502254191894932E-8,
-2.67079385394061173391E-7, 1.11738753912010371815E-6,
-4.41673835845875056359E-6, 1.64484480707288970893E-5,
-5.75419501008210370398E-5, 1.88502885095841655729E-4,
-5.76375574538582365885E-4, 1.63947561694133579842E-3,
-4.32430999505057594430E-3, 1.05464603945949983183E-2,
-2.37374148058994688156E-2, 4.93052842396707084878E-2,
-9.49010970480476444210E-2, 1.71620901522208775349E-1,
-3.04682672343198398683E-1, 6.76795274409476084995E-1};
static const std::array<double, 25> kI0eCoeffsB{
-7.23318048787475395456E-18, -4.83050448594418207126E-18,
4.46562142029675999901E-17, 3.46122286769746109310E-17,
-2.82762398051658348494E-16, -3.42548561967721913462E-16,
1.77256013305652638360E-15, 3.81168066935262242075E-15,
-9.55484669882830764870E-15, -4.15056934728722208663E-14,
1.54008621752140982691E-14, 3.85277838274214270114E-13,
7.18012445138366623367E-13, -1.79417853150680611778E-12,
-1.32158118404477131188E-11, -3.14991652796324136454E-11,
1.18891471078464383424E-11, 4.94060238822496958910E-10,
3.39623202570838634515E-9, 2.26666899049817806459E-8,
2.04891858946906374183E-7, 2.89137052083475648297E-6,
6.88975834691682398426E-5, 3.36911647825569408990E-3,
8.04490411014108831608E-1};
x = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
EvaluateChebyshevPolynomial<double>(half * x - two, kI0eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<double>(thirty_two / x - two, kI0eCoeffsB) /
Sqrt(x);
return Select(Le(x, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
XlaOp BesselI0e(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI0e", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return I0eImpl64(x);
}
return DoWithUpcastToF32(x, {BF16, F16},
[](XlaOp x) { return I0eImpl32(x); });
});
}
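// Exponentially scaled modified Bessel function of order one,
// I1e(x) = exp(-|x|) * I1(x); odd in x, with the same split at |x| = 8 as
// I0e.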
static XlaOp I1eImpl32(XlaOp x) {
static const std::array<float, 17> kI1eCoeffsA{
9.38153738649577178388E-9f, -4.44505912879632808065E-8f,
2.00329475355213526229E-7f, -8.56872026469545474066E-7f,
3.47025130813767847674E-6f, -1.32731636560394358279E-5f,
4.78156510755005422638E-5f, -1.61760815825896745588E-4f,
5.12285956168575772895E-4f, -1.51357245063125314899E-3f,
4.15642294431288815669E-3f, -1.05640848946261981558E-2f,
2.47264490306265168283E-2f, -5.29459812080949914269E-2f,
1.02643658689847095384E-1f, -1.76416518357834055153E-1f,
2.52587186443633654823E-1f};
static const std::array<float, 7> kI1eCoeffsB{
-3.83538038596423702205E-9f, -2.63146884688951950684E-8f,
-2.51223623787020892529E-7f, -3.88256480887769039346E-6f,
-1.10588938762623716291E-4f, -9.76109749136146840777E-3f,
7.78576235018280120474E-1f};
XlaOp z = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
z * EvaluateChebyshevPolynomial<float>(half * z - two, kI1eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<float>(thirty_two / z - two, kI1eCoeffsB) /
Sqrt(z);
return Sign(x) *
Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
static XlaOp I1eImpl64(XlaOp x) {
static const std::array<double, 29> kI1eCoeffsA{
2.77791411276104639959E-18, -2.11142121435816608115E-17,
1.55363195773620046921E-16, -1.10559694773538630805E-15,
7.60068429473540693410E-15, -5.04218550472791168711E-14,
3.22379336594557470981E-13, -1.98397439776494371520E-12,
1.17361862988909016308E-11, -6.66348972350202774223E-11,
3.62559028155211703701E-10, -1.88724975172282928790E-9,
9.38153738649577178388E-9, -4.44505912879632808065E-8,
2.00329475355213526229E-7, -8.56872026469545474066E-7,
3.47025130813767847674E-6, -1.32731636560394358279E-5,
4.78156510755005422638E-5, -1.61760815825896745588E-4,
5.12285956168575772895E-4, -1.51357245063125314899E-3,
4.15642294431288815669E-3, -1.05640848946261981558E-2,
2.47264490306265168283E-2, -5.29459812080949914269E-2,
1.02643658689847095384E-1, -1.76416518357834055153E-1,
2.52587186443633654823E-1};
static const std::array<double, 25> kI1eCoeffsB{
7.51729631084210481353E-18, 4.41434832307170791151E-18,
-4.65030536848935832153E-17, -3.20952592199342395980E-17,
2.96262899764595013876E-16, 3.30820231092092828324E-16,
-1.88035477551078244854E-15, -3.81440307243700780478E-15,
1.04202769841288027642E-14, 4.27244001671195135429E-14,
-2.10154184277266431302E-14, -4.08355111109219731823E-13,
-7.19855177624590851209E-13, 2.03562854414708950722E-12,
1.41258074366137813316E-11, 3.25260358301548823856E-11,
-1.89749581235054123450E-11, -5.58974346219658380687E-10,
-3.83538038596423702205E-9, -2.63146884688951950684E-8,
-2.51223623787020892529E-7, -3.88256480887769039346E-6,
-1.10588938762623716291E-4, -9.76109749136146840777E-3,
7.78576235018280120474E-1};
XlaOp z = Abs(x);
auto half = xla::ScalarLike(x, 0.5);
auto two = xla::ScalarLike(x, 2.0);
auto thirty_two = xla::ScalarLike(x, 32.0);
auto result_le_8 =
z * EvaluateChebyshevPolynomial<double>(half * z - two, kI1eCoeffsA);
auto result_gt_8 =
EvaluateChebyshevPolynomial<double>(thirty_two / z - two, kI1eCoeffsB) /
Sqrt(z);
return Sign(x) *
Select(Le(z, xla::ScalarLike(x, 8.0)), result_le_8, result_gt_8);
}
XlaOp BesselI1e(XlaOp x) {
auto& b = *x.builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("BesselI1e", x));
TF_ASSIGN_OR_RETURN(auto shape, b.GetShape(x));
if (shape.element_type() == F64) {
return I1eImpl64(x);
}
return DoWithUpcastToF32(x, {BF16, F16},
[](XlaOp x) { return I1eImpl32(x); });
});
}
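// Evaluates a generalized continued fraction with the modified
// Lentz-Thompson-Barnett method: carries C_n, D_n, and the running product
// H_n, substitutes `small` for near-zero intermediates, and stops once every
// element's update factor is within `threshold` of 1 or `num_iterations` is
// reached. `nth_partial_numerator` / `nth_partial_denominator` supply the
// nth terms of the fraction.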
static XlaOp LentzThompsonBarnettAlgorithm(
int64_t num_iterations, double small, double threshold,
const ForEachIndexBodyFunction& nth_partial_numerator,
const ForEachIndexBodyFunction& nth_partial_denominator,
absl::Span<const XlaOp> inputs, absl::string_view name) {
auto& b = *inputs.front().builder();
return b.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_RET_CHECK(num_iterations < INT32_MAX);
enum {
kIterationIdx,
kValuesUnconvergedIdx,
kCIdx,
kDIdx,
kHIdx,
kFirstInputIdx,
};
auto while_cond_fn =
[num_iterations](absl::Span<const XlaOp> values,
XlaBuilder* cond_builder) -> absl::StatusOr<XlaOp> {
auto iteration = values[kIterationIdx];
auto iterations_remain_cond =
Lt(iteration, ScalarLike(iteration, num_iterations));
auto values_unconverged_cond = values[kValuesUnconvergedIdx];
return And(iterations_remain_cond, values_unconverged_cond);
};
auto while_body_fn =
[small, threshold, &nth_partial_numerator, &nth_partial_denominator](
absl::Span<const XlaOp> values,
XlaBuilder* body_builder) -> absl::StatusOr<std::vector<XlaOp>> {
XlaOp iteration = values[kIterationIdx];
TF_ASSIGN_OR_RETURN(
std::vector<XlaOp> partial_numerator,
nth_partial_numerator(iteration, values.subspan(kFirstInputIdx),
body_builder));
TF_RET_CHECK(partial_numerator.size() == 1);
TF_ASSIGN_OR_RETURN(
std::vector<XlaOp> partial_denominator,
nth_partial_denominator(iteration, values.subspan(kFirstInputIdx),
body_builder));
TF_RET_CHECK(partial_denominator.size() == 1);
auto c = partial_denominator[0] + partial_numerator[0] / values[kCIdx];
auto small_constant = FullLike(c, small);
c = Select(Lt(Abs(c), small_constant), small_constant, c);
auto d = partial_denominator[0] + partial_numerator[0] * values[kDIdx];
d = Select(Lt(Abs(d), small_constant), small_constant, d);
d = Reciprocal(d);
auto delta = c * d;
auto h = values[kHIdx] * delta;
std::vector<XlaOp> updated_values(values.size());
updated_values[kIterationIdx] = Add(iteration, ScalarLike(iteration, 1));
updated_values[kCIdx] = c;
updated_values[kDIdx] = d;
updated_values[kHIdx] = h;
std::copy(values.begin() + kFirstInputIdx, values.end(),
updated_values.begin() + kFirstInputIdx);
auto tolerance_comparison =
Ge(Abs(Sub(delta, FullLike(delta, 1.0))), FullLike(delta, threshold));
updated_values[kValuesUnconvergedIdx] =
ReduceAll(tolerance_comparison, ConstantR0<bool>(body_builder, false),
CreateScalarOrComputation(PRED, body_builder));
return updated_values;
};
TF_ASSIGN_OR_RETURN(std::vector<XlaOp> partial_denominator,
nth_partial_denominator(Zero(&b, U32), inputs, &b));
TF_RET_CHECK(partial_denominator.size() == 1);
auto h = partial_denominator[0];
auto small_constant = FullLike(h, small);
h = Select(Lt(Abs(h), small_constant), small_constant, h);
std::vector<XlaOp> values(kFirstInputIdx + inputs.size());
values[kIterationIdx] = One(&b, U32);
values[kValuesUnconvergedIdx] = ConstantR0<bool>(&b, true);
values[kCIdx] = h;
values[kDIdx] = FullLike(h, 0.0);
values[kHIdx] = h;
std::copy(inputs.begin(), inputs.end(), values.begin() + kFirstInputIdx);
TF_ASSIGN_OR_RETURN(values, WhileLoopHelper(while_cond_fn, while_body_fn,
values, name, &b));
return values[kHIdx];
});
}
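// Regularized incomplete beta function I_x(a, b), evaluated through the
// continued-fraction expansion of Cephes' incbet with the
// Lentz-Thompson-Barnett driver above. When x >= (a + 1) / (a + b + 2) the
// identity I_x(a, b) = 1 - I_{1-x}(b, a) is applied so the fraction
// converges quickly; invalid arguments produce NaN.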
XlaOp RegularizedIncompleteBeta(XlaOp a, XlaOp b, XlaOp x) {
auto& builder = *x.builder();
return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(Shape shape, builder.GetShape(a));
TF_ASSIGN_OR_RETURN(Shape b_shape, builder.GetShape(b));
TF_ASSIGN_OR_RETURN(Shape x_shape, builder.GetShape(x));
if (b_shape.element_type() != shape.element_type() ||
x_shape.element_type() != shape.element_type()) {
return InvalidArgument(
"Operands to RegularizedIncompleteBeta must have identical types, "
"got shapes %s, %s, and %s",
shape.ToString(), b_shape.ToString(), x_shape.ToString());
}
if (!primitive_util::IsFloatingPointType(shape.element_type())) {
return InvalidArgument(
"Operands to RegularizedIncompleteBeta must be real-valued "
"floating-point, but got %s",
PrimitiveType_Name(shape.element_type()));
}
PrimitiveType element_type = shape.element_type();
if (element_type == F16 || element_type == BF16) {
element_type = F32;
a = ConvertElementType(a, F32);
b = ConvertElementType(b, F32);
x = ConvertElementType(x, F32);
}
auto NthPartialBetaincNumerator =
[&](XlaOp iteration, absl::Span<const XlaOp> inputs,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto a = inputs[0];
auto b = inputs[1];
auto x = inputs[2];
auto iteration_bcast = Broadcast(iteration, shape.dimensions());
auto iteration_is_even =
Eq(iteration_bcast % FullLike(iteration_bcast, 2),
FullLike(iteration_bcast, 0));
auto iteration_is_one = Eq(iteration_bcast, FullLike(iteration_bcast, 1));
auto iteration_minus_one = iteration_bcast - FullLike(iteration_bcast, 1);
auto m = iteration_minus_one / FullLike(iteration_minus_one, 2);
m = ConvertElementType(m, element_type);
auto one = FullLike(a, 1.0);
auto two = FullLike(a, 2.0);
auto even_numerator =
-(a + m) * (a + b + m) * x / ((a + two * m) * (a + two * m + one));
auto odd_numerator =
m * (b - m) * x / ((a + two * m - one) * (a + two * m));
auto one_numerator = ScalarLike(x, 1.0);
auto numerator = Select(iteration_is_even, even_numerator, odd_numerator);
return std::vector<XlaOp>{
Select(iteration_is_one, one_numerator, numerator)};
};
auto NthPartialBetaincDenominator =
[&shape](XlaOp iteration, absl::Span<const XlaOp> inputs,
XlaBuilder* builder) -> absl::StatusOr<std::vector<XlaOp>> {
auto x = inputs[2];
auto iteration_bcast = Broadcast(iteration, shape.dimensions());
return std::vector<XlaOp>{
Select(Eq(iteration_bcast, ScalarLike(iteration_bcast, 0)),
ScalarLike(x, 0.0), ScalarLike(x, 1.0))};
};
auto result_is_nan =
Or(Or(Or(Le(a, ScalarLike(a, 0.0)), Le(b, ScalarLike(b, 0.0))),
Lt(x, ScalarLike(x, 0.0))),
Gt(x, ScalarLike(x, 1.0)));
auto converges_rapidly =
Lt(x, (a + FullLike(a, 1.0)) / (a + b + FullLike(b, 2.0)));
auto a_orig = a;
a = Select(converges_rapidly, a, b);
b = Select(converges_rapidly, b, a_orig);
x = Select(converges_rapidly, x, Sub(FullLike(x, 1.0), x));
XlaOp continued_fraction;
if (element_type == F32) {
      continued_fraction = LentzThompsonBarnettAlgorithm(
          /*num_iterations=*/200,
          /*small=*/std::numeric_limits<float>::epsilon() / 2.0f,
          /*threshold=*/std::numeric_limits<float>::epsilon() / 2.0f,
          /*nth_partial_numerator=*/NthPartialBetaincNumerator,
          /*nth_partial_denominator=*/NthPartialBetaincDenominator, {a, b, x},
          /*name=*/"Betainc");
} else {
TF_RET_CHECK(element_type == F64);
      continued_fraction = LentzThompsonBarnettAlgorithm(
          /*num_iterations=*/600,
          /*small=*/std::numeric_limits<double>::epsilon() / 2.0,
          /*threshold=*/std::numeric_limits<double>::epsilon() / 2.0,
          /*nth_partial_numerator=*/NthPartialBetaincNumerator,
          /*nth_partial_denominator=*/NthPartialBetaincDenominator, {a, b, x},
          /*name=*/"Betainc");
}
auto lbeta = Lbeta(a, b);
auto result =
continued_fraction * Exp(Log(x) * a + Log1p(-x) * b - lbeta) / a;
result = Select(result_is_nan, NanValue(&builder, element_type), result);
auto out =
Select(converges_rapidly, result, Sub(FullLike(result, 1.0), result));
return shape.element_type() == element_type
? out
: ConvertElementType(out, shape.element_type());
});
}
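// Polygamma function psi_n(x): Digamma(x) for n = 0 and
// (-1)^(n+1) * n! * zeta(n + 1, x) otherwise; non-integer or negative n
// yields NaN.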
XlaOp Polygamma(XlaOp n, XlaOp x) {
auto& builder = *x.builder();
auto doit = [](XlaOp n, XlaOp x, PrimitiveType type) -> XlaOp {
XlaOp n_plus_one = n + ScalarLike(n, 1.);
XlaOp sign =
(ScalarLike(n, 2.) * Rem(n, ScalarLike(n, 2.)) - ScalarLike(n, 1.));
const double nan = std::numeric_limits<double>::quiet_NaN();
XlaOp output = Select(Eq(n, ScalarLike(n, 0.)), Digamma(x),
sign * Exp(Lgamma(n_plus_one)) * Zeta(n_plus_one, x));
output = Select(Or(Ne(n, Floor(n)), Lt(n, ScalarLike(n, 0.))),
ScalarLike(n, nan), output);
return output;
};
return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto n_shape, builder.GetShape(n));
TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x));
if (n_shape != x_shape) {
return InvalidArgument(
"Arguments to Polygamma must have equal shapes and types; "
"got %s and %s",
n_shape.ToString(), x_shape.ToString());
}
    TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Polygamma", x));
    bool needs_upcast =
        n_shape.element_type() == F16 || n_shape.element_type() == BF16;
if (needs_upcast) {
n = ConvertElementType(n, F32);
x = ConvertElementType(x, F32);
}
XlaOp result = doit(n, x, n_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, n_shape.element_type());
}
return result;
});
}
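// Hurwitz zeta function zeta(x, q) = sum_{n >= 0} (q + n)^-x, computed with
// Euler-Maclaurin summation: N explicit terms, an integral correction I, and
// a Bernoulli tail T (kZetaCoeffs holds (2k)!/B_{2k} in descending k), with
// special handling of the pole at x = 1 and of non-positive integer q.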
XlaOp Zeta(XlaOp x, XlaOp q) {
auto& builder = *x.builder();
auto doit = [&builder](XlaOp x, XlaOp q, PrimitiveType type) -> XlaOp {
static constexpr int M = 12, N = 9;
static const std::array<double, M> kZetaCoeffs{
-7.1661652561756670113e18,
1.8152105401943546773e17,
-4.5979787224074726105e15,
1.1646782814350067249e14,
-2.950130727918164224e12,
7.47242496e10,
-1.8924375803183791606e9,
47900160.0,
-1209600.0,
30240.0,
-720.0,
12.0,
};
XlaOp acc = q, neg_power = ScalarLike(q, 0.);
XlaOp S = Pow(q, Neg(x));
for (int i = 0; i < N; ++i) {
acc = acc + ScalarLike(acc, 1.);
neg_power = Pow(acc, Neg(x));
S = S + neg_power;
}
acc = acc + ScalarLike(acc, 1.);
neg_power = Pow(acc, Neg(x));
XlaOp I = neg_power * acc / (x - ScalarLike(acc, 1.));
XlaOp a_inverse_square = Reciprocal(Square(acc));
XlaOp horner_sum = ScalarLike(acc, 0.);
XlaOp factor = ScalarLike(acc, 1.);
static constexpr int kTwoKMinusOne = 2 * M - 1;
for (int i = 0; i < M - 1; ++i) {
factor = (x + ScalarLike(x, kTwoKMinusOne - 1 - 2 * i)) *
(x + ScalarLike(x, kTwoKMinusOne - 2 - 2 * i));
horner_sum = factor * a_inverse_square *
(horner_sum + ScalarLike(acc, 1. / kZetaCoeffs[i]));
}
XlaOp T =
neg_power *
(ScalarLike(neg_power, 0.5) +
x / acc * (ScalarLike(acc, 1. / kZetaCoeffs[M - 1]) + horner_sum));
XlaOp accurate_result = S + I + T;
const double nan = std::numeric_limits<double>::quiet_NaN();
const double inf = std::numeric_limits<double>::infinity();
XlaOp output = Select(Lt(Abs(neg_power), Abs(S) * Epsilon(&builder, type)),
S, accurate_result);
output = Select(Eq(x, ScalarLike(x, 1.)), ScalarLike(x, inf), output);
output = Select(Lt(x, ScalarLike(x, 1.)), ScalarLike(x, nan), output);
XlaOp x_domain_error = And(Le(q, ScalarLike(x, 0.)), Ne(x, Floor(x)));
output = Select(x_domain_error, ScalarLike(x, nan), output);
XlaOp at_pole = And(Le(q, ScalarLike(x, 0.)), Eq(q, Floor(q)));
XlaOp x_is_even_int =
And(Eq(Rem(x, ScalarLike(x, 2.)), ScalarLike(x, 0.)), Eq(x, Floor(x)));
output = Select(
at_pole, Select(x_is_even_int, ScalarLike(x, inf), ScalarLike(x, nan)),
output);
return output;
};
return builder.ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
TF_ASSIGN_OR_RETURN(auto x_shape, builder.GetShape(x));
TF_ASSIGN_OR_RETURN(auto q_shape, builder.GetShape(q));
if (x_shape != q_shape) {
return InvalidArgument(
"Arguments to Zeta must have equal shapes and types; got %s and %s",
x_shape.ToString(), q_shape.ToString());
}
TF_RETURN_IF_ERROR(EnsureOperandIsRealFp("Zeta", x));
bool needs_upcast =
x_shape.element_type() == F16 || x_shape.element_type() == BF16;
if (needs_upcast) {
x = ConvertElementType(x, F32);
q = ConvertElementType(q, F32);
}
XlaOp result = doit(x, q, x_shape.element_type());
if (needs_upcast) {
result = ConvertElementType(result, x_shape.element_type());
}
return result;
});
}
} | #include "xla/hlo/builder/lib/math.h"
#include <cmath>
#include <complex>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/service.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class MathTest : public ClientLibraryTestBase {
public:
ErrorSpec error_spec_{0.0001};
};
template <typename T>
class MathTypedTest : public MathTest {
public:
void TestLogEdgeCases() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
Log(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}}), &b));
ComputeAndCompareR1<T>(&b,
{-std::numeric_limits<T>::infinity(),
-std::numeric_limits<T>::infinity()},
{}, error_spec_);
}
void TestLog1pEdgeCases() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
Log1p(AddParam(LiteralUtil::CreateR1<T>({T{0.0}, T{-0.0}, T{-1.0}}), &b));
ComputeAndCompareR1<T>(
&b, {T{0.0}, T{-0.0}, -std::numeric_limits<T>::infinity()}, {},
error_spec_);
}
void TestIsInfOrNan() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
auto x =
ConstantR1<T>(&b, {
T{0},
T{100},
T{-1000},
T{std::numeric_limits<T>::max()},
T{std::numeric_limits<T>::lowest()},
T{std::numeric_limits<float>::infinity()},
T{-std::numeric_limits<float>::infinity()},
T{std::numeric_limits<float>::quiet_NaN()},
T{std::numeric_limits<float>::signaling_NaN()},
});
Tuple(&b, {IsFinite(x), IsInf(x), IsPosInf(x), IsNegInf(x), IsNan(x)});
auto expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateR1<bool>(
{true, true, true, true, true, false, false, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, true, true, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, true, false, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, false, true, false, false}),
LiteralUtil::CreateR1<bool>(
{false, false, false, false, false, false, false, true, true}));
ComputeAndCompareLiteral(&b, expected, {});
}
void TestIsNegZero() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
T inf(std::numeric_limits<float>::infinity());
T nan(std::numeric_limits<float>::quiet_NaN());
IsNegZero(AddParam(
LiteralUtil::CreateR1<T>({T{-0.0}, T{0}, T{1}, T{-1}, inf, -inf, nan}),
&b));
ComputeAndCompareLiteral(
&b,
LiteralUtil::CreateR1<bool>(
{true, false, false, false, false, false, false}),
{}, error_spec_);
}
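  // Checks that Sqrt and Pow(x, 0.5) deliberately differ at x = -inf: IEEE
  // pow(-inf, 0.5) is +inf while sqrt(-inf) is NaN.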
void TestSqrtPowInequivalence() {
SetFastMathDisabled(true);
mutable_debug_options()->clear_xla_disable_hlo_passes();
const T inf(std::numeric_limits<float>::infinity());
const T nan(std::numeric_limits<float>::quiet_NaN());
XlaBuilder b(TestName());
auto x = AddParam(LiteralUtil::CreateR1<T>({-inf}), &b);
ConcatInDim(
&b, {Sqrt(x), Pow(x, ScalarLike(x, 0.5)), Pow(x, ScalarLike(x, 0.3))},
0);
std::vector<T> expected = {nan, inf, inf};
ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
}
void TestErfInvEdgeCases() {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
auto x = AddParam(LiteralUtil::CreateR1<T>({T{-1}, T{1}, T{0}}), &b);
ErfInv(x);
const T inf(std::numeric_limits<float>::infinity());
std::vector<T> expected = {-inf, inf, T{0}};
ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
}
void TestErfEdgeCases() {
SetFastMathDisabled(true);
const T kErfInvOneMinusHalfULP = T(3.832506856900711);
const T inf(std::numeric_limits<float>::infinity());
XlaBuilder b(TestName());
auto x = AddParam(LiteralUtil::CreateR1<T>({T{-inf}, T{inf}, T{-0}, T{0},
T{-kErfInvOneMinusHalfULP},
T{kErfInvOneMinusHalfULP}}),
&b);
Erf(x);
std::vector<T> expected = {T(-1), T(1), T(-0), T(0), T(-1), T(1)};
ComputeAndCompareR1<T>(&b, expected, {}, error_spec_);
}
};
using TestTypes = ::testing::Types<float
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16
,
Eigen::half
#endif
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64
,
double
#endif
>;
TYPED_TEST_SUITE(MathTypedTest, TestTypes);
XLA_TYPED_TEST(MathTypedTest, LogEdgeCases) { this->TestLogEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, Log1pEdgeCases) { this->TestLog1pEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, IsInfOrNan) { this->TestIsInfOrNan(); }
XLA_TYPED_TEST(MathTypedTest, IsNegZero) { this->TestIsNegZero(); }
XLA_TYPED_TEST(MathTypedTest, DISABLED_ON_TPU(SqrtPowInequivalence)) {
this->TestSqrtPowInequivalence();
}
XLA_TYPED_TEST(MathTypedTest, ErfInvEdgeCases) { this->TestErfInvEdgeCases(); }
XLA_TYPED_TEST(MathTypedTest, ErfEdgeCases) { this->TestErfEdgeCases(); }
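// Each of these ops must build successfully for real floating-point arrays
// and surface a builder error for every other element type (integers,
// complex, tuples, tokens, and opaques).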
XLA_TEST_F(MathTest, RealFpOnlyOps) {
for (int64_t i = PrimitiveType_MIN; i <= PrimitiveType_MAX; ++i) {
auto ty = static_cast<PrimitiveType>(i);
SCOPED_TRACE(PrimitiveType_Name(ty));
Shape shape;
if (ty == U4 || ty == S4) {
continue;
}
if (primitive_util::IsArrayType(ty)) {
shape = ShapeUtil::MakeShape(ty, {42});
} else if (ty == PrimitiveType::TUPLE) {
shape = ShapeUtil::MakeTupleShape({});
} else if (ty == PrimitiveType::OPAQUE_TYPE) {
shape = ShapeUtil::MakeOpaqueShape();
} else if (ty == PrimitiveType::TOKEN) {
shape = ShapeUtil::MakeTokenShape();
} else {
continue;
}
for (const auto& test :
std::vector<std::pair<std::function<XlaOp(XlaOp)>, std::string>>({
{IsFinite, "is_finite"},
{IsInf, "is_inf"},
{IsPosInf, "is_pos_inf"},
{IsNegInf, "is_neg_inf"},
{IsNan, "is_nan"},
{Erf, "erf"},
{Erfc, "erfc"},
{Lgamma, "lgamma"},
{Digamma, "digamma"},
{RoundToEven, "round_to_even"},
})) {
SCOPED_TRACE(test.second);
XlaBuilder b(TestName());
XlaOp p = Parameter(&b, 0, shape, "p0");
test.first(p);
if (primitive_util::IsFloatingPointType(ty)) {
TF_EXPECT_OK(b.first_error());
} else {
EXPECT_FALSE(b.first_error().ok());
}
}
}
}
XLA_TEST_F(MathTest, SqrtF32) {
XlaBuilder builder(TestName());
Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F32);
std::unique_ptr<GlobalData> zero_data =
client_->TransferToServer(zero_literal).value();
XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
Sqrt(zero);
ComputeAndCompareR0<float>(&builder, 0.0f, {zero_data.get()}, error_spec_);
}
XLA_TEST_F(MathTest, SqrtF64) {
XlaBuilder builder(TestName());
Literal zero_literal = LiteralUtil::Zero(PrimitiveType::F64);
std::unique_ptr<GlobalData> zero_data =
client_->TransferToServer(zero_literal).value();
XlaOp zero = Parameter(&builder, 0, zero_literal.shape(), "zero");
Sqrt(zero);
  ComputeAndCompareR0<double>(&builder, 0.0, {zero_data.get()}, error_spec_);
}
#ifndef XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT64
XLA_TEST_F(MathTest, ErfInvF64) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(
&builder, {-0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1,
0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9});
ErfInv(x);
std::vector<double> expected = {-1.163087153676674, -0.9061938024368231,
-0.732869077959217, -0.5951160814499948,
-0.4769362762044698, -0.37080715859355795,
-0.27246271472675443, -0.1791434546212916,
-0.08885599049425767, 0.,
0.08885599049425777, 0.1791434546212916,
0.27246271472675443, 0.37080715859355784,
0.4769362762044698, 0.5951160814499948,
0.732869077959217, 0.9061938024368231,
1.1630871536766736};
ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec{1e-15});
}
#endif
XLA_TEST_F(MathTest, SquareTenValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
Square(x);
std::vector<float> expected = {4.41, 6.76, 6.76, 16., 4.41,
5.29, 25., 0.81, 5.76, 2.56};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, ReciprocalTenValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder, {2.1, -2.6, 2.6, -4.0, 2.1, 2.3, -5.0, -0.9, -2.4, 1.6});
Reciprocal(x);
std::vector<float> expected = {
0.47619048, -0.38461538, 0.38461538, -0.25, 0.47619048,
0.43478261, -0.2, -1.11111111, -0.41666667, 0.625};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, SqrtZeroes) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {0.0, -0.0});
Sqrt(x);
ComputeAndCompareR1<float>(&builder, {0, 0}, {}, error_spec_);
}
XLA_TEST_F(MathTest, SqrtSixValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {16.0, 1.0, 1024.0, 0.16, 0.2, 12345});
Sqrt(x);
std::vector<float> expected = {4, 1, 32, 0.4, 0.4472, 111.1080};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, CbrtSixF32Values) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331});
Cbrt(x);
std::vector<float> expected = {2, 1, 16, -4, 1.2, 11};
ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.001));
}
XLA_TEST_F(MathTest, CbrtSixF64Values) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(&builder, {8.0, 1.0, 4096.0, -64.0, 1.728, 1331});
Cbrt(x);
std::vector<double> expected = {2, 1, 16, -4, 1.2, 11};
ComputeAndCompareR1<double>(&builder, expected, {}, ErrorSpec(0.001));
}
XLA_TEST_F(MathTest, SinhSmallValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11});
Sinh(x);
std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AsinhSmallValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1e-3, 1e-5, 1e-7, 1e-9, 1e-11});
Asinh(x);
std::vector<float> expected = {1e-3, 1e-5, 1e-7, 1e-9, 1e-11};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AtanhSmallValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1e-8, 1e-9, 1e-10, 1e-11});
Atanh(x);
std::vector<float> expected = {1e-8, 1e-9, 1e-10, 1e-11};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, Lgamma) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 0.5, 1.5,
2.5, -1.5, -3.5, -5.5});
Lgamma(x);
std::vector<float> expected = {
0,
0,
static_cast<float>(std::log(2)),
static_cast<float>(std::log(6)),
static_cast<float>(std::log(24)),
static_cast<float>(std::log(120)),
static_cast<float>(std::log(M_PI) / 2),
static_cast<float>(std::log(M_PI) / 2 - std::log(2)),
static_cast<float>(std::log(M_PI) / 2 - std::log(4) + std::log(3)),
static_cast<float>(std::log(M_PI) / 2 - std::log(3) + std::log(4)),
static_cast<float>(std::log(M_PI) / 2 - std::log(105) + std::log(16)),
static_cast<float>(std::log(M_PI) / 2 - std::log(10395) + std::log(64))};
error_spec_ = ErrorSpec{0.001};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, LgammaF16) {
SetFastMathDisabled(true);
XlaBuilder b(TestName());
auto x = ConstantR1<half>(&b, {
half(-7360.0),
half(-4066.0),
half(-5.9605e-08),
});
Lgamma(x);
std::vector<half> expected = {
std::numeric_limits<half>::infinity(),
std::numeric_limits<half>::infinity(),
half(16.64),
};
ComputeAndCompareR1<half>(&b, expected, {}, ErrorSpec{0.1});
}
#endif
XLA_TEST_F(MathTest, Digamma) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(&builder, {1.0, 0.5, 1 / 3.0, 0.25, 1 / 6.0, 0.125,
2.0, 3.0, 4.0, 6.0, 8.0, 9.0});
Digamma(x);
constexpr double euler_mascheroni =
0.57721566490153286060651209008240243104215933593992;
std::vector<float> expected = {
static_cast<float>(-euler_mascheroni),
static_cast<float>(-2 * std::log(2) - euler_mascheroni),
static_cast<float>(-M_PI / 2 / std::sqrt(3) - 3 * std::log(3) / 2 -
euler_mascheroni),
static_cast<float>(-M_PI / 2 - 3 * std::log(2) - euler_mascheroni),
static_cast<float>(-M_PI * std::sqrt(3) / 2 - 2 * std::log(2) -
3 * std::log(3) / 2 - euler_mascheroni),
static_cast<float>(
-M_PI / 2 - 4 * std::log(2) -
(M_PI + std::log(2 + std::sqrt(2)) - std::log(2 - std::sqrt(2))) /
std::sqrt(2) -
euler_mascheroni),
static_cast<float>(1 - euler_mascheroni),
static_cast<float>(1.5 - euler_mascheroni),
static_cast<float>(11 / 6.0 - euler_mascheroni),
static_cast<float>(137 / 60.0 - euler_mascheroni),
static_cast<float>(363 / 140.0 - euler_mascheroni),
static_cast<float>(761 / 280.0 - euler_mascheroni)};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, Igamma) {
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<float>(
&builder,
{{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143},
{1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882},
{1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}});
auto x = ConstantR3FromArray3D<float>(
&builder,
{{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617},
{1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269},
{1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}});
Igamma(a, x);
Array3D<float> expected = {
{{0.78746926, 0.99940502, 0.98028261, 0.97033807, 0.99054696},
{0.33265522, 0.99983558, 0.32599159, 0.99923275, 0.99980893},
{0.74343963, 0.46703197, 0.33923541, 0.99978511, 0.99460685}}};
ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_);
}
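// NaN inputs and out-of-domain parameters (a <= 0) must propagate NaN through
// Igamma rather than producing finite garbage.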
XLA_TEST_F(MathTest, IgammaSpecialValues) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
const float nan = std::numeric_limits<float>::quiet_NaN();
auto a =
ConstantR1<float>(&builder, {nan, nan, 0.53327996, -6.00773744602e+37,
-1.3937809742e+31, -23.351348877});
auto x = ConstantR1<float>(
&builder, {nan, 8.97671773, nan, nan, 0.0, 6.02455484352e-39});
Igamma(a, x);
std::vector<float> expected = {nan, nan, nan, nan, nan, nan};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, IgammaF16) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<half>(
&builder,
{{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)},
{half(1.79378), half(1.05317), half(0.85049), half(1.3995)},
{half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}});
Igamma(a, a);
Array3D<half> expected = {
{{half(0.7068214), half(0.6041154), half(0.67748886), half(0.60799426)},
{half(0.599202), half(0.6288743), half(0.64280254), half(0.6121421)},
{half(0.6220287), half(0.6384635), half(0.6152258), half(0.6072449)}}};
ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-3});
}
#endif
XLA_TEST_F(MathTest, Igammac) {
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<float>(
&builder,
{{{0.3760359, 1.62685306, 0.53327996, 1.5111382, 0.3521143},
{1.79378175, 1.05317882, 0.85049253, 1.399534, 0.22073882},
{1.17725309, 0.90727209, 1.32418503, 1.53238533, 0.51984756}}});
auto x = ConstantR3FromArray3D<float>(
&builder,
{{{0.56420934, 8.97671773, 2.81068609, 4.50655124, 2.88178617},
{1.01795164, 8.86298411, 0.29232942, 8.17661015, 5.67652269},
{1.59959565, 0.54463897, 0.6585252, 9.83192283, 3.93372669}}});
Igammac(a, x);
Array3D<float> expected = {{{2.12530741e-01, 5.94977775e-04, 1.97173867e-02,
2.96619296e-02, 9.45303689e-03},
{6.67344782e-01, 1.64421996e-04, 6.74008406e-01,
7.67252602e-04, 1.91071108e-04},
{2.56560373e-01, 5.32968026e-01, 6.60764593e-01,
2.14889688e-04, 5.39314824e-03}}};
ComputeAndCompareR3<float>(&builder, expected, {}, error_spec_);
}
#if !defined(XLA_BACKEND_DOES_NOT_SUPPORT_FLOAT16)
XLA_TEST_F(MathTest, IgammacF16) {
SetFastMathDisabled(true);
XlaBuilder builder(TestName());
auto a = ConstantR3FromArray3D<half>(
&builder,
{{{half(0.37603), half(1.6268), half(0.53327), half(1.5111)},
{half(1.79378), half(1.05317), half(0.85049), half(1.3995)},
{half(1.17725), half(0.90727), half(1.32418), half(1.5323)}}});
Igammac(a, a);
Array3D<half> expected = {
{{half(0.29317862), half(0.39588454), half(0.32251117), half(0.39200574)},
{half(0.40079802), half(0.37112573), half(0.35719746), half(0.3878579)},
{half(0.3779713), half(0.36153653), half(0.38477424),
half(0.39275512)}}};
ComputeAndCompareR3<half>(&builder, expected, {}, ErrorSpec{1e-4});
}
#endif
XLA_TEST_F(MathTest, RoundToEven) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder, {-1.4, -1.5, -2.5, -0.5, 0, 0.5, 1.5, 2.5, 3.5, 4.5});
RoundToEven(x);
std::vector<float> expected = {-1.0, -2.0, -2.0, -0.0, 0,
0.0, 2.0, 2.0, 4.0, 4.0};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, ErfRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Erf(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, ErfcRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Erfc(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, LgammaRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Lgamma(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, DigammaRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
Digamma(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, RoundToEvenRejectsComplexInputs) {
XlaBuilder b(TestName());
auto x = ConstantR1<std::complex<float>>(&b, {{0, 0}});
RoundToEven(x);
EXPECT_FALSE(b.Build().status().ok());
}
XLA_TEST_F(MathTest, BesselI0eFloat) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI0e(x);
std::vector<float> expected = {0.0897803118848,
0.0947062952128,
0.100544127361,
0.107615251671,
0.116426221213,
0.127833337163,
0.143431781857,
0.16665743264,
0.207001921224,
0.308508322554,
1.0,
0.308508322554,
0.207001921224,
0.16665743264,
0.143431781857,
0.127833337163,
0.116426221213,
0.107615251671,
0.100544127361,
0.0947062952128,
0.0897803118848};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI0eDouble)) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI0e(x);
std::vector<double> expected = {0.0897803118848,
0.0947062952128,
0.100544127361,
0.107615251671,
0.116426221213,
0.127833337163,
0.143431781857,
0.16665743264,
0.207001921224,
0.308508322554,
1.0,
0.308508322554,
0.207001921224,
0.16665743264,
0.143431781857,
0.127833337163,
0.116426221213,
0.107615251671,
0.100544127361,
0.0947062952128,
0.0897803118848};
ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, BesselI1eFloat) {
XlaBuilder builder(TestName());
auto x = ConstantR1<float>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI1e(x);
std::vector<float> expected = {-0.0875062221833,
-0.092036796872,
-0.0973496147565,
-0.103697667463,
-0.11146429929,
-0.121262681384,
-0.134142493293,
-0.152051459309,
-0.178750839502,
-0.215269289249,
0.0,
0.215269289249,
0.178750839502,
0.152051459309,
0.134142493293,
0.121262681384,
0.11146429929,
0.103697667463,
0.0973496147565,
0.092036796872,
0.0875062221833};
ComputeAndCompareR1<float>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, DISABLED_ON_TPU(BesselI1eDouble)) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(
&builder,
{-20.0, -18.0, -16.0, -14.0, -12.0, -10.0, -8.0, -6.0, -4.0, -2.0, 0.0,
2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0});
BesselI1e(x);
std::vector<double> expected = {-0.0875062221833,
-0.092036796872,
-0.0973496147565,
-0.103697667463,
-0.11146429929,
-0.121262681384,
-0.134142493293,
-0.152051459309,
-0.178750839502,
-0.215269289249,
0.0,
0.215269289249,
0.178750839502,
0.152051459309,
0.134142493293,
0.121262681384,
0.11146429929,
0.103697667463,
0.0973496147565,
0.092036796872,
0.0875062221833};
ComputeAndCompareR1<double>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, AcosComplexValues) {
XlaBuilder builder(TestName());
auto x = ConstantR1<std::complex<float>>(
&builder, {{0, 0}, {0, 1}, {1, 1}, {0.8, 0.2}});
Acos(x);
std::vector<std::complex<float>> expected = {
{1.5707963267948966, 0},
{1.5707963267948966, -0.881373587019543},
{0.9045568943023814, -1.0612750619050357},
{0.7011246914497526, -0.30527648462436596}};
ComputeAndCompareR1<std::complex<float>>(&builder, expected, {}, error_spec_);
}
XLA_TEST_F(MathTest, ZetaF64) {
XlaBuilder builder(TestName());
auto x = ConstantR1<double>(&builder, {2.0});
auto q = ConstantR1<double>(&builder, {1.0});
Zeta(x, q);
std::vector<double> expected = {1.64493406684823};
ComputeAndCompareR1<double>(&builder, expected, {},
ErrorSpec{0.00000000000001});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/math_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db56bb06-7c34-4ecb-b2e3-92e681833f69 | cpp | tensorflow/tensorflow | tensor | tensorflow/lite/delegates/gpu/cl/tensor.cc | tensorflow/cc/experimental/base/tests/tensor_test.cc | #include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_image_format.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
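// Allocates the OpenCL memory object (buffer, 2D/3D texture, texture array,
// or single-channel-group texture) requested by the descriptor, copying any
// attached host data via CL_MEM_COPY_HOST_PTR.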
absl::Status AllocateTensorMemoryInternal(const CLContext& context,
const TensorDescriptor& descriptor,
CLMemory* result) {
cl_mem_flags mem_flags = CL_MEM_READ_WRITE;
const uint8_t* data_ptr = nullptr;
if (!descriptor.GetData().empty()) {
data_ptr = descriptor.GetData().data();
mem_flags |= CL_MEM_COPY_HOST_PTR;
}
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
switch (descriptor.GetStorageType()) {
case TensorStorageType::BUFFER:
case TensorStorageType::IMAGE_BUFFER: {
const size_t data_size = storage_dims[0] * descriptor.GetElementSize() *
SizeOf(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
clCreateBuffer(context.context(), mem_flags, data_size,
const_cast<uint8_t*>(data_ptr), &error_code);
if (!memory) {
return absl::UnknownError(
absl::StrCat("Failed to allocate device memory (clCreateBuffer): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::TEXTURE_2D: {
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = 0;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
format.image_channel_order = CL_RGBA;
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
CreateImage2DLegacy(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create 2D texture (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::TEXTURE_3D: {
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE3D;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = storage_dims[2];
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
format.image_channel_order = CL_RGBA;
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
CreateImage3DLegacy(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create 3D texture (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::TEXTURE_ARRAY: {
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D_ARRAY;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = 0;
desc.image_array_size = storage_dims[2];
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
format.image_channel_order = CL_RGBA;
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
clCreateImage(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create 2D texture array (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::SINGLE_TEXTURE_2D: {
const int element_size = descriptor.GetElementSize();
if (element_size > 4) {
      return absl::InvalidArgumentError(absl::StrCat(
          "SINGLE_TEXTURE_2D supports only channels in range [1-4], but ",
          element_size, " was provided"));
}
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = 0;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
if (context.IsFloatTexture2DSupported(element_size,
descriptor.GetDataType())) {
format.image_channel_order = ToChannelOrder(element_size);
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
} else {
return absl::InvalidArgumentError(
absl::StrCat("This device doesn't support ", element_size,
"-channel textures."));
}
cl_int error_code;
cl_mem memory =
CreateImage2DLegacy(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create single 2D texture (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
default:
return absl::InternalError("Unsupported tensor storage type");
}
}
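// Wraps an existing cl_mem buffer in a CL_MEM_OBJECT_IMAGE1D_BUFFER view so
// IMAGE_BUFFER storage can be read through the texture path.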
absl::Status CreateImageBufferFromBuffer(const CLContext& context,
cl_mem memory, DataType data_type,
int width, cl_mem* result) {
cl_image_format format;
cl_image_desc desc;
std::memset(&desc, 0, sizeof(desc));
desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
desc.image_width = width;
desc.mem_object = memory;
format.image_channel_data_type = DataTypeToChannelType(data_type);
format.image_channel_order = CL_RGBA;
cl_int error_code;
*result = clCreateImage(context.context(), CL_MEM_READ_WRITE, &format, &desc,
nullptr, &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create Image from Buffer (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
return absl::OkStatus();
}
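// Builds a 2D image view over a buffer; image_row_pitch pads each row to
// width_pixel_alignment pixels so the view matches the buffer layout.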
absl::Status CreateImage2DFromBuffer(const CLContext& context, cl_mem memory,
DataType data_type, int width, int height,
int channels, int width_pixel_alignment,
cl_mem* result) {
if (!context.IsFloatTexture2DSupported(channels, data_type)) {
return absl::InvalidArgumentError(absl::StrCat(
"This device doesn't support ", channels, "-channel textures."));
}
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = width;
desc.image_height = height;
desc.image_depth = 0;
const size_t width_aligned = AlignByN(width, width_pixel_alignment);
desc.image_row_pitch = width_aligned * channels * SizeOf(data_type);
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.mem_object = memory;
cl_image_format format;
format.image_channel_order = ToChannelOrder(channels);
format.image_channel_data_type = DataTypeToChannelType(data_type);
cl_int error_code;
*result = CreateImage2DLegacy(context.context(), CL_MEM_READ_WRITE, &format,
&desc, nullptr, &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create Image2D from Buffer (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
return absl::OkStatus();
}
}
Tensor::Tensor(cl_mem memory, bool memory_owner,
const TensorDescriptor& descriptor)
: memory_(memory),
image_buffer_memory_(nullptr),
memory_owner_(memory_owner),
descriptor_(descriptor) {}
Tensor::Tensor(cl_mem memory, bool memory_owner, cl_mem image_buffer_memory,
const TensorDescriptor& descriptor)
: memory_(memory),
image_buffer_memory_(image_buffer_memory),
memory_owner_(memory_owner),
descriptor_(descriptor) {
if (image_buffer_memory &&
(descriptor.GetStorageType() == TensorStorageType::TEXTURE_2D ||
descriptor.GetStorageType() == TensorStorageType::SINGLE_TEXTURE_2D)) {
buffer_based_ = true;
}
}
Tensor::Tensor(Tensor&& tensor)
: memory_(tensor.memory_),
image_buffer_memory_(tensor.image_buffer_memory_),
memory_owner_(tensor.memory_owner_),
buffer_based_(tensor.buffer_based_),
descriptor_(std::move(tensor.descriptor_)),
aligned_texture_width_(tensor.aligned_texture_width_) {
tensor.memory_ = nullptr;
tensor.image_buffer_memory_ = nullptr;
}
Tensor& Tensor::operator=(Tensor&& tensor) {
if (this != &tensor) {
Release();
std::swap(memory_, tensor.memory_);
std::swap(image_buffer_memory_, tensor.image_buffer_memory_);
std::swap(memory_owner_, tensor.memory_owner_);
std::swap(buffer_based_, tensor.buffer_based_);
descriptor_ = std::move(tensor.descriptor_);
std::swap(aligned_texture_width_, tensor.aligned_texture_width_);
}
return *this;
}
void Tensor::Release() {
if (image_buffer_memory_) {
clReleaseMemObject(image_buffer_memory_);
image_buffer_memory_ = nullptr;
}
if (memory_owner_ && memory_) {
clReleaseMemObject(memory_);
memory_ = nullptr;
}
}
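// Publishes this tensor's memory under the resource names ("buffer",
// "image2d", "image2d_array", "image3d", "image_buffer") that generated
// kernel code expects, honoring buffer aliases for write-only textures.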
absl::Status Tensor::GetGPUResources(const GPUObjectDescriptor* obj_ptr,
GPUResourcesWithValue* resources) const {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(obj_ptr);
if (buffer_desc) {
if (descriptor_.GetStorageType() != TensorStorageType::BUFFER &&
descriptor_.GetStorageType() != TensorStorageType::IMAGE_BUFFER) {
return absl::InvalidArgumentError(
"Tensor can be used with BufferDescriptor only with "
"TensorStorageType::BUFFER/TensorStorageType::IMAGE_BUFFER.");
}
resources->buffers.push_back({"buffer", memory_});
return absl::OkStatus();
}
const auto* tensor_desc = dynamic_cast<const TensorDescriptor*>(obj_ptr);
if (!tensor_desc) {
return absl::InvalidArgumentError("Expected TensorDescriptor on input.");
}
tensor_desc->GetGpuResources(descriptor_.GetBHWDCShape(),
&resources->generic);
if (descriptor_.GetStorageType() == TensorStorageType::BUFFER) {
resources->buffers.push_back({"buffer", memory_});
} else if (descriptor_.GetStorageType() == TensorStorageType::TEXTURE_2D ||
descriptor_.GetStorageType() ==
TensorStorageType::SINGLE_TEXTURE_2D) {
if (obj_ptr->GetAccess() == AccessType::WRITE &&
tensor_desc->GetUseBufferForWriteOnlyTexture2d()) {
resources->AddInt("aligned_texture_width", aligned_texture_width_);
resources->buffers.push_back({"buffer", memory_});
} else {
cl_mem mem = buffer_based_ ? image_buffer_memory_ : memory_;
resources->images2d.push_back({"image2d", mem});
}
} else if (descriptor_.GetStorageType() == TensorStorageType::TEXTURE_ARRAY) {
resources->image2d_arrays.push_back({"image2d_array", memory_});
} else if (descriptor_.GetStorageType() == TensorStorageType::TEXTURE_3D) {
resources->images3d.push_back({"image3d", memory_});
} else if (descriptor_.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
if (obj_ptr->GetAccess() == AccessType::WRITE &&
tensor_desc->GetUseBufferForWriteOnlyImageBuffer()) {
resources->buffers.push_back({"buffer", memory_});
} else {
resources->image_buffers.push_back(
{"image_buffer", image_buffer_memory_});
}
}
return absl::OkStatus();
}
cl_mem Tensor::GetMemoryPtr() const {
if (buffer_based_) {
return image_buffer_memory_;
} else {
return descriptor_.GetStorageType() == TensorStorageType::IMAGE_BUFFER
? image_buffer_memory_
: memory_;
}
}
cl_mem Tensor::GetMemoryPtrForWriting() const {
if (buffer_based_) {
return image_buffer_memory_;
} else {
return memory_;
}
}
absl::Status Tensor::CreateFromDescriptor(const TensorDescriptor& desc,
CLContext* context) {
desc.CopyWithoutData(&descriptor_);
memory_owner_ = true;
CLMemory memory;
RETURN_IF_ERROR(AllocateTensorMemoryInternal(*context, desc, &memory));
memory_ = memory.Release();
if (desc.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
std::vector<uint64_t> storage_dims = descriptor_.GetStorageDims();
RETURN_IF_ERROR(
CreateImageBufferFromBuffer(*context, memory_, desc.GetDataType(),
storage_dims[0], &image_buffer_memory_));
}
return absl::OkStatus();
}
absl::Status Tensor::UploadDescriptorData(const TensorDescriptor& desc,
CLCommandQueue* queue) {
return WriteData(desc.GetData().data(), queue);
}
absl::Status Tensor::ToDescriptor(TensorDescriptor* desc,
CLCommandQueue* queue) const {
*desc = descriptor_;
std::vector<uint8_t> data(GetMemorySizeInBytes());
RETURN_IF_ERROR(ReadData(data.data(), queue));
desc->SetData(std::move(data));
return absl::OkStatus();
}
absl::Status Tensor::WriteData(const void* ptr, CLCommandQueue* queue) {
switch (descriptor_.GetStorageType()) {
case TensorStorageType::BUFFER:
case TensorStorageType::IMAGE_BUFFER:
RETURN_IF_ERROR(
queue->EnqueueWriteBuffer(memory_, GetMemorySizeInBytes(), ptr));
break;
case TensorStorageType::TEXTURE_ARRAY:
case TensorStorageType::TEXTURE_2D:
case TensorStorageType::TEXTURE_3D:
case TensorStorageType::SINGLE_TEXTURE_2D: {
cl_mem mem = buffer_based_ ? image_buffer_memory_ : memory_;
RETURN_IF_ERROR(queue->EnqueueWriteImage(
mem, descriptor_.GetFullTensorRegion(), ptr));
break;
}
default:
return absl::InternalError("Unsupported tensor storage type");
}
return absl::OkStatus();
}
absl::Status Tensor::ReadData(void* ptr, CLCommandQueue* queue) const {
switch (descriptor_.GetStorageType()) {
case TensorStorageType::BUFFER:
case TensorStorageType::IMAGE_BUFFER:
RETURN_IF_ERROR(
queue->EnqueueReadBuffer(memory_, GetMemorySizeInBytes(), ptr));
break;
case TensorStorageType::TEXTURE_ARRAY:
case TensorStorageType::TEXTURE_2D:
case TensorStorageType::TEXTURE_3D:
case TensorStorageType::SINGLE_TEXTURE_2D: {
cl_mem mem = buffer_based_ ? image_buffer_memory_ : memory_;
RETURN_IF_ERROR(
queue->EnqueueReadImage(mem, descriptor_.GetFullTensorRegion(), ptr));
break;
}
default:
return absl::InternalError("Unsupported tensor storage type");
}
return absl::OkStatus();
}
absl::Status CreateTensor(const CLContext& context,
const TensorDescriptor& descriptor, Tensor* result) {
CLMemory mem;
RETURN_IF_ERROR(AllocateTensorMemoryInternal(context, descriptor, &mem));
cl_mem memory = mem.Release();
if (descriptor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
cl_mem image_memory;
RETURN_IF_ERROR(
CreateImageBufferFromBuffer(context, memory, descriptor.GetDataType(),
storage_dims[0], &image_memory));
*result = Tensor(memory, true, image_memory, descriptor);
} else {
*result = Tensor(memory, true, descriptor);
}
return absl::OkStatus();
}
absl::Status CreateTensorShared(const CLContext& context, cl_mem memory,
const TensorDescriptor& descriptor,
Tensor* result) {
const bool memory_owner = false;
if (descriptor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
cl_mem image_memory;
RETURN_IF_ERROR(
CreateImageBufferFromBuffer(context, memory, descriptor.GetDataType(),
storage_dims[0], &image_memory));
*result = Tensor(memory, memory_owner, image_memory, descriptor);
} else {
*result = Tensor(memory, memory_owner, descriptor);
}
return absl::OkStatus();
}
absl::Status CreateTensorSharedImage2DBuffer(const CLContext& context,
cl_mem memory,
const TensorDescriptor& descriptor,
int width_pixel_alignment,
Tensor* result) {
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
const int width = storage_dims[0];
const int height = storage_dims[1];
const int channels = descriptor.GetElementSize();
cl_mem image_memory;
RETURN_IF_ERROR(CreateImage2DFromBuffer(
context, memory, descriptor.GetDataType(), width, height, channels,
width_pixel_alignment, &image_memory));
*result = Tensor(memory, false, image_memory, descriptor);
result->aligned_texture_width_ = AlignByN(width, width_pixel_alignment);
return absl::OkStatus();
}
absl::Status AllocateTensorMemory(const CLContext& context,
const TensorDescriptor& descriptor,
CLMemory* result) {
return AllocateTensorMemoryInternal(context, descriptor, result);
}
}
}
} | #include "tensorflow/cc/experimental/base/public/tensor.h"
#include <stddef.h>
#include <stdint.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/cc/experimental/base/public/status.h"
#include "tensorflow/cc/experimental/base/tests/tensor_types_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace {
using tensorflow::experimental::cc::Status;
using tensorflow::experimental::cc::Tensor;
using SimpleTypes = ::testing::Types<
tensorflow::FloatType, tensorflow::DoubleType, tensorflow::Int32Type,
tensorflow::UINT8Type, tensorflow::INT8Type, tensorflow::INT64Type,
tensorflow::UINT16Type, tensorflow::UINT32Type, tensorflow::UINT64Type>;
template <typename T>
class ConstructScalarTensorTest : public ::testing::Test {};
TYPED_TEST_SUITE(ConstructScalarTensorTest, SimpleTypes);
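// A scalar tensor is rank 0 with a single element; FromBuffer with an empty
// shape must preserve the dtype, byte size, and value.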
TYPED_TEST(ConstructScalarTensorTest, ValidTensorAttributesAfterConstruction) {
Status status;
TF_DataType dtype = TypeParam::kDType;
typename TypeParam::type value = 42;
Tensor tensor = Tensor::FromBuffer(dtype, {},
&value,
sizeof(value),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 0);
EXPECT_EQ(tensor.dtype(), dtype);
EXPECT_EQ(*reinterpret_cast<typename TypeParam::type*>(tensor.data()), 42);
EXPECT_EQ(tensor.num_bytes(), sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), 1);
}
template <typename T>
class Construct1DTensorTest : public ::testing::Test {};
TYPED_TEST_SUITE(Construct1DTensorTest, SimpleTypes);
TYPED_TEST(Construct1DTensorTest, ValidTensorAttributesAfterConstruction) {
Status status;
TF_DataType dtype = TypeParam::kDType;
std::vector<typename TypeParam::type> value = {42, 100, 0, 1, 4, 29};
std::vector<int64_t> shape;
shape.push_back(value.size());
Tensor tensor = Tensor::FromBuffer(
dtype, shape,
value.data(),
value.size() * sizeof(typename TypeParam::type),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 1);
EXPECT_EQ(tensor.dtype(), dtype);
absl::Span<const typename TypeParam::type> tensor_view(
reinterpret_cast<typename TypeParam::type*>(tensor.data()), value.size());
EXPECT_EQ(tensor_view[0], 42);
EXPECT_EQ(tensor_view[1], 100);
EXPECT_EQ(tensor_view[2], 0);
EXPECT_EQ(tensor_view[3], 1);
EXPECT_EQ(tensor_view[4], 4);
EXPECT_EQ(tensor_view[5], 29);
EXPECT_EQ(tensor.num_bytes(),
value.size() * sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), value.size());
}
template <typename T>
class Construct2DTensorTest : public ::testing::Test {};
TYPED_TEST_SUITE(Construct2DTensorTest, SimpleTypes);
TYPED_TEST(Construct2DTensorTest, ValidTensorAttributesAfterConstruction) {
Status status;
TF_DataType dtype = TypeParam::kDType;
std::vector<typename TypeParam::type> value = {42, 100, 0, 1, 4, 29};
std::vector<int64_t> shape({2, 3});
Tensor tensor = Tensor::FromBuffer(
dtype, shape,
value.data(),
value.size() * sizeof(typename TypeParam::type),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 2);
EXPECT_EQ(tensor.dtype(), dtype);
absl::Span<const typename TypeParam::type> tensor_view(
reinterpret_cast<typename TypeParam::type*>(tensor.data()), value.size());
EXPECT_EQ(tensor_view[0], 42);
EXPECT_EQ(tensor_view[1], 100);
EXPECT_EQ(tensor_view[2], 0);
EXPECT_EQ(tensor_view[3], 1);
EXPECT_EQ(tensor_view[4], 4);
EXPECT_EQ(tensor_view[5], 29);
EXPECT_EQ(tensor.num_bytes(),
value.size() * sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), value.size());
}
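// The deleter callback must fire when the tensor goes out of scope, showing
// that Tensor::FromBuffer borrows the caller's buffer and invokes the
// caller-supplied cleanup instead of copying.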
TEST(CPPTensorAPI, ConstructTensorFromBuffer) {
bool done = false;
Status status;
std::vector<int32_t> data_vector({12, 14, 20, 18, 39, 42, 100});
{
std::vector<int64_t> shape;
shape.push_back(data_vector.size());
Tensor::DeleterCallback callback = [&done](void* data, size_t len) {
done = true;
};
Tensor tensor =
Tensor::FromBuffer(TF_INT32, shape,
data_vector.data(),
data_vector.size() * sizeof(int32_t),
callback, &status);
ASSERT_TRUE(status.ok()) << status.message();
}
EXPECT_TRUE(done);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/base/tests/tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
36f6b8c3-4e4d-44d4-a114-9c8ef199e3e6 | cpp | tensorflow/tensorflow | execution_trace_utils | third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils.cc | third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils_test.cc | #include "xla/mlir/tools/mlir_replay/public/execution_trace_utils.h"
#include <cassert>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
#include <variant>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "xla/literal.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include "xla/mlir/tools/mlir_replay/public/execution_trace.pb.h"
#include "xla/primitive_util.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace interpreter {
namespace {
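// Serializes an InterpreterValue (scalar, tensor/memref, or tuple) into a
// TracedValue proto, recording element type, bit width, shape, and payload.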
struct TraceInterpreterValueVisitor {
TracedValue out;
void Add(float v) { out.add_floats(v); }
void Add(double v) { out.add_doubles(v); }
void Add(std::complex<float> v) {
out.add_floats(v.real());
out.add_floats(v.imag());
}
void Add(std::complex<double> v) {
out.add_doubles(v.real());
out.add_doubles(v.imag());
}
void Add(int64_t v) { out.add_ints(v); }
void Add(int32_t v) { out.add_ints(v); }
void Add(int16_t v) { out.add_ints(v); }
void Add(int8_t v) { out.add_ints(v); }
void Add(uint64_t v) { out.add_uints(v); }
void Add(uint32_t v) { out.add_uints(v); }
void Add(uint16_t v) { out.add_uints(v); }
void Add(uint8_t v) { out.add_uints(v); }
void Add(bool v) { out.add_ints(static_cast<int64_t>(v)); }
template <typename T>
void operator()(T v) {
SetElementType<T>();
out.set_is_scalar(true);
Add(v);
}
void operator()(const Tuple& t) {
out.set_element_type(TracedValue::TUPLE);
for (const auto& v : t.values) {
*out.add_tuple_elements() = ValueToTracedValue(*v);
}
}
template <typename T>
void operator()(const TensorOrMemref<T>& v) {
for (int64_t size : v.view.sizes) {
out.add_shape(size);
}
SetElementType<T>();
for (const auto& index : v.view.Indices()) {
Add(v.at(index));
}
}
template <typename T>
void SetElementType() {
out.set_element_type(GetElementType(T{}));
if constexpr (std::is_same_v<T, bool>) {
out.set_bit_width(1);
} else {
out.set_bit_width(sizeof(T) * 8);
}
}
template <typename T>
static TracedValue::ElementType GetElementType(const T&) {
if constexpr (std::is_floating_point_v<T>) {
return TracedValue::FLOAT;
} else if constexpr (std::is_integral_v<T>) {
if constexpr (std::is_unsigned_v<T>) {
return TracedValue::UNSIGNED;
} else {
return TracedValue::INTEGRAL;
}
} else {
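      // Intentionally ill-formed for unhandled element types so unsupported
      // instantiations fail at compile time rather than tracing UNKNOWN.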
T{"invalid type"} + 0;
return TracedValue::UNKNOWN;
}
}
template <typename T>
static TracedValue::ElementType GetElementType(const std::complex<T>&) {
return TracedValue::COMPLEX;
}
};
}
void ExecutionTraceListener::BeforeOp(ArrayRef<InterpreterValue> args,
Operation* op) {
auto* inst = regions_.back()->add_instructions();
inst->set_name(op->getName().getStringRef().str());
for (const auto& arg : args) {
*inst->add_args() = ValueToTracedValue(arg);
}
}
void ExecutionTraceListener::AfterOp(ArrayRef<InterpreterValue> results) {
auto* traced_results =
regions_.back()->mutable_instructions()->rbegin()->mutable_results();
for (const auto& result : results) {
*traced_results->Add() = ValueToTracedValue(result);
}
}
void ExecutionTraceListener::EnterRegion(ArrayRef<InterpreterValue> bbargs,
Region& region) {
if (regions_.empty()) {
regions_.push_back(trace_->mutable_trace());
} else {
regions_.push_back(
regions_.back()->mutable_instructions()->rbegin()->add_regions());
}
auto& traced_region = *regions_.back();
traced_region.set_region_number(region.getRegionNumber());
for (const auto& bbarg : bbargs) {
*traced_region.add_bbargs() = ValueToTracedValue(bbarg);
}
}
void ExecutionTraceListener::LeaveRegion(ArrayRef<InterpreterValue> yielded) {
for (const auto& result : yielded) {
*regions_.back()->add_results() = ValueToTracedValue(result);
}
regions_.pop_back();
}
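// Converts an interpreter value to MLIR attributes: tuples expand to one
// attribute per element, scalars are routed through a rank-0 tensor, and
// tensors become DenseElementsAttr.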
llvm::SmallVector<mlir::Attribute> ValueToAttribute(
const InterpreterValue& value, mlir::Type type) {
if (std::holds_alternative<Tuple>(value.storage)) {
auto types = type.cast<TupleType>().getTypes();
const auto& t = std::get<Tuple>(value.storage);
llvm::SmallVector<mlir::Attribute> attrs;
for (const auto& [v, ty] : llvm::zip(t.values, types)) {
auto attr = ValueToAttribute(*v, ty);
assert(attr.size() == 1 && "nested tuples not supported");
attrs.push_back(attr.front());
}
return attrs;
}
if (!value.IsTensor()) {
return {cast<DenseElementsAttr>(
ValueToAttribute(value.AsUnitTensor(),
mlir::RankedTensorType::get({}, type))
.front())
.getValues<mlir::Attribute>()[0]};
}
if (!type.isa<ShapedType>()) {
return {};
}
auto shaped_ty = type.cast<ShapedType>();
return {DispatchScalarType(shaped_ty, [&](auto dummy) -> mlir::Attribute {
using T = decltype(dummy);
auto& t = std::get<TensorOrMemref<T>>(value.storage);
SmallVector<T> vals;
for (const auto& index : t.view.Indices()) {
vals.push_back(t.at(index));
}
auto attr_ty =
shaped_ty.cloneWith(t.view.sizes, shaped_ty.getElementType());
if constexpr (std::is_same_v<T, bool>) {
return mlir::DenseElementsAttr::get(attr_ty, vals);
} else {
return mlir::DenseElementsAttr::get<T>(attr_ty, vals);
}
})};
}
namespace {
template <typename T>
TensorOrMemref<T> ArrayLiteralToTensor(const xla::Literal& literal) {
SmallVector<int64_t> layout;
if (literal.shape().has_layout()) {
llvm::copy(literal.shape().layout().minor_to_major(),
std::back_inserter(layout));
}
SmallVector<int64_t> shape{literal.shape().dimensions().begin(),
literal.shape().dimensions().end()};
auto result = TensorOrMemref<T>::Empty(shape, layout);
assert(literal.size_bytes() == result.buffer->GetByteSize() &&
"expected buffer sizes to match");
memcpy(result.buffer->at(0, 0), literal.untyped_data(),
result.buffer->GetByteSize());
return result;
}
}
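// Converts an XLA literal into an InterpreterValue, recursing into tuples;
// tokens and the f8/f16/bf16 element types are currently unimplemented.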
absl::StatusOr<InterpreterValue> LiteralToValue(const xla::Literal& literal) {
if (literal.shape().IsTuple()) {
auto elements = literal.Clone().DecomposeTuple();
Tuple result;
for (auto& element : elements) {
TF_ASSIGN_OR_RETURN(auto converted, LiteralToValue(element));
result.values.push_back(
std::make_shared<InterpreterValue>(std::move(converted)));
}
return {{result}};
}
if (literal.shape().IsToken()) {
return absl::UnimplementedError("token arguments are not implemented");
}
if (literal.shape().IsArray()) {
auto type = literal.shape().element_type();
if (xla::primitive_util::IsF8Type(type)) {
return absl::UnimplementedError(
absl::StrCat(xla::primitive_util::LowercasePrimitiveTypeName(type),
" not implemented"));
}
switch (type) {
case xla::PRED:
return {{ArrayLiteralToTensor<bool>(literal)}};
case xla::S8:
return {{ArrayLiteralToTensor<int8_t>(literal)}};
case xla::S16:
return {{ArrayLiteralToTensor<int16_t>(literal)}};
case xla::S32:
return {{ArrayLiteralToTensor<int32_t>(literal)}};
case xla::S64:
return {{ArrayLiteralToTensor<int64_t>(literal)}};
case xla::U8:
return {{ArrayLiteralToTensor<uint8_t>(literal)}};
case xla::U16:
return {{ArrayLiteralToTensor<uint16_t>(literal)}};
case xla::U32:
return {{ArrayLiteralToTensor<uint32_t>(literal)}};
case xla::U64:
return {{ArrayLiteralToTensor<uint64_t>(literal)}};
case xla::F16:
return absl::UnimplementedError("F16 not implemented");
case xla::F32:
return {{ArrayLiteralToTensor<float>(literal)}};
case xla::BF16:
return absl::UnimplementedError("BF16 not implemented");
case xla::F64:
return {{ArrayLiteralToTensor<double>(literal)}};
case xla::C64:
return {{ArrayLiteralToTensor<std::complex<float>>(literal)}};
case xla::C128:
return {{ArrayLiteralToTensor<std::complex<double>>(literal)}};
default:
break;
}
}
return absl::InvalidArgumentError("unexpected literal type");
}
absl::StatusOr<InterpreterValue> LiteralToValue(
const xla::LiteralProto& literal) {
TF_ASSIGN_OR_RETURN(auto deserialized,
xla::Literal::CreateFromProto(literal));
return LiteralToValue(deserialized);
}
absl::StatusOr<InterpreterValue> LiteralToValue(
const xla::LiteralProto& literal, mlir::Type type) {
TF_ASSIGN_OR_RETURN(auto result, LiteralToValue(literal));
return {DispatchScalarType(type, [&](auto dummy) -> InterpreterValue {
TensorOrMemref<decltype(dummy)> cast;
cast.view = result.View();
cast.buffer = result.GetBuffer();
return {cast};
})};
}
TracedValue ValueToTracedValue(const InterpreterValue& value) {
TraceInterpreterValueVisitor visitor;
std::visit(visitor, value.storage);
return visitor.out;
}
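// Inverse of ValueToTracedValue: rebuilds scalars, tensors, complex values,
// and tuples from the proto, dispatching on element type and bit width.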
absl::StatusOr<InterpreterValue> TracedValueToValue(
const TracedValue& traced_value) {
auto extract = [&](auto dummy, auto& elements) -> InterpreterValue {
using T = decltype(dummy);
if (traced_value.is_scalar()) {
return {static_cast<T>(elements[0])};
}
auto result =
TensorOrMemref<T>::Empty(llvm::to_vector(traced_value.shape()));
for (auto [index, element] : llvm::zip(result.view.Indices(), elements)) {
result.at(index) = element;
}
return {result};
};
auto extract_complex = [&](auto& elements) -> InterpreterValue {
using T = std::complex<std::decay_t<decltype(elements[0])>>;
if (traced_value.is_scalar()) {
return {T{elements[0], elements[1]}};
}
auto result =
TensorOrMemref<T>::Empty(llvm::to_vector(traced_value.shape()));
int64_t i = 0;
for (auto it = result.view.Indices().begin(),
end = result.view.Indices().end();
it != end; ++it, i += 2) {
result.at(*it) = {elements[i], elements[i + 1]};
}
return {result};
};
switch (traced_value.element_type()) {
case TracedValue::UNKNOWN:
break;
case TracedValue::FLOAT:
if (traced_value.bit_width() == 32) {
return extract(float{}, traced_value.floats());
}
return extract(double{}, traced_value.doubles());
case TracedValue::UNSIGNED:
switch (traced_value.bit_width()) {
case 1:
return extract(bool{}, traced_value.ints());
case 8:
return extract(uint8_t{}, traced_value.uints());
case 16:
return extract(uint16_t{}, traced_value.uints());
case 32:
return extract(uint32_t{}, traced_value.uints());
case 64:
return extract(uint64_t{}, traced_value.uints());
}
break;
case TracedValue::INTEGRAL:
switch (traced_value.bit_width()) {
case 8:
return extract(int8_t{}, traced_value.ints());
case 16:
return extract(int16_t{}, traced_value.ints());
case 32:
return extract(int32_t{}, traced_value.ints());
case 64:
return extract(int64_t{}, traced_value.ints());
}
break;
case TracedValue::COMPLEX:
switch (traced_value.bit_width()) {
case 64:
return extract_complex(traced_value.floats());
case 128:
return extract_complex(traced_value.doubles());
}
break;
case TracedValue::TUPLE:
Tuple result;
for (const auto& elem : traced_value.tuple_elements()) {
TF_ASSIGN_OR_RETURN(auto converted, TracedValueToValue(elem));
result.values.push_back(
std::make_shared<InterpreterValue>(std::move(converted)));
}
return {{std::move(result)}};
}
return absl::InvalidArgumentError("unexpected type: " +
traced_value.DebugString());
}
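// Finds every execution of `op` in the trace: first records the op's static
// path (region numbers and op indices up to the enclosing func), then walks
// the trace along that path, collecting each matching instruction trace.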
llvm::SmallVector<const InstructionTrace*> FindOpExecutionsInTrace(
const ExecutionTrace& trace, mlir::Operation* op) {
llvm::SmallVector<int64_t> region_indices;
llvm::SmallVector<int64_t> op_indices;
std::function<void(mlir::Operation*)> get_op_path;
get_op_path = [&](mlir::Operation* op) {
auto* parent = op->getParentOp();
if (!llvm::isa<func::FuncOp>(parent)) {
get_op_path(parent);
region_indices.push_back(op->getParentRegion()->getRegionNumber());
}
int64_t index = 0;
while ((op = op->getPrevNode()) != nullptr) ++index;
op_indices.push_back(index);
};
get_op_path(op);
llvm::SmallVector<const InstructionTrace*> result;
std::function<void(const RegionTrace& trace, int index)> step;
step = [&](const RegionTrace& trace, int index) {
auto& instruction_trace = trace.instructions(op_indices[index]);
if (region_indices.size() > index) {
for (const auto& region : instruction_trace.regions()) {
if (region.region_number() == region_indices[index]) {
step(region, index + 1);
}
}
} else {
result.push_back(&instruction_trace);
}
};
step(trace.trace(), 0);
return result;
}
}
} | #include "xla/mlir/tools/mlir_replay/public/execution_trace_utils.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "llvm/ADT/STLExtras.h"
#include "mlir/Support/LLVM.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace interpreter {
namespace {
class TracedValueRoundTripTest
: public ::testing::TestWithParam<InterpreterValue> {};
TEST_P(TracedValueRoundTripTest, Run) {
auto traced_value = ValueToTracedValue(GetParam());
TF_ASSERT_OK_AND_ASSIGN(auto value, TracedValueToValue(traced_value));
EXPECT_EQ(GetParam(), value) << GetParam().ToString();
}
template <typename T>
InterpreterValue MakeTensor(ArrayRef<int64_t> shape, ArrayRef<T> values) {
auto result = TensorOrMemref<T>::Empty(shape);
for (auto [indices, value] : llvm::zip(result.view.Indices(), values)) {
result.at(indices) = value;
}
return {result};
}
template <typename T>
std::shared_ptr<T> WrapShared(T value) {
return std::make_shared<T>(std::move(value));
}
INSTANTIATE_TEST_SUITE_P(
RoundTrip, TracedValueRoundTripTest,
::testing::ValuesIn(std::vector<InterpreterValue>{
{uint8_t{42}},
{uint16_t{43}},
{uint32_t{44}},
{uint64_t{45}},
{int8_t{-47}},
{int16_t{-48}},
{int32_t{-49}},
{int64_t{-50}},
{float{42.0}},
{double{42.0}},
{std::complex<float>{1.0, 2.0}},
{std::complex<double>{3.0, 4.0}},
{true},
{false},
{MakeTensor<int16_t>({1, 2}, {42, 43})},
{MakeTensor<double>({2, 2}, {1.0, -INFINITY, INFINITY, NAN})},
{MakeTensor<std::complex<double>>({}, {{1.0, 2.0}})},
{Tuple{SmallVector<std::shared_ptr<InterpreterValue>>{
WrapShared(InterpreterValue{42}),
WrapShared(InterpreterValue{43.0}),
}}}}));
class FromLiteralTest
: public ::testing::TestWithParam<
std::pair<std::shared_ptr<xla::Literal>, InterpreterValue>> {};
TEST_P(FromLiteralTest, Run) {
TF_ASSERT_OK_AND_ASSIGN(auto value, LiteralToValue(*GetParam().first));
EXPECT_EQ(value, GetParam().second)
<< value.ToString() << " vs " << GetParam().second.ToString();
}
std::vector<std::pair<std::shared_ptr<xla::Literal>, InterpreterValue>>
MakeInputs() {
using ::xla::LiteralUtil;
return {
{WrapShared(LiteralUtil::CreateR2<uint8_t>({{41, 42}})),
MakeTensor<uint8_t>({1, 2}, {41, 42})},
{WrapShared(LiteralUtil::CreateR0<uint16_t>(43)),
MakeTensor<uint16_t>({}, {43})},
{WrapShared(LiteralUtil::CreateR0<uint32_t>(44)),
MakeTensor<uint32_t>({}, {44})},
{WrapShared(LiteralUtil::CreateR0<uint64_t>(45)),
MakeTensor<uint64_t>({}, {45})},
{WrapShared(LiteralUtil::CreateR0<int8_t>(46)),
MakeTensor<int8_t>({}, {46})},
{WrapShared(LiteralUtil::CreateR0<int16_t>(47)),
MakeTensor<int16_t>({}, {47})},
{WrapShared(LiteralUtil::CreateR0<int32_t>(48)),
MakeTensor<int32_t>({}, {48})},
{WrapShared(LiteralUtil::CreateR0<int64_t>(49)),
MakeTensor<int64_t>({}, {49})},
{WrapShared(LiteralUtil::CreateR0<float>(50.0)),
MakeTensor<float>({}, {50.0})},
{WrapShared(LiteralUtil::CreateR0<double>(51.0)),
MakeTensor<double>({}, {51.0})},
{WrapShared(LiteralUtil::CreateR0<std::complex<float>>({52.0, 53.0})),
MakeTensor<std::complex<float>>({}, {{52.0, 53.0}})},
{WrapShared(LiteralUtil::CreateR0<std::complex<double>>({54.0, 55.0})),
MakeTensor<std::complex<double>>({}, {{54.0, 55.0}})},
{WrapShared(LiteralUtil::CreateR1<bool>({true, false})),
MakeTensor<bool>({2}, {true, false})},
{WrapShared(
LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR0<bool>(true),
LiteralUtil::CreateR0<int8_t>(56))),
InterpreterValue{Tuple{SmallVector<std::shared_ptr<InterpreterValue>>{
std::make_shared<InterpreterValue>(MakeTensor<bool>({}, {true})),
std::make_shared<InterpreterValue>(
MakeTensor<int8_t>({}, {56}))}}}}};
}
INSTANTIATE_TEST_SUITE_P(Test, FromLiteralTest,
::testing::ValuesIn(MakeInputs()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
346598cf-4ab4-497e-b096-3d2dbd550eb3 | cpp | tensorflow/tensorflow | error_util | tensorflow/compiler/mlir/tensorflow/utils/error_util.cc | tensorflow/compiler/mlir/tensorflow/utils/error_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include <string_view>
#include "absl/status/status.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/managed_stack_trace.h"
namespace mlir {
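// Converts MLIR diagnostics emitted in this scope into a tensorflow::Status;
// with filter_stack set, locations inside TensorFlow-internal Python frames
// are suppressed from the reported stack.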
StatusScopedDiagnosticHandler::StatusScopedDiagnosticHandler(
MLIRContext* context, bool propagate, bool filter_stack)
: BaseScopedDiagnosticHandler(context, propagate, filter_stack) {
if (filter_stack) {
this->shouldShowLocFn = [](Location loc) -> bool {
if (FileLineColLoc fileLoc = mlir::dyn_cast<FileLineColLoc>(loc)) {
return !tensorflow::IsInternalFrameForFilename(
fileLoc.getFilename().str());
} else {
return true;
}
};
}
setHandler([this](Diagnostic& diag) { return this->handler(&diag); });
}
Status StatusScopedDiagnosticHandler::ConsumeStatus() {
return BaseScopedDiagnosticHandler::ConsumeStatus();
}
Status StatusScopedDiagnosticHandler::Combine(Status status) {
absl::Status absl_s = BaseScopedDiagnosticHandler::Combine(status);
return absl_s;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "llvm/ADT/Twine.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace mlir {
namespace {
using testing::HasSubstr;
TEST(ErrorUtilTest, StatusScopedDiagnosticHandler) {
MLIRContext context;
auto id = StringAttr::get(&context, "
auto loc = FileLineColLoc::get(&context, id, 0, 0);
{
TF_ASSERT_OK(
StatusScopedDiagnosticHandler(&context).Combine(absl::OkStatus()));
}
{
StatusScopedDiagnosticHandler handler(&context);
emitError(loc) << "Diagnostic message";
ASSERT_TRUE(tensorflow::errors::IsUnknown(handler.ConsumeStatus()));
}
{
Status err = tensorflow::errors::Internal("Passed in error");
ASSERT_TRUE(tensorflow::errors::IsInternal(
StatusScopedDiagnosticHandler(&context).Combine(err)));
}
{
auto function = [&]() {
emitError(loc) << "Diagnostic message reported";
emitError(loc) << "Second diagnostic message reported";
return tensorflow::errors::Internal("Passed in error");
};
StatusScopedDiagnosticHandler ssdh(&context);
Status s = ssdh.Combine(function());
ASSERT_TRUE(tensorflow::errors::IsInternal(s));
EXPECT_THAT(s.message(), HasSubstr("Passed in error"));
EXPECT_THAT(s.message(), HasSubstr("Diagnostic message reported"));
EXPECT_THAT(s.message(), HasSubstr("Second diagnostic message reported"));
}
}
TEST(ErrorUtilTest, StatusScopedDiagnosticHandlerWithFilter) {
MLIRContext context;
  auto id =
      StringAttr::get(&context, "//tensorflow/python/keras/keras_file.py");
auto loc = FileLineColLoc::get(&context, id, 0, 0);
  auto id2 =
      StringAttr::get(&context, "//tensorflow/python/something/my_test.py");
auto loc2 = FileLineColLoc::get(&context, id2, 0, 0);
auto id3 = StringAttr::get(&context, "python/tensorflow/show_file.py");
auto loc3 = FileLineColLoc::get(&context, id3, 0, 0);
  auto id_filtered =
      StringAttr::get(&context, "//tensorflow/python/dir/filtered_file_A.py");
auto loc_filtered = FileLineColLoc::get(&context, id_filtered, 0, 0);
auto id_filtered2 =
StringAttr::get(&context, "dir/tensorflow/python/filtered_file_B.py");
auto loc_filtered2 = FileLineColLoc::get(&context, id_filtered2, 0, 0);
auto callsite_loc = mlir::CallSiteLoc::get(loc, loc_filtered);
auto callsite_loc2 = mlir::CallSiteLoc::get(loc2, loc_filtered2);
auto callsite_loc3 = mlir::CallSiteLoc::get(loc_filtered2, loc3);
StatusScopedDiagnosticHandler ssdh_filter(&context, false, true);
emitError(callsite_loc) << "Error 1";
emitError(callsite_loc2) << "Error 2";
emitError(callsite_loc3) << "Error 3";
Status s_filtered = ssdh_filter.ConsumeStatus();
EXPECT_THAT(s_filtered.message(), HasSubstr("keras"));
EXPECT_THAT(s_filtered.message(), HasSubstr("test.py"));
EXPECT_THAT(s_filtered.message(), HasSubstr("show_file"));
EXPECT_THAT(s_filtered.message(), Not(HasSubstr("filtered_file")));
}
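// Note on the path literals in these two tests: the filter keys only on
// substrings, not exact file names. tensorflow::IsInternalFrameForFilename
// treats a frame as internal (and therefore filterable) when its path
// contains "tensorflow/python" but neither "keras" nor "test.py"; any
// concrete paths with those properties exercise the same behavior, so the
// names above and below are representative choices.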
TEST(ErrorUtilTest, StatusScopedDiagnosticHandlerWithoutFilter) {
MLIRContext context;
  auto id =
      StringAttr::get(&context, "//tensorflow/python/keras/keras_file.py");
auto loc = FileLineColLoc::get(&context, id, 0, 0);
  auto id_filtered =
      StringAttr::get(&context, "//tensorflow/python/dir/filtered_file_A.py");
auto loc_filtered = FileLineColLoc::get(&context, id_filtered, 0, 0);
auto id_filtered2 =
StringAttr::get(&context, "dir/tensorflow/python/filtered_file_B.py");
auto loc_filtered2 = FileLineColLoc::get(&context, id_filtered2, 0, 0);
  auto id_filtered3 =
      StringAttr::get(&context, "//tensorflow/python/something/my_op.py");
auto loc_filtered3 = FileLineColLoc::get(&context, id_filtered3, 0, 0);
auto callsite_loc = mlir::CallSiteLoc::get(loc, loc_filtered);
auto callsite_loc2 = mlir::CallSiteLoc::get(loc_filtered3, loc_filtered2);
StatusScopedDiagnosticHandler ssdh_no_filter(&context, false, false);
emitError(callsite_loc) << "Error 1";
emitError(callsite_loc2) << "Error 2";
Status s_no_filter = ssdh_no_filter.ConsumeStatus();
EXPECT_THAT(s_no_filter.message(), HasSubstr("keras"));
EXPECT_THAT(s_no_filter.message(), HasSubstr("my_op"));
EXPECT_THAT(s_no_filter.message(), HasSubstr("filtered_file_A"));
EXPECT_THAT(s_no_filter.message(), HasSubstr("filtered_file_B"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/error_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/error_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8206bd6-ec21-4c85-a0fb-f7ed53b0a6be | cpp | tensorflow/tensorflow | type_util | tensorflow/compiler/tf2xla/type_util.cc | tensorflow/compiler/tf2xla/type_util_test.cc | #include "tensorflow/compiler/tf2xla/type_util.h"
#include "absl/container/flat_hash_map.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
Status DataTypeToPrimitiveType(DataType data_type, xla::PrimitiveType* type) {
switch (data_type) {
case tensorflow::DT_BOOL:
*type = xla::PRED;
return absl::OkStatus();
case tensorflow::DT_INT4:
*type = xla::S4;
return absl::OkStatus();
case tensorflow::DT_INT8:
case tensorflow::DT_QINT8:
*type = xla::S8;
return absl::OkStatus();
case tensorflow::DT_INT16:
case tensorflow::DT_QINT16:
*type = xla::S16;
return absl::OkStatus();
case tensorflow::DT_INT32:
case tensorflow::DT_QINT32:
*type = xla::S32;
return absl::OkStatus();
case tensorflow::DT_INT64:
*type = xla::S64;
return absl::OkStatus();
case tensorflow::DT_UINT4:
*type = xla::U4;
return absl::OkStatus();
case tensorflow::DT_UINT8:
case tensorflow::DT_QUINT8:
*type = xla::U8;
return absl::OkStatus();
case tensorflow::DT_UINT16:
case tensorflow::DT_QUINT16:
*type = xla::U16;
return absl::OkStatus();
case tensorflow::DT_UINT32:
*type = xla::U32;
return absl::OkStatus();
case tensorflow::DT_UINT64:
*type = xla::U64;
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E5M2:
*type = xla::F8E5M2;
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E4M3FN:
*type = xla::F8E4M3FN;
return absl::OkStatus();
case tensorflow::DT_BFLOAT16:
*type = xla::BF16;
return absl::OkStatus();
case tensorflow::DT_HALF:
*type = xla::F16;
return absl::OkStatus();
case tensorflow::DT_FLOAT:
*type = xla::F32;
return absl::OkStatus();
case tensorflow::DT_DOUBLE:
*type = xla::F64;
return absl::OkStatus();
case tensorflow::DT_COMPLEX64:
*type = xla::C64;
return absl::OkStatus();
case tensorflow::DT_COMPLEX128:
*type = xla::C128;
return absl::OkStatus();
default:
return errors::InvalidArgument(
"Unsupported type in DataTypeToPrimitiveType: '",
DataTypeString(data_type), "'");
}
}
absl::StatusOr<DataType> EncodePrimitiveTypeAsDataType(
xla::PrimitiveType type) {
static const absl::flat_hash_map<xla::PrimitiveType, DataType>&
data_type_map = *new absl::flat_hash_map<xla::PrimitiveType, DataType>({
{xla::PRED, DT_BOOL},
{xla::F8E5M2, DT_FLOAT8_E5M2},
{xla::F8E4M3FN, DT_FLOAT8_E4M3FN},
{xla::BF16, DT_BFLOAT16},
{xla::F16, DT_HALF},
{xla::F32, DT_FLOAT},
{xla::F64, DT_DOUBLE},
{xla::C64, DT_COMPLEX64},
{xla::S4, DT_INT4},
{xla::S8, DT_INT8},
{xla::S16, DT_INT16},
{xla::S32, DT_INT32},
{xla::S64, DT_INT64},
{xla::U4, DT_UINT4},
{xla::U8, DT_UINT8},
{xla::U16, DT_UINT16},
{xla::U32, DT_UINT32},
{xla::U64, DT_UINT64},
{xla::C128, DT_COMPLEX128},
});
auto it = data_type_map.find(type);
if (it == data_type_map.end()) {
return errors::InvalidArgument(
"Unsupported type in PrimitiveTypeToDataType ", type);
}
return it->second;
}
} | #include "tensorflow/compiler/tf2xla/type_util.h"
#include <array>
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
bool DataTypeSupportsXlaConversion(DataType dt) {
switch (dt) {
case DataType::DT_STRING:
case DataType::DT_RESOURCE:
case DataType::DT_VARIANT:
case DataType::DT_INVALID:
return false;
default:
break;
}
return !IsRefType(dt);
}
TEST(DataTypeToPrimitiveTypeTest, AllDataTypesSupported) {
for (int i = tensorflow::DataType_MIN; i < tensorflow::DataType_MAX; ++i) {
if (tensorflow::DataType_IsValid(i)) {
DataType dt = static_cast<DataType>(i);
if (DataTypeSupportsXlaConversion(dt)) {
xla::PrimitiveType out_type;
EXPECT_TRUE(DataTypeToPrimitiveType(dt, &out_type).ok());
}
}
}
}
TEST(EncodePrimitiveTypeAsDataType, AllPrimitiveTypesSupported) {
for (int i = tensorflow::DataType_MIN; i < tensorflow::DataType_MAX; ++i) {
DataType dt = static_cast<DataType>(i);
xla::PrimitiveType xla_type;
if (DataTypeToPrimitiveType(dt, &xla_type).ok()) {
absl::StatusOr<DataType> data_type_or =
EncodePrimitiveTypeAsDataType(xla_type);
EXPECT_TRUE(data_type_or.ok());
if (!DataTypeIsQuantized(dt)) {
EXPECT_EQ(*data_type_or, dt);
}
}
}
}
TEST(EncodePrimitiveTypeAsDataType, QuantizedTypesMapToUnquantized) {
static std::array<DataType, 5> quantized_inputs = {
DT_QINT8, DT_QINT16, DT_QINT32, DT_QUINT8, DT_QUINT16};
static std::array<DataType, 5> expected_outputs = {
DT_INT8, DT_INT16, DT_INT32, DT_UINT8, DT_UINT16};
for (int i = 0; i < quantized_inputs.size(); ++i) {
xla::PrimitiveType xla_type;
EXPECT_TRUE(DataTypeToPrimitiveType(quantized_inputs[i], &xla_type).ok());
absl::StatusOr<DataType> data_type_or =
EncodePrimitiveTypeAsDataType(xla_type);
EXPECT_TRUE(data_type_or.ok());
EXPECT_EQ(*data_type_or, expected_outputs[i]);
}
}
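// Illustrative spot check (a sketch added alongside the exhaustive loops
// above): one concrete mapping, DT_FLOAT <-> xla::F32, written out directly.
TEST(DataTypeToPrimitiveTypeTest, FloatMapsToF32Example) {
  xla::PrimitiveType out_type;
  ASSERT_TRUE(DataTypeToPrimitiveType(DT_FLOAT, &out_type).ok());
  EXPECT_EQ(out_type, xla::F32);
  absl::StatusOr<DataType> round_trip =
      EncodePrimitiveTypeAsDataType(out_type);
  ASSERT_TRUE(round_trip.ok());
  EXPECT_EQ(*round_trip, DT_FLOAT);
}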
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/type_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/type_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
89bd881f-5e83-40d0-a29d-07fe18039cf9 | cpp | tensorflow/tensorflow | type_id_registry | third_party/xla/xla/ffi/type_id_registry.cc | third_party/xla/xla/ffi/type_id_registry_test.cc | #include "xla/ffi/type_id_registry.h"
#include <atomic>
#include <cstdint>
#include <string>
#include <string_view>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/util.h"
namespace xla::ffi {
ABSL_CONST_INIT absl::Mutex type_registry_mutex(absl::kConstInit);
using ExternalTypeIdRegistry =
absl::flat_hash_map<std::string, TypeIdRegistry::TypeId>;
static ExternalTypeIdRegistry& StaticExternalTypeIdRegistry() {
static auto* registry = new ExternalTypeIdRegistry();
return *registry;
}
TypeIdRegistry::TypeId TypeIdRegistry::GetNextTypeId() {
static auto* counter = new std::atomic<int64_t>(1);
return TypeId(counter->fetch_add(1));
}
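// A note on the invariant assumed above: ids start at 1 so that TypeId(0) can
// serve as the "no id assigned yet" placeholder -- it is the value temporarily
// emplaced during registration below and, by convention, the registry's
// unknown-type sentinel.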
absl::StatusOr<TypeIdRegistry::TypeId> TypeIdRegistry::RegisterExternalTypeId(
std::string_view name) {
absl::MutexLock lock(&type_registry_mutex);
auto& registry = StaticExternalTypeIdRegistry();
auto emplaced = registry.emplace(name, TypeId(0));
if (!emplaced.second) {
return Internal("Type id %d already registered for type name %s",
emplaced.first->second.value(), name);
}
return emplaced.first->second = GetNextTypeId();
}
} | #include "xla/ffi/type_id_registry.h"
#include <cstdint>
#include "absl/status/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::ffi {
namespace {
using ::testing::HasSubstr;
TEST(TypeIdRegistryTest, RegisterExternalTypeId) {
TF_ASSERT_OK_AND_ASSIGN(auto type_id,
TypeIdRegistry::RegisterExternalTypeId("foo"));
EXPECT_GE(type_id.value(), 0);
auto duplicate_type_id = TypeIdRegistry::RegisterExternalTypeId("foo");
EXPECT_THAT(duplicate_type_id.status().message(),
HasSubstr("already registered for type name foo"));
}
TEST(TypeIdRegistryTest, RegisterInternalTypeId) {
auto int32_type_id = TypeIdRegistry::GetTypeId<int32_t>();
auto int64_type_id = TypeIdRegistry::GetTypeId<int64_t>();
EXPECT_NE(int32_type_id, int64_type_id);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/type_id_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/type_id_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f97dfb87-0805-43bb-806b-b6193861e8fb | cpp | tensorflow/tensorflow | execution_state | third_party/xla/xla/ffi/execution_state.cc | third_party/xla/xla/ffi/execution_state_test.cc | #include "xla/ffi/execution_state.h"
#include <utility>
#include "absl/status/status.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla::ffi {
ExecutionState::ExecutionState()
: type_id_(TypeIdRegistry::kUnknownTypeId),
state_(nullptr),
deleter_(nullptr) {}
ExecutionState::~ExecutionState() {
if (deleter_) deleter_(state_);
}
absl::Status ExecutionState::Set(TypeId type_id, void* state,
Deleter<void> deleter) {
DCHECK(state && deleter) << "State and deleter must not be null";
if (type_id_ != TypeIdRegistry::kUnknownTypeId) {
return FailedPrecondition("State is already set with a type id %d",
type_id_.value());
}
type_id_ = type_id;
state_ = state;
deleter_ = std::move(deleter);
return absl::OkStatus();
}
absl::StatusOr<void*> ExecutionState::Get(TypeId type_id) const {
if (type_id_ == TypeIdRegistry::kUnknownTypeId) {
return NotFound("State is not set");
}
if (type_id_ != type_id) {
return InvalidArgument(
"Set state type id %d does not match the requested one %d",
type_id_.value(), type_id.value());
}
return state_;
}
bool ExecutionState::IsSet() const {
return type_id_ != TypeIdRegistry::kUnknownTypeId;
}
} | #include "xla/ffi/execution_state.h"
#include <cstdint>
#include <memory>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::ffi {
using TypeId = ExecutionState::TypeId;
using ::testing::HasSubstr;
TEST(ExecutionStateTest, SetAndGet) {
ExecutionState state;
EXPECT_FALSE(state.IsSet());
{
auto data = state.Get(TypeId(1));
EXPECT_THAT(data.status().message(), HasSubstr("State is not set"));
}
{
auto data = state.Get<int32_t>();
EXPECT_THAT(data.status().message(), HasSubstr("State is not set"));
}
TF_ASSERT_OK(state.Set(std::make_unique<int32_t>(42)));
EXPECT_TRUE(state.IsSet());
TF_ASSERT_OK_AND_ASSIGN(int32_t* data, state.Get<int32_t>());
EXPECT_EQ(*data, 42);
}
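// Illustrative extra case (a sketch in the style of the test above): once a
// state is set, its type id is pinned and a second Set must fail.
TEST(ExecutionStateTest, SecondSetFails) {
  ExecutionState state;
  TF_ASSERT_OK(state.Set(std::make_unique<int32_t>(1)));
  EXPECT_THAT(state.Set(std::make_unique<int32_t>(2)).message(),
              HasSubstr("State is already set"));
}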
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/execution_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/execution_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ead042dd-f68b-44ed-b69a-5679dbf2156e | cpp | tensorflow/tensorflow | execution_context | third_party/xla/xla/ffi/execution_context.cc | third_party/xla/xla/ffi/execution_context_test.cc | #include "xla/ffi/execution_context.h"
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
namespace xla::ffi {
ExecutionContext::UserData::UserData(void* data, Deleter<void> deleter)
: data_(data), deleter_(std::move(deleter)) {}
ExecutionContext::UserData::~UserData() {
if (deleter_) deleter_(data_);
}
absl::Status ExecutionContext::Insert(TypeId type_id, void* data,
Deleter<void> deleter) {
return InsertUserData(type_id,
std::make_unique<UserData>(data, std::move(deleter)));
}
absl::Status ExecutionContext::InsertUserData(TypeId type_id,
std::unique_ptr<UserData> data) {
if (!data) return absl::InvalidArgumentError("User data must be not null");
auto emplaced = user_data_.emplace(type_id, std::move(data));
if (!emplaced.second) {
return absl::AlreadyExistsError(
absl::StrCat("User data with type id ", type_id.value(),
" already exists in execution context"));
}
return absl::OkStatus();
}
absl::StatusOr<ExecutionContext::UserData*> ExecutionContext::LookupUserData(
TypeId type_id) const {
auto it = user_data_.find(type_id);
if (it == user_data_.end()) {
return absl::NotFoundError(absl::StrCat("User data with type id ",
type_id.value(),
" not found in execution context"));
}
return it->second.get();
}
} | #include "xla/ffi/execution_context.h"
#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::ffi {
struct I32UserData {
explicit I32UserData(int32_t value) : value(value) {}
int32_t value;
};
struct StrUserData {
explicit StrUserData(std::string value) : value(value) {}
std::string value;
};
TEST(ExecutionContextTest, EmplaceUserData) {
ExecutionContext context;
TF_ASSERT_OK(context.Emplace<I32UserData>(42));
TF_ASSERT_OK(context.Emplace<StrUserData>("hello"));
TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup<I32UserData>());
TF_ASSERT_OK_AND_ASSIGN(auto* str_data, context.Lookup<StrUserData>());
ASSERT_NE(i32_data, nullptr);
ASSERT_NE(str_data, nullptr);
ASSERT_EQ(i32_data->value, 42);
ASSERT_EQ(str_data->value, "hello");
}
TEST(ExecutionContextTest, InsertUserOwned) {
I32UserData user_data(42);
ExecutionContext context;
TF_ASSERT_OK(context.Insert(&user_data));
TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup<I32UserData>());
ASSERT_EQ(i32_data, &user_data);
}
TEST(ExecutionContextTest, InsertUserOwnedWithTypeId) {
TF_ASSERT_OK_AND_ASSIGN(
TypeIdRegistry::TypeId type_id,
TypeIdRegistry::RegisterExternalTypeId("I32UserData"));
I32UserData user_data(42);
ExecutionContext context;
TF_ASSERT_OK(context.Insert(type_id, &user_data));
TF_ASSERT_OK_AND_ASSIGN(auto* i32_data, context.Lookup(type_id));
ASSERT_EQ(i32_data, &user_data);
}
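// Illustrative extra case (a sketch mirroring the suite's style): inserting
// user data with an already-used type id reports AlreadyExists.
TEST(ExecutionContextTest, DuplicateEmplaceFails) {
  ExecutionContext context;
  TF_ASSERT_OK(context.Emplace<I32UserData>(1));
  EXPECT_EQ(context.Emplace<I32UserData>(2).code(),
            absl::StatusCode::kAlreadyExists);
}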
TEST(ExecutionContextTest, UserDataNotFound) {
ExecutionContext context;
auto i32_data = context.Lookup<I32UserData>();
ASSERT_EQ(i32_data.status().code(), absl::StatusCode::kNotFound);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/execution_context.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/execution_context_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
87bbe468-7164-4411-b315-e073e50aa62f | cpp | tensorflow/tensorflow | call_frame | third_party/xla/xla/ffi/call_frame.cc | third_party/xla/xla/ffi/call_frame_test.cc | #include "xla/ffi/call_frame.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/ffi/api/api.h"
#include "xla/ffi/api/c_api.h"
#include "xla/ffi/api/c_api_internal.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla::ffi {
struct CallFrameBuilder::Buffer {
se::DeviceMemoryBase memory;
PrimitiveType type;
absl::InlinedVector<int64_t, 4> dims;
};
CallFrameBuilder::AttributesMap CallFrameBuilder::AttributesBuilder::Build() {
return std::move(attrs_);
}
static CallFrameBuilder::Attribute FromFlatAttribute(
CallFrameBuilder::FlatAttribute attr) {
return std::visit(
[](auto& attr) { return CallFrameBuilder::Attribute{attr}; }, attr);
}
CallFrameBuilder::AttributesBuilder::AttributesBuilder() = default;
CallFrameBuilder::AttributesBuilder::~AttributesBuilder() = default;
void CallFrameBuilder::AttributesBuilder::Insert(std::string name,
Attribute attr) {
attrs_.try_emplace(std::move(name), std::move(attr));
}
void CallFrameBuilder::AttributesBuilder::Insert(std::string name,
AttributesMap attrs) {
attrs_.try_emplace(std::move(name),
Dictionary{std::make_shared<AttributesMap>(attrs)});
}
void CallFrameBuilder::AttributesBuilder::Append(AttributesMap attrs) {
for (auto& [name, attr] : attrs) Insert(name, std::move(attr));
}
CallFrameBuilder::CallFrameBuilder(size_t num_args, size_t num_rets) {
args_.reserve(num_args);
rets_.reserve(num_rets);
}
CallFrameBuilder::~CallFrameBuilder() = default;
void CallFrameBuilder::AddBufferArg(se::DeviceMemoryBase memory,
PrimitiveType type,
absl::Span<const int64_t> dims) {
DCHECK(args_.capacity() > args_.size())
<< "CallFrame builder `num_args` argument was too small";
args_.push_back(Buffer{memory, type, {dims.begin(), dims.end()}});
}
void CallFrameBuilder::AddTokenArg() {
DCHECK(args_.capacity() > args_.size())
<< "CallFrame builder `num_args` argument was too small";
args_.push_back(Buffer{se::DeviceMemoryBase(), PrimitiveType::TOKEN, {}});
}
void CallFrameBuilder::AddBufferRet(se::DeviceMemoryBase memory,
PrimitiveType type,
absl::Span<const int64_t> dims) {
DCHECK(rets_.capacity() > rets_.size())
<< "CallFrame builder `num_rets` argument was too small";
rets_.push_back(Buffer{memory, type, {dims.begin(), dims.end()}});
}
void CallFrameBuilder::AddTokenRet() {
DCHECK(rets_.capacity() > rets_.size())
<< "CallFrame builder `num_rets` argument was too small";
rets_.push_back(Buffer{se::DeviceMemoryBase(), PrimitiveType::TOKEN, {}});
}
void CallFrameBuilder::AddAttributes(AttributesMap attrs) {
if (ABSL_PREDICT_TRUE(attrs_.empty())) {
attrs_ = std::move(attrs);
return;
}
for (auto& [name, attr] : attrs) {
attrs_.try_emplace(std::move(name), std::move(attr));
}
}
CallFrame CallFrameBuilder::Build() {
return CallFrame(CallFrame::CreateArgs(args_), CallFrame::CreateRets(rets_),
CallFrame::CreateAttrs(attrs_));
}
CallFrameBuilder::CallFrameBuilder(CallFrameBuilder&&) = default;
CallFrameBuilder& CallFrameBuilder::operator=(CallFrameBuilder&&) = default;
struct CallFrame::Buffer {
absl::InlinedVector<int64_t, 4> dims;
XLA_FFI_Buffer buffer = {XLA_FFI_Buffer_STRUCT_SIZE, nullptr};
};
struct CallFrame::Dictionary {
std::unique_ptr<Attributes> attrs;
};
struct CallFrame::Array {
CallFrameBuilder::Array value;
XLA_FFI_Array array = {};
};
struct CallFrame::Scalar {
CallFrameBuilder::Scalar value;
XLA_FFI_Scalar scalar = {};
};
struct CallFrame::String {
std::string value;
XLA_FFI_ByteSpan span = {};
};
struct CallFrame::NamedAttribute {
String name;
Attribute value;
};
struct CallFrame::Arguments {
std::vector<Buffer> arguments;
std::vector<XLA_FFI_ArgType> types;
std::vector<void*> args;
XLA_FFI_Args ffi_args = {XLA_FFI_Args_STRUCT_SIZE, nullptr};
};
struct CallFrame::Results {
std::vector<Buffer> results;
std::vector<XLA_FFI_RetType> types;
std::vector<void*> rets;
XLA_FFI_Rets ffi_rets = {XLA_FFI_Rets_STRUCT_SIZE, nullptr};
};
struct CallFrame::Attributes {
std::vector<NamedAttribute> attributes;
std::vector<XLA_FFI_ByteSpan*> names;
std::vector<XLA_FFI_AttrType> types;
std::vector<void*> attrs;
XLA_FFI_Attrs ffi_attrs = {XLA_FFI_Attrs_STRUCT_SIZE, nullptr};
};
CallFrame::CallFrame(CallFrame&&) = default;
CallFrame& CallFrame::operator=(CallFrame&&) = default;
CallFrame::~CallFrame() = default;
CallFrame::CallFrame(std::unique_ptr<Arguments> arguments,
std::unique_ptr<Results> results,
std::shared_ptr<Attributes> attributes)
: arguments_(std::move(arguments)),
results_(std::move(results)),
attributes_(std::move(attributes)) {}
XLA_FFI_CallFrame CallFrame::Build(const XLA_FFI_Api* api,
XLA_FFI_ExecutionContext* ctx,
XLA_FFI_ExecutionStage stage) {
XLA_FFI_CallFrame call_frame = {XLA_FFI_CallFrame_STRUCT_SIZE, nullptr};
call_frame.api = api;
call_frame.ctx = ctx;
call_frame.stage = stage;
call_frame.args = arguments_->ffi_args;
call_frame.rets = results_->ffi_rets;
call_frame.attrs = attributes_->ffi_attrs;
return call_frame;
}
static XLA_FFI_DataType ToDataType(PrimitiveType primitive_type) {
switch (primitive_type) {
case PrimitiveType::PRIMITIVE_TYPE_INVALID:
case PrimitiveType::PRED:
case PrimitiveType::S8:
case PrimitiveType::S16:
case PrimitiveType::S32:
case PrimitiveType::S64:
case PrimitiveType::U8:
case PrimitiveType::U16:
case PrimitiveType::U32:
case PrimitiveType::U64:
case PrimitiveType::F16:
case PrimitiveType::F32:
case PrimitiveType::F64:
case PrimitiveType::BF16:
case PrimitiveType::C64:
case PrimitiveType::C128:
case PrimitiveType::TOKEN:
case PrimitiveType::F8E5M2:
case PrimitiveType::F8E4M3:
case PrimitiveType::F8E4M3FN:
case PrimitiveType::F8E4M3B11FNUZ:
case PrimitiveType::F8E5M2FNUZ:
case PrimitiveType::F8E4M3FNUZ:
case PrimitiveType::F8E3M4:
return static_cast<XLA_FFI_DataType>(primitive_type);
default:
DCHECK(false) << "Unsupported primitive type "
<< PrimitiveType_Name(primitive_type);
return XLA_FFI_DataType_INVALID;
}
}
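// The direct static_cast above is only sound because the XLA_FFI_DataType
// enumerators are declared with the same numeric values as the corresponding
// xla::PrimitiveType entries; the exhaustive switch exists to reject any type
// for which that correspondence is not established.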
CallFrame::Buffer CallFrame::ConvertBuffer(
const CallFrameBuilder::Buffer& buffer) {
Buffer result;
result.dims = buffer.dims;
result.buffer.data = const_cast<void*>(buffer.memory.opaque());
result.buffer.dtype = ToDataType(buffer.type);
result.buffer.rank = result.dims.size();
return result;
}
std::unique_ptr<CallFrame::Arguments> CallFrame::CreateArgs(
absl::Span<const CallFrameBuilder::Buffer> bargs) {
size_t num_args = bargs.size();
auto args = std::make_unique<Arguments>();
args->types.resize(num_args, XLA_FFI_ArgType_BUFFER);
args->args.resize(num_args, nullptr);
args->arguments.reserve(num_args);
for (const CallFrameBuilder::Buffer& barg : bargs) {
args->arguments.push_back(ConvertBuffer(barg));
}
return FixUpArgs(std::move(args));
}
std::unique_ptr<CallFrame::Arguments> CallFrame::CopyArgs(
const Arguments& args) {
auto upd_args = std::make_unique<Arguments>();
upd_args->arguments = args.arguments;
upd_args->types = args.types;
upd_args->args.resize(args.args.size(), nullptr);
return FixUpArgs(std::move(upd_args));
}
std::unique_ptr<CallFrame::Arguments> CallFrame::FixUpArgs(
std::unique_ptr<Arguments> args) {
size_t num_args = args->arguments.size();
DCHECK_EQ(num_args, args->types.size());
DCHECK_EQ(num_args, args->args.size());
for (size_t i = 0; i < num_args; ++i) {
args->arguments[i].buffer.dims = args->arguments[i].dims.data();
args->args[i] = &args->arguments[i].buffer;
}
args->ffi_args.size = num_args;
args->ffi_args.types = args->types.data();
args->ffi_args.args = args->args.data();
return args;
}
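// The fix-up passes (here and in FixUpRets/FixUpAttrs below) exist because
// these structs are self-referential: XLA_FFI_Buffer::dims and the
// ffi_args/ffi_rets/ffi_attrs tables hold raw pointers into sibling vectors,
// so every copy of the containers must re-derive those pointers before the
// frame is handed to a handler.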
std::unique_ptr<CallFrame::Results> CallFrame::CreateRets(
absl::Span<const CallFrameBuilder::Buffer> brets) {
auto rets = std::make_unique<Results>();
size_t num_rets = brets.size();
rets->types.resize(num_rets, XLA_FFI_RetType_BUFFER);
rets->rets.resize(num_rets, nullptr);
rets->results.reserve(num_rets);
for (const CallFrameBuilder::Buffer& bret : brets) {
rets->results.push_back(ConvertBuffer(bret));
}
return FixUpRets(std::move(rets));
}
std::unique_ptr<CallFrame::Results> CallFrame::CopyRets(const Results& rets) {
auto upd_rets = std::make_unique<Results>();
upd_rets->results = rets.results;
upd_rets->types = rets.types;
upd_rets->rets.resize(rets.rets.size(), nullptr);
return FixUpRets(std::move(upd_rets));
}
std::unique_ptr<CallFrame::Results> CallFrame::FixUpRets(
std::unique_ptr<Results> rets) {
size_t num_rets = rets->results.size();
DCHECK_EQ(num_rets, rets->types.size());
DCHECK_EQ(num_rets, rets->rets.size());
for (size_t i = 0; i < num_rets; ++i) {
rets->results[i].buffer.dims = rets->results[i].dims.data();
rets->rets[i] = &rets->results[i].buffer;
}
rets->ffi_rets.size = num_rets;
rets->ffi_rets.types = rets->types.data();
rets->ffi_rets.rets = rets->rets.data();
return rets;
}
struct CallFrame::ConvertAttribute {
CallFrame::Attribute operator()(const CallFrameBuilder::Array& array) {
return CallFrame::Array{array};
}
CallFrame::Attribute operator()(const CallFrameBuilder::Scalar& scalar) {
return CallFrame::Scalar{scalar};
}
CallFrame::Attribute operator()(const std::string& str) {
return CallFrame::String{str};
}
CallFrame::Attribute operator()(const CallFrameBuilder::Dictionary& dict) {
return CallFrame::Dictionary{CreateAttrs(*dict.attrs)};
}
};
struct CallFrame::FixUpAttribute {
void operator()(CallFrame::Array& array) {
auto visitor = [&](auto& value) {
using T = typename std::remove_reference_t<decltype(value)>::value_type;
array.array.dtype = internal::NativeTypeToCApiDataType<T>();
array.array.size = value.size();
array.array.data = value.data();
};
std::visit(visitor, array.value);
}
void operator()(CallFrame::Scalar& scalar) {
auto visitor = [&](auto& value) {
using T = std::remove_reference_t<decltype(value)>;
scalar.scalar.dtype = internal::NativeTypeToCApiDataType<T>();
scalar.scalar.value = &value;
};
std::visit(visitor, scalar.value);
}
void operator()(CallFrame::String& str) {
str.span.ptr = str.value.data();
str.span.len = str.value.size();
}
void operator()(CallFrame::Dictionary&) {}
};
struct CallFrame::AttributeType {
XLA_FFI_AttrType operator()(CallFrame::Array&) {
return XLA_FFI_AttrType_ARRAY;
}
XLA_FFI_AttrType operator()(CallFrame::Scalar&) {
return XLA_FFI_AttrType_SCALAR;
}
XLA_FFI_AttrType operator()(CallFrame::String&) {
return XLA_FFI_AttrType_STRING;
}
XLA_FFI_AttrType operator()(CallFrame::Dictionary&) {
return XLA_FFI_AttrType_DICTIONARY;
}
};
struct CallFrame::AttributeStorage {
template <typename T>
void* operator()(T& value) {
return &value;
}
void* operator()(CallFrame::Array& array) { return &array.array; }
void* operator()(CallFrame::Scalar& scalar) { return &scalar.scalar; }
void* operator()(CallFrame::String& str) { return &str.span; }
void* operator()(CallFrame::Dictionary& dict) {
return &dict.attrs->ffi_attrs;
}
};
std::unique_ptr<CallFrame::Attributes> CallFrame::CreateAttrs(
const CallFrameBuilder::AttributesMap& battrs) {
auto attrs = std::make_unique<Attributes>();
attrs->attributes.reserve(battrs.size());
for (auto& [name, battr] : battrs) {
NamedAttribute attr = {String{name}, std::visit(ConvertAttribute(), battr)};
attrs->attributes.push_back(std::move(attr));
}
absl::c_sort(attrs->attributes,
[](const NamedAttribute& a, const NamedAttribute& b) {
return a.name.value < b.name.value;
});
return FixUpAttrs(std::move(attrs));
}
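// Attributes are sorted by name above so that the callee-side decoder can
// rely on a deterministic lexicographic order when resolving attributes (a
// note on intent; the matching lookup lives in the FFI decoding machinery).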
std::unique_ptr<CallFrame::Attributes> CallFrame::FixUpAttrs(
std::unique_ptr<CallFrame::Attributes> attrs) {
size_t num_attrs = attrs->attributes.size();
DCHECK(attrs->names.empty() && attrs->types.empty() && attrs->attrs.empty());
attrs->names.reserve(num_attrs);
attrs->types.reserve(num_attrs);
attrs->attrs.reserve(num_attrs);
for (NamedAttribute& attr : attrs->attributes) {
std::invoke(FixUpAttribute{}, attr.name);
std::visit(FixUpAttribute{}, attr.value);
}
for (NamedAttribute& attr : attrs->attributes) {
attrs->names.push_back(&attr.name.span);
attrs->types.push_back(std::visit(AttributeType(), attr.value));
attrs->attrs.push_back(std::visit(AttributeStorage(), attr.value));
}
attrs->ffi_attrs.size = attrs->attributes.size();
attrs->ffi_attrs.names = attrs->names.data();
attrs->ffi_attrs.types = attrs->types.data();
attrs->ffi_attrs.attrs = attrs->attrs.data();
return attrs;
}
absl::Status CallFrame::UpdateWithBuffers(
absl::Span<const se::DeviceMemoryBase> args,
absl::Span<const se::DeviceMemoryBase> rets) {
if (ABSL_PREDICT_FALSE(args.size() != arguments_->args.size())) {
return InvalidArgument("Invalid number of updated arguments: %d vs %d",
args.size(), arguments_->args.size());
}
if (ABSL_PREDICT_FALSE(rets.size() != results_->rets.size())) {
return InvalidArgument("Invalid number of updated results: %d vs %d",
rets.size(), results_->rets.size());
}
size_t num_args = args.size();
for (size_t i = 0; i < num_args; ++i) {
arguments_->arguments[i].buffer.data = const_cast<void*>(args[i].opaque());
}
size_t num_rets = rets.size();
for (size_t i = 0; i < num_rets; ++i) {
results_->results[i].buffer.data = const_cast<void*>(rets[i].opaque());
}
return absl::OkStatus();
}
absl::StatusOr<CallFrame> CallFrame::CopyWithBuffers(
absl::Span<const se::DeviceMemoryBase> args,
absl::Span<const se::DeviceMemoryBase> rets) {
CallFrame clone(CopyArgs(*arguments_), CopyRets(*results_), attributes_);
TF_RETURN_IF_ERROR(clone.UpdateWithBuffers(args, rets));
return clone;
}
} | #include "xla/ffi/call_frame.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "xla/ffi/api/c_api.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::ffi {
TEST(CallFrameTest, UpdateCallFrame) {
se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x12345678), 1024);
se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x87654321), 1024);
std::vector<int64_t> dims = {1, 2, 3, 4};
CallFrameBuilder::AttributesBuilder attrs_builder;
attrs_builder.Insert("attr1", "value1");
attrs_builder.Insert("attr2", "value2");
CallFrameBuilder builder(1, 1);
builder.AddBufferArg(mem0, PrimitiveType::F32, dims);
builder.AddBufferRet(mem1, PrimitiveType::F32, dims);
builder.AddAttributes(attrs_builder.Build());
std::optional<CallFrame> call_frame = builder.Build();
{
XLA_FFI_CallFrame ffi_call_frame = call_frame->Build(
        /*api=*/nullptr, /*ctx=*/nullptr, XLA_FFI_ExecutionStage_EXECUTE);
EXPECT_EQ(ffi_call_frame.args.size, 1);
EXPECT_EQ(ffi_call_frame.args.types[0], XLA_FFI_ArgType_BUFFER);
EXPECT_EQ(static_cast<XLA_FFI_Buffer*>(ffi_call_frame.args.args[0])->data,
mem0.opaque());
EXPECT_EQ(ffi_call_frame.rets.size, 1);
EXPECT_EQ(ffi_call_frame.rets.types[0], XLA_FFI_ArgType_BUFFER);
EXPECT_EQ(static_cast<XLA_FFI_Buffer*>(ffi_call_frame.rets.rets[0])->data,
mem1.opaque());
EXPECT_EQ(ffi_call_frame.attrs.size, 2);
}
CallFrame updated_call_frame =
std::move(call_frame)->CopyWithBuffers({mem1}, {mem0}).value();
{
XLA_FFI_CallFrame ffi_call_frame = updated_call_frame.Build(
        /*api=*/nullptr, /*ctx=*/nullptr, XLA_FFI_ExecutionStage_EXECUTE);
EXPECT_EQ(ffi_call_frame.args.size, 1);
EXPECT_EQ(ffi_call_frame.args.types[0], XLA_FFI_ArgType_BUFFER);
EXPECT_EQ(static_cast<XLA_FFI_Buffer*>(ffi_call_frame.args.args[0])->data,
mem1.opaque());
EXPECT_EQ(ffi_call_frame.rets.size, 1);
EXPECT_EQ(ffi_call_frame.rets.types[0], XLA_FFI_ArgType_BUFFER);
EXPECT_EQ(static_cast<XLA_FFI_Buffer*>(ffi_call_frame.rets.rets[0])->data,
mem0.opaque());
EXPECT_EQ(ffi_call_frame.attrs.size, 2);
}
TF_ASSERT_OK(updated_call_frame.UpdateWithBuffers({mem0}, {mem1}));
{
XLA_FFI_CallFrame ffi_call_frame = updated_call_frame.Build(
        /*api=*/nullptr, /*ctx=*/nullptr, XLA_FFI_ExecutionStage_EXECUTE);
EXPECT_EQ(ffi_call_frame.args.size, 1);
EXPECT_EQ(ffi_call_frame.args.types[0], XLA_FFI_ArgType_BUFFER);
EXPECT_EQ(static_cast<XLA_FFI_Buffer*>(ffi_call_frame.args.args[0])->data,
mem0.opaque());
EXPECT_EQ(ffi_call_frame.rets.size, 1);
EXPECT_EQ(ffi_call_frame.rets.types[0], XLA_FFI_ArgType_BUFFER);
EXPECT_EQ(static_cast<XLA_FFI_Buffer*>(ffi_call_frame.rets.rets[0])->data,
mem1.opaque());
EXPECT_EQ(ffi_call_frame.attrs.size, 2);
}
}
void BM_AddBufferArg(benchmark::State& state) {
size_t num_args = state.range(0);
se::DeviceMemoryBase memory(reinterpret_cast<void*>(0x12345678), 1024);
std::vector<int64_t> dims = {1, 2, 3, 4};
for (auto _ : state) {
CallFrameBuilder builder(num_args, 0);
for (size_t i = 0; i < num_args; ++i) {
builder.AddBufferArg(memory, PrimitiveType::F32, dims);
}
CallFrame call_frame = builder.Build();
}
}
void BM_AddAttributes(benchmark::State& state) {
size_t num_attrs = state.range(0);
CallFrameBuilder::AttributesMap attrs;
for (size_t i = 0; i < num_attrs; ++i) {
attrs.try_emplace(absl::StrCat("attr_", i), 42);
}
for (auto _ : state) {
CallFrameBuilder::AttributesBuilder attrs_builder;
attrs_builder.Append(attrs);
CallFrameBuilder builder(0, 0);
builder.AddAttributes(attrs_builder.Build());
CallFrame call_frame = builder.Build();
}
}
void BM_UpdateCallFrame(benchmark::State& state) {
size_t num_args = state.range(0);
se::DeviceMemoryBase memory(reinterpret_cast<void*>(0x12345678), 1024);
std::vector<int64_t> dims = {1, 2, 3, 4};
CallFrameBuilder builder(num_args, 0);
for (size_t i = 0; i < num_args; ++i) {
builder.AddBufferArg(se::DeviceMemoryBase(nullptr, 1024),
PrimitiveType::F32, dims);
}
CallFrame call_frame = builder.Build();
std::vector<se::DeviceMemoryBase> updated_args(num_args, memory);
for (auto _ : state) {
auto updated_call_frame =
call_frame.CopyWithBuffers(updated_args, {});
benchmark::DoNotOptimize(updated_call_frame);
}
}
void BM_UpdateCallFrameInPlace(benchmark::State& state) {
size_t num_args = state.range(0);
se::DeviceMemoryBase memory(reinterpret_cast<void*>(0x12345678), 1024);
std::vector<int64_t> dims = {1, 2, 3, 4};
CallFrameBuilder builder(num_args, 0);
for (size_t i = 0; i < num_args; ++i) {
builder.AddBufferArg(se::DeviceMemoryBase(nullptr, 1024),
PrimitiveType::F32, dims);
}
CallFrame call_frame = builder.Build();
std::vector<se::DeviceMemoryBase> updated_args(num_args, memory);
for (auto _ : state) {
benchmark::DoNotOptimize(
call_frame.UpdateWithBuffers(updated_args, {}));
}
}
#define BENCHMARK_SIZES(name) \
BENCHMARK(name)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(16)->Arg(32)->Arg(64)
BENCHMARK_SIZES(BM_AddBufferArg);
BENCHMARK_SIZES(BM_AddAttributes);
BENCHMARK_SIZES(BM_UpdateCallFrame);
BENCHMARK_SIZES(BM_UpdateCallFrameInPlace);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/call_frame.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ffi/call_frame_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0083186-28e1-4ea5-95ad-1a8f341b1df3 | cpp | tensorflow/tensorflow | hlo_evaluator | third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc | third_party/xla/xla/hlo/evaluator/hlo_evaluator_test.cc | #include "xla/hlo/evaluator/hlo_evaluator.h"
#include <algorithm>
#include <atomic>
#include <cmath>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <random>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/memory/memory.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array2d.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator_typed_visitor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using primitive_util::NativeTypeOf;
template <typename OperandT>
absl::StatusOr<Literal> Compare(const Shape& shape, Comparison comparison,
LiteralSlice lhs_literal,
LiteralSlice rhs_literal) {
auto populate = [&](auto compare_op) -> absl::StatusOr<Literal> {
Literal result(shape);
TF_RETURN_IF_ERROR(result.PopulateParallel<bool>(
[&](absl::Span<const int64_t> multi_index, int ) {
auto lhs = lhs_literal.Get<OperandT>(multi_index);
auto rhs = rhs_literal.Get<OperandT>(multi_index);
if constexpr (is_specialized_floating_point_v<OperandT>) {
if (comparison.IsTotalOrder()) {
return compare_op(ToSignMagnitude(lhs), ToSignMagnitude(rhs));
}
}
return compare_op(lhs, rhs);
}));
return std::move(result);
};
switch (comparison.GetDirection()) {
case ComparisonDirection::kEq:
return populate([](auto lhs, auto rhs) { return lhs == rhs; });
case ComparisonDirection::kNe:
return populate([](auto lhs, auto rhs) { return lhs != rhs; });
case ComparisonDirection::kGe:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs >= rhs; });
}
break;
case ComparisonDirection::kGt:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs > rhs; });
}
break;
case ComparisonDirection::kLe:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs <= rhs; });
}
break;
case ComparisonDirection::kLt:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs < rhs; });
}
break;
}
LOG(FATAL) << "unhandled direction for conversion to Comparison: "
<< comparison.ToString();
}
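// For floating-point operand types, a total-order comparison above maps each
// value through ToSignMagnitude and compares the resulting integers, which
// emulates the IEEE-754 totalOrder predicate (e.g. -NaN orders below -Inf and
// +NaN above +Inf).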
std::optional<bool> GetInstructionStaticValueAsBool(
const HloInstruction* instruction) {
HloEvaluator evaluator;
absl::StatusOr<Literal> static_value =
      evaluator.Evaluate(instruction, /*precomputed_analyses=*/{},
                         /*recursively_evaluate_nonconstant_operands=*/true);
if (static_value.ok()) {
return static_value->GetFirstElement<bool>();
}
return std::nullopt;
}
template <PrimitiveType kType>
struct PopulateParallelImpl {
using NativeT = NativeTypeOf<kType>;
static absl::Status Run(
Literal& literal,
absl::FunctionRef<Literal(absl::Span<const int64_t>, int)>
literal_generator) {
return literal.PopulateParallel<NativeT>(
[&literal_generator](absl::Span<const int64_t> output_index,
int thread_id) {
return literal_generator(output_index, thread_id)
.template Get<NativeT>({});
});
}
};
template <PrimitiveType kType>
struct PopulateImpl {
using NativeT = NativeTypeOf<kType>;
static absl::Status Run(
Literal& literal,
absl::FunctionRef<Literal(absl::Span<const int64_t>)> literal_generator) {
return literal.Populate<NativeT>(
[&literal_generator](absl::Span<const int64_t> output_index) {
return literal_generator(output_index).template Get<NativeT>({});
});
}
};
template <template <PrimitiveType> typename Trait, typename F>
absl::Status Apply(Literal& literal, F&& literal_generator) {
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&, literal_generator = std::forward<F>(literal_generator)](
auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return Trait<primitive_type_constant>::Run(
literal, std::move(literal_generator));
}
LOG(FATAL) << "Unhandled primitive type "
<< literal.shape().element_type();
},
literal.shape().element_type());
}
absl::Status MakeEvalErrorDueToParamOrInfeed(
const HloInstruction& eval_instruction) {
absl::Status error = absl::FailedPreconditionError(absl::StrCat(
"Failed to evaluate instruction (", eval_instruction.name(),
") since it depends on infeed or parameters to its parent computation (",
eval_instruction.parent()->name(), ")."));
std::string error_payload;
error_payload.resize(sizeof(internal::EvalErrorDetail));
absl::little_endian::Store32(
const_cast<char*>(error_payload.data()),
static_cast<uint32_t>(
internal::EvalErrorDetail::kDynamicValueDependence));
error.SetPayload(internal::kEvalErrorDetailUrl, absl::Cord(error_payload));
return error;
}
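// The payload written above is a single little-endian uint32; it must stay in
// sync with internal::ParseEvalErrorDetail further below, which decodes it
// back into an EvalErrorDetail.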
struct DynamicOrStaticInteger {
std::optional<int64_t> static_value;
bool is_dynamic() const { return !static_value.has_value(); }
std::string ToString() const {
return is_dynamic() ? std::string("DYNAMIC") : absl::StrCat(*static_value);
}
};
std::optional<DynamicOrStaticInteger> GetInstructionValueAsInteger(
const HloInstruction* instruction,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
HloEvaluator evaluator;
absl::StatusOr<Literal> static_value =
evaluator.Evaluate(instruction, precomputed_analyses,
                         /*recursively_evaluate_nonconstant_operands=*/true);
if (static_value.ok()) {
if (instruction->shape().element_type() == PrimitiveType::PRED) {
return DynamicOrStaticInteger{
static_cast<int64_t>(static_value->GetFirstElement<bool>())};
} else {
return DynamicOrStaticInteger{static_value->GetFirstInteger()};
}
}
std::optional<internal::EvalErrorDetail> eval_error_detail =
internal::ParseEvalErrorDetail(static_value.status());
if (eval_error_detail.has_value() &&
*eval_error_detail ==
internal::EvalErrorDetail::kDynamicValueDependence) {
return DynamicOrStaticInteger{std::nullopt};
}
return std::nullopt;
}
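// Three-way outcome: a concrete integer when evaluation succeeds, a
// DynamicOrStaticInteger carrying no static_value when evaluation failed only
// because of parameter/infeed dependence, and std::nullopt for any other
// evaluation failure.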
struct ParamIndexAndValue {
std::optional<int64_t> param_index;
std::optional<DynamicOrStaticInteger> value;
bool IsValid() const { return param_index.has_value() || value.has_value(); }
std::string ToString() const {
return absl::StrCat(
"param_index:",
!param_index.has_value() ? std::string("UNKNOWN")
: absl::StrCat(*param_index),
",", "value:",
!value.has_value() ? std::string("UNKONWN") : value->ToString());
}
};
std::optional<ParamIndexAndValue> TryParsingInstructionAsParameterAndInteger(
const HloInstruction* instruction,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (instruction->opcode() == HloOpcode::kCopy) {
return TryParsingInstructionAsParameterAndInteger(instruction->operand(0),
precomputed_analyses);
}
if (instruction->opcode() == HloOpcode::kCopyDone) {
return TryParsingInstructionAsParameterAndInteger(
instruction->operand(0)->operand(1), precomputed_analyses);
}
ParamIndexAndValue result;
if (Match(instruction, match::GetTupleElement().WithOperand(
0, match::Parameter().WithParameterNum(0)))) {
result.param_index = instruction->tuple_index();
}
std::optional<DynamicOrStaticInteger> integer_value =
GetInstructionValueAsInteger(instruction, precomputed_analyses);
result.value = std::move(integer_value);
if (!result.IsValid()) {
return std::nullopt;
}
return std::optional<ParamIndexAndValue>(std::move(result));
}
struct WhileCondComparison {
ComparisonDirection comparison_direction;
ParamIndexAndValue lhs;
ParamIndexAndValue rhs;
std::string ToString() const {
return absl::StrCat("WhileCondComparison{", "LHS:{", lhs.ToString(),
"},RHS:{", rhs.ToString(), "}}");
}
};
using WhileCondComparisonOrNoOp =
std::variant<WhileCondComparison, ParamIndexAndValue>;
std::optional<ParamIndexAndValue> ParseComparisonOperand(
const HloInstruction* operand,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (operand->opcode() == HloOpcode::kCopy ||
operand->opcode() == HloOpcode::kCopyStart ||
operand->opcode() == HloOpcode::kCopyDone) {
return ParseComparisonOperand(operand->operand(0), precomputed_analyses);
}
std::optional<int64_t> param_index;
if (Match(operand, match::GetTupleElement().WithOperand(
0, match::Parameter().WithParameterNum(0)))) {
param_index = operand->tuple_index();
}
std::optional<DynamicOrStaticInteger> operand_value =
GetInstructionValueAsInteger(operand, precomputed_analyses);
if (!param_index.has_value() && !operand_value.has_value()) {
return std::nullopt;
}
return ParamIndexAndValue{param_index, operand_value};
}
std::optional<WhileCondComparisonOrNoOp> PatternMatchLoopCondComparison(
const HloInstruction* comparison,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
CHECK_EQ(comparison->opcode(), HloOpcode::kCompare);
std::optional<ParamIndexAndValue> lhs =
ParseComparisonOperand(comparison->operand(0), precomputed_analyses);
std::optional<ParamIndexAndValue> rhs =
ParseComparisonOperand(comparison->operand(1), precomputed_analyses);
if (!lhs.has_value() || !rhs.has_value()) {
return std::nullopt;
}
return WhileCondComparison{comparison->comparison_direction(),
*std::move(lhs), *std::move(rhs)};
}
std::optional<WhileCondComparisonOrNoOp> PatternMatchLoopCondRoot(
const HloInstruction* loop_cond_root,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (loop_cond_root->opcode() == HloOpcode::kCopy) {
return PatternMatchLoopCondRoot(loop_cond_root->operand(0),
precomputed_analyses);
}
if (loop_cond_root->opcode() == HloOpcode::kCopyDone) {
return PatternMatchLoopCondRoot(loop_cond_root->operand(0)->operand(1),
precomputed_analyses);
}
if (loop_cond_root->opcode() == HloOpcode::kCompare) {
return PatternMatchLoopCondComparison(loop_cond_root, precomputed_analyses);
}
if (Match(loop_cond_root, match::GetTupleElement().WithOperand(
0, match::Parameter().WithParameterNum(0)))) {
if (loop_cond_root->shape().element_type() != PrimitiveType::PRED &&
loop_cond_root->shape().rank() != 0) {
return std::nullopt;
}
return ParamIndexAndValue{{loop_cond_root->tuple_index()}};
}
if (Match(loop_cond_root,
match::GetTupleElement().WithOperand(
0, match::Call().WithNumOperands(1).WithOperand(
0, match::Parameter().WithParameterNum(0))))) {
const HloInstruction* call_instruction = loop_cond_root->operand(0);
const HloComputation* to_apply = call_instruction->to_apply();
const HloInstruction* to_apply_root = to_apply->root_instruction();
if (Match(to_apply_root, match::Tuple())) {
return PatternMatchLoopCondRoot(
to_apply_root->operand(loop_cond_root->tuple_index()),
precomputed_analyses);
}
}
if (Match(loop_cond_root,
match::GetTupleElement().WithOperand(0, match::Tuple()))) {
const HloInstruction* new_cond_root =
loop_cond_root->operand(0)->operand(loop_cond_root->tuple_index());
return PatternMatchLoopCondRoot(new_cond_root, precomputed_analyses);
}
return std::nullopt;
}
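// The matcher above peels copy/copy-done chains and then recognizes three
// root shapes: a comparison, a scalar PRED read directly out of parameter 0,
// and two tuple-forwarding forms (through a single-operand call whose
// to_apply root is a tuple, or through a literal tuple) into which it
// recurses.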
std::optional<DynamicOrStaticInteger> PatternMatchInductionVarUpdate(
const HloInstruction* induction_var_update, int64_t tuple_index,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (induction_var_update->opcode() == HloOpcode::kCopy) {
return PatternMatchInductionVarUpdate(induction_var_update->operand(0),
tuple_index, precomputed_analyses);
}
if (induction_var_update->opcode() == HloOpcode::kCopyDone) {
return PatternMatchInductionVarUpdate(
induction_var_update->operand(0)->operand(1), tuple_index,
precomputed_analyses);
}
std::optional<ParamIndexAndValue> update_param_index_and_value =
TryParsingInstructionAsParameterAndInteger(induction_var_update,
precomputed_analyses);
if (update_param_index_and_value.has_value()) {
if (update_param_index_and_value->param_index.has_value()) {
if (*update_param_index_and_value->param_index == tuple_index) {
VLOG(3) << "PatternMatchInductionVarUpdate, pattern: [induc_var].";
return DynamicOrStaticInteger{0};
} else {
VLOG(3)
<< "PatternMatchInductionVarUpdate, induction variable is set to "
"another parameter value. Parsed update: "
<< update_param_index_and_value->ToString();
return std::nullopt;
}
}
if (update_param_index_and_value->value.has_value() &&
!update_param_index_and_value->value->is_dynamic()) {
VLOG(3) << "PatternMatchInductionVarUpdate, induction variable is set to "
"a constant. Parsed update: "
<< update_param_index_and_value->ToString();
return std::nullopt;
}
}
if (induction_var_update->opcode() != HloOpcode::kAdd &&
induction_var_update->opcode() != HloOpcode::kSubtract) {
return std::nullopt;
}
bool negate_update = induction_var_update->opcode() == HloOpcode::kSubtract;
const HloInstruction* update_lhs = induction_var_update->operand(0);
VLOG(3) << "PatternMatchInductionVarUpdate, LHS: " << update_lhs->ToString();
std::optional<ParamIndexAndValue> update_lhs_param_index_and_value =
TryParsingInstructionAsParameterAndInteger(update_lhs,
precomputed_analyses);
const HloInstruction* update_rhs = induction_var_update->operand(1);
VLOG(3) << "PatternMatchInductionVarUpdate, RHS: " << update_rhs->ToString();
std::optional<ParamIndexAndValue> update_rhs_param_index_and_value =
TryParsingInstructionAsParameterAndInteger(update_rhs,
precomputed_analyses);
if (!update_lhs_param_index_and_value.has_value() ||
!update_lhs_param_index_and_value->value.has_value() ||
!update_rhs_param_index_and_value.has_value() ||
!update_rhs_param_index_and_value->value.has_value()) {
VLOG(3) << "PatternMatchInductionVarUpdate, failed to parse operands. "
"Induction var update instruction: "
<< induction_var_update->ToString();
return std::nullopt;
}
VLOG(3) << "update_lhs: " << update_lhs->ToString();
VLOG(3) << "update_rhs: " << update_rhs->ToString();
if (update_lhs_param_index_and_value->param_index.has_value() &&
*update_lhs_param_index_and_value->param_index == tuple_index &&
update_lhs_param_index_and_value->value->is_dynamic()) {
if (update_rhs_param_index_and_value->value->is_dynamic()) {
return update_rhs_param_index_and_value->value;
}
int64_t update_value =
*update_rhs_param_index_and_value->value->static_value;
return negate_update
? DynamicOrStaticInteger{-update_value}
: DynamicOrStaticInteger{update_value};
}
if (update_rhs_param_index_and_value->param_index.has_value() &&
*update_rhs_param_index_and_value->param_index == tuple_index &&
update_rhs_param_index_and_value->value->is_dynamic() && !negate_update) {
return update_lhs_param_index_and_value->value;
}
VLOG(3) << "Failed to pattern match induction variable update.";
return std::nullopt;
}
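// The value produced above is the per-iteration delta applied to the
// induction variable: 0 when the body forwards the variable unchanged, the
// constant addend (negated for subtraction) for updates of the form
// i = i +/- c, and a dynamic marker when the step is not a compile-time
// constant.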
std::optional<DynamicOrStaticInteger>
PatternMatchInductionVarUpdateFromLoopBodyRoot(
const HloInstruction* loop_body_root, int64_t tuple_index,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
if (loop_body_root->opcode() != HloOpcode::kTuple ||
loop_body_root->operand_count() <= tuple_index) {
return std::nullopt;
}
const HloInstruction* induction_var_update =
loop_body_root->operand(tuple_index);
return PatternMatchInductionVarUpdate(induction_var_update, tuple_index,
precomputed_analyses);
}
std::optional<bool> PatternMatchLoopCondVarOverride(
const HloInstruction* loop_body_root, int64_t tuple_index) {
if (!Match(loop_body_root, match::Tuple()) ||
loop_body_root->operand_count() <= tuple_index) {
return std::nullopt;
}
const HloInstruction* cond_var_override =
loop_body_root->operand(tuple_index);
return GetInstructionStaticValueAsBool(cond_var_override);
}
std::optional<DynamicOrStaticInteger> EvaluateWhileLoopParamInitValue(
const HloInstruction* param_instruction, int64_t tuple_index) {
if (param_instruction->opcode() != HloOpcode::kTuple) {
return std::nullopt;
}
const HloInstruction* element_instruction =
param_instruction->operand(tuple_index);
return GetInstructionValueAsInteger(element_instruction,
                                      /*precomputed_analyses=*/{});
}
}
namespace internal {
constexpr absl::string_view kEvalErrorDetailUrl = "EvalErrorDetailUrl";
std::optional<EvalErrorDetail> ParseEvalErrorDetail(const absl::Status& error) {
auto error_detail = error.GetPayload(kEvalErrorDetailUrl);
if (!error_detail.has_value() || error_detail->empty()) {
return std::nullopt;
}
return static_cast<EvalErrorDetail>(
absl::little_endian::Load32(error_detail->Flatten().data()));
}
}
std::optional<ParsedWhileLoop> HandleNoopLoopCondition(
const ParamIndexAndValue& parameter_index_and_value,
const HloInstruction* while_operand, const HloComputation* while_body) {
CHECK(parameter_index_and_value.param_index.has_value());
int64_t loop_cond_var_index = *parameter_index_and_value.param_index;
std::optional<DynamicOrStaticInteger> noop_value =
EvaluateWhileLoopParamInitValue(while_operand, loop_cond_var_index);
if (noop_value.has_value()) {
if (noop_value->is_dynamic()) {
return kParsedDynamicWhileLoop;
} else if (*noop_value->static_value == 0) {
return ParsedWhileLoop{
        ParsedStaticWhileLoop{/*trip_count=*/0,
                              /*induction_var_index=*/loop_cond_var_index,
                              /*induction_var_init_value=*/0,
                              /*step_size=*/0,
                              /*loop_bound=*/0}};
}
std::optional<bool> updated_loop_cond_var = PatternMatchLoopCondVarOverride(
while_body->root_instruction(), loop_cond_var_index);
if (updated_loop_cond_var.has_value()) {
if (!*updated_loop_cond_var) {
return ParsedWhileLoop{
          ParsedStaticWhileLoop{/*trip_count=*/1,
                                /*induction_var_index=*/loop_cond_var_index,
                                /*induction_var_init_value=*/0,
                                /*step_size=*/1,
                                /*loop_bound=*/1}};
} else {
return ParsedWhileLoop{
          ParsedStaticWhileLoop{/*trip_count=*/-1,
                                /*induction_var_index=*/loop_cond_var_index,
                                /*induction_var_init_value=*/0,
                                /*step_size=*/0,
                                /*loop_bound=*/1}};
}
}
}
return std::nullopt;
}
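// Summary of the cases above: when the loop condition is just a PRED element
// of the parameter tuple, the loop runs zero times if that element starts
// false, once if it starts true and the body rewrites it to false, and
// indefinitely (trip_count = -1) if the body keeps it true; a dynamic initial
// value reports kParsedDynamicWhileLoop, and anything else std::nullopt.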
int64_t ComputeTripCountFromComparison(int64_t init, int64_t bound,
int64_t update,
bool comparison_with_equal) {
if (comparison_with_equal && init > bound) {
return 0;
}
if (!comparison_with_equal && init >= bound) {
return 0;
}
int64_t distance = bound - init;
int64_t trip_count = (distance + update - 1) / update;
CHECK_GE(trip_count, 0);
if (comparison_with_equal && (bound - init) % update == 0) {
trip_count += 1;
}
return trip_count;
}
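// Worked example of the arithmetic above (illustrative): init = 0, bound = 10,
// update = 3 with a strict comparison gives distance = 10 and trip_count =
// (10 + 3 - 1) / 3 = 4 (i = 0, 3, 6, 9). With comparison_with_equal = true and
// init = 0, bound = 9, update = 3: (9 + 2) / 3 = 3, and because
// (9 - 0) % 3 == 0 the bound itself is reached exactly, adding one more
// iteration for a total of 4 (i = 0, 3, 6, 9).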
std::optional<ParsedWhileLoop> HandleStaticLoopComparison(
int64_t lhs, int64_t rhs, Comparison::Direction comparison_direction) {
if ((comparison_direction == Comparison::Direction::kLt && lhs < rhs) ||
(comparison_direction == Comparison::Direction::kLe && lhs <= rhs) ||
(comparison_direction == Comparison::Direction::kGt && lhs > rhs) ||
(comparison_direction == Comparison::Direction::kGe && lhs >= rhs) ||
(comparison_direction == Comparison::Direction::kEq && lhs == rhs) ||
(comparison_direction == Comparison::Direction::kNe && lhs != rhs)) {
    return ParsedWhileLoop{ParsedStaticWhileLoop{/*trip_count=*/-1,
                                                 /*induction_var_index=*/-1,
                                                 /*induction_var_init_value=*/0,
                                                 /*step_size=*/0,
                                                 /*loop_bound=*/1}};
}
  return ParsedWhileLoop{ParsedStaticWhileLoop{/*trip_count=*/0,
                                               /*induction_var_index=*/-1,
                                               /*induction_var_init_value=*/0,
                                               /*step_size=*/0,
                                               /*loop_bound=*/0}};
}
std::optional<ParsedWhileLoop> PatternMatchParseWhileLoop(
const HloInstruction* while_op,
HloEvaluator::PrecomputedAnalyses precomputed_analyses) {
VLOG(3) << "PatternMatchParseWhileLoop, while_op: " << while_op->name();
const HloComputation* while_cond = while_op->while_condition();
const HloComputation* while_body = while_op->while_body();
const HloInstruction* while_operand = while_op->operand(0);
std::optional<WhileCondComparisonOrNoOp> loop_comparison_or_noop =
PatternMatchLoopCondRoot(while_cond->root_instruction(),
precomputed_analyses);
if (!loop_comparison_or_noop.has_value()) {
return std::nullopt;
}
if (loop_comparison_or_noop->index() == 1) {
return HandleNoopLoopCondition(
std::get<ParamIndexAndValue>(*loop_comparison_or_noop), while_operand,
while_body);
}
CHECK_EQ(loop_comparison_or_noop->index(), 0);
WhileCondComparison loop_comparison =
std::get<WhileCondComparison>(*loop_comparison_or_noop);
CHECK(loop_comparison.lhs.IsValid() && loop_comparison.rhs.IsValid());
if (while_operand->opcode() != HloOpcode::kTuple) {
return std::nullopt;
}
if (!loop_comparison.lhs.value.has_value() ||
!loop_comparison.rhs.value.has_value()) {
return std::nullopt;
}
CHECK(loop_comparison.lhs.value.has_value());
CHECK(loop_comparison.rhs.value.has_value());
VLOG(3) << loop_comparison.ToString();
if (loop_comparison.lhs.value->is_dynamic() &&
loop_comparison.rhs.value->is_dynamic()) {
VLOG(3) << "Both operands of the loop condition comparison are dynamic.";
return std::nullopt;
}
CHECK(!loop_comparison.lhs.value->is_dynamic() ||
!loop_comparison.rhs.value->is_dynamic());
if (!loop_comparison.lhs.value->is_dynamic() &&
!loop_comparison.rhs.value->is_dynamic()) {
int64_t lhs_value = *loop_comparison.lhs.value->static_value;
int64_t rhs_value = *loop_comparison.rhs.value->static_value;
Comparison::Direction comparison_direction =
loop_comparison.comparison_direction;
return HandleStaticLoopComparison(lhs_value, rhs_value,
comparison_direction);
}
std::optional<DynamicOrStaticInteger> induction_var_init;
std::optional<DynamicOrStaticInteger> induction_var_update;
bool lhs_is_induction_var = true;
if (loop_comparison.lhs.value->is_dynamic()) {
if (loop_comparison.lhs.param_index.has_value()) {
VLOG(3) << "Comparison LHS is induction variable.";
induction_var_init = EvaluateWhileLoopParamInitValue(
while_operand, *loop_comparison.lhs.param_index);
induction_var_update = PatternMatchInductionVarUpdateFromLoopBodyRoot(
while_body->root_instruction(), *loop_comparison.lhs.param_index,
precomputed_analyses);
lhs_is_induction_var = true;
}
} else {
CHECK(loop_comparison.rhs.value->is_dynamic());
if (loop_comparison.rhs.param_index.has_value()) {
VLOG(3) << "Comparison RHS is induction variable.";
induction_var_init = EvaluateWhileLoopParamInitValue(
while_operand, *loop_comparison.rhs.param_index);
induction_var_update = PatternMatchInductionVarUpdateFromLoopBodyRoot(
while_body->root_instruction(), *loop_comparison.rhs.param_index,
precomputed_analyses);
lhs_is_induction_var = false;
}
}
if (!induction_var_init.has_value() || !induction_var_update.has_value()) {
return std::nullopt;
}
VLOG(3) << "induction_var_init: " << induction_var_init->ToString();
VLOG(3) << "induction_var_update: " << induction_var_update->ToString();
if (induction_var_init->is_dynamic() || induction_var_update->is_dynamic()) {
return kParsedDynamicWhileLoop;
}
int64_t init_value = *induction_var_init->static_value;
int64_t update_value = *induction_var_update->static_value;
Comparison::Direction comparison_direction =
loop_comparison.comparison_direction;
  ParsedWhileLoop parsed_static_while_loop = ParsedWhileLoop{
      ParsedStaticWhileLoop{/*trip_count=*/0,
                            /*induction_var_index=*/-1,
                            /*induction_var_init_value=*/init_value,
                            /*step_size=*/update_value,
                            /*loop_bound=*/-1}};
if (lhs_is_induction_var) {
CHECK(loop_comparison.rhs.value.has_value() &&
!loop_comparison.rhs.value->is_dynamic());
int64_t bound = *loop_comparison.rhs.value->static_value;
parsed_static_while_loop.static_while_loop->induction_var_index =
*loop_comparison.lhs.param_index;
parsed_static_while_loop.static_while_loop->loop_bound = bound;
if (update_value > 0 &&
(comparison_direction == Comparison::Direction::kLt ||
comparison_direction == Comparison::Direction::kLe)) {
int64_t trip_count = ComputeTripCountFromComparison(
init_value, bound, update_value,
comparison_direction == Comparison::Direction::kLe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
if (update_value < 0 &&
(comparison_direction == Comparison::Direction::kGt ||
comparison_direction == Comparison::Direction::kGe)) {
int64_t trip_count = ComputeTripCountFromComparison(
bound, init_value, -update_value,
comparison_direction == Comparison::Direction::kGe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
return std::nullopt;
}
CHECK(loop_comparison.lhs.value.has_value() &&
!loop_comparison.lhs.value->is_dynamic());
int64_t bound = *loop_comparison.lhs.value->static_value;
parsed_static_while_loop.static_while_loop->induction_var_index =
*loop_comparison.rhs.param_index;
parsed_static_while_loop.static_while_loop->loop_bound = bound;
if (update_value > 0 &&
(comparison_direction == Comparison::Direction::kGt ||
comparison_direction == Comparison::Direction::kGe)) {
int64_t trip_count = ComputeTripCountFromComparison(
init_value, bound, update_value,
comparison_direction == Comparison::Direction::kGe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
if (update_value < 0 &&
(comparison_direction == Comparison::Direction::kLt ||
comparison_direction == Comparison::Direction::kLe)) {
int64_t trip_count = ComputeTripCountFromComparison(
bound, init_value, -update_value,
comparison_direction == Comparison::Direction::kLe);
parsed_static_while_loop.static_while_loop->trip_count = trip_count;
return parsed_static_while_loop;
}
return std::nullopt;
}
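// The constructor populates one typed visitor per array primitive type.
// Narrow integer types do their element-wise arithmetic in int64_t (uint64_t
// for unsigned types) and sub-32-bit floats such as bfloat16 compute in
// float, converting at the boundaries; TUPLE, OPAQUE_TYPE, and TOKEN get
// stub visitors that report the type as unhandled.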
HloEvaluator::HloEvaluator(int64_t max_loop_iterations)
: max_loop_iterations_(max_loop_iterations) {
for (int i = PrimitiveType_MIN; i < PrimitiveType_ARRAYSIZE; ++i) {
if (!primitive_util::IsArrayType(PrimitiveType{i})) {
continue;
}
primitive_util::PrimitiveTypeSwitch<void>(
[&](auto primitive_type) {
if constexpr (primitive_util::IsArrayType(primitive_type)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type>;
if constexpr (primitive_util::IsSignedIntegralType(
primitive_type)) {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT, int64_t>>(
this);
} else if constexpr (primitive_util::IsUnsignedIntegralType(
primitive_type)) {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT, uint64_t>>(
this);
} else if constexpr (primitive_util::IsFloatingPointType(
primitive_type) &&
sizeof(NativeT) < sizeof(float)) {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT, float>>(
this);
} else {
typed_visitors_[primitive_type] =
std::make_unique<HloEvaluatorTypedVisitor<NativeT>>(this);
}
}
},
PrimitiveType{i});
}
typed_visitors_[TUPLE] =
std::make_unique<ConstFunctionVisitor>([](const HloInstruction*) {
return Unimplemented(
"HloEvaluatorTypedVisitor: unhandled primitive type: TUPLE.");
});
typed_visitors_[OPAQUE_TYPE] =
std::make_unique<ConstFunctionVisitor>([](const HloInstruction*) {
return Unimplemented(
"HloEvaluatorTypedVisitor: unhandled primitive type: OPAQUE_TYPE.");
});
typed_visitors_[TOKEN] =
std::make_unique<ConstFunctionVisitor>([](const HloInstruction*) {
return Unimplemented(
"HloEvaluatorTypedVisitor: unhandled primitive type: TOKEN.");
});
}
absl::StatusOr<Literal> HloEvaluator::Evaluate(
const HloComputation& computation,
absl::Span<const Literal* const> arg_literals) {
CHECK(computation.parent() != nullptr);
XLA_VLOG_LINES(
2, "HloEvaluator::Evaluate computation:\n" + computation.ToString());
OnEvaluateComputation(computation);
if (arg_literals.size() != computation.num_parameters()) {
return InvalidArgument(
"Expected %d argument%s, but got %d.", computation.num_parameters(),
computation.num_parameters() == 1 ? "" : "s", arg_literals.size());
}
for (int64_t i = 0; i < arg_literals.size(); ++i) {
const auto& computation_shape =
computation.parameter_instruction(i)->shape();
const auto& arg_shape = arg_literals[i]->shape();
if (!Shape::Equal().MinorToMajorOnlyInLayout()(computation_shape,
arg_shape)) {
return InvalidArgument(
"Shape mismatch at parameter %d. Computation expected %s, but arg "
"was %s.",
i, ShapeUtil::HumanStringWithLayout(computation_shape),
ShapeUtil::HumanStringWithLayout(arg_shape));
}
}
evaluated_.clear();
arg_literals_.clear();
call_graph_cache_.reset();
tuple_points_to_analysis_cache_.reset();
for (const auto& literal_ptr : arg_literals) {
arg_literals_.push_back(&*literal_ptr);
}
if (computation.parent()->config().seed()) {
seed_ = computation.parent()->config().seed();
} else {
static std::atomic<uint64_t> global_seed{std::random_device()()};
seed_ = global_seed.fetch_add(1);
}
engine_.seed(seed_);
TF_RETURN_IF_ERROR(computation.Accept(this));
const Literal& result =
GetEvaluatedLiteralFor(computation.root_instruction());
if (VLOG_IS_ON(100)) {
for (const HloInstruction* instr : computation.instructions()) {
VLOG(100) << instr->name() << " = " << GetEvaluatedLiteralFor(instr);
}
}
if (!result.IsKnown()) {
return MakeEvalErrorDueToParamOrInfeed(*computation.root_instruction());
}
return result.Clone();
}
absl::StatusOr<Literal> HloEvaluator::Evaluate(
const HloInstruction* instruction, PrecomputedAnalyses precomputed_analyses,
bool recursively_evaluate_nonconstant_operands) {
arg_literals_.clear();
evaluated_.clear();
call_graph_cache_.reset();
tuple_points_to_analysis_cache_.reset();
auto enable_partial_evaluation_cleanup =
absl::MakeCleanup([this] { enable_partial_evaluation_ = false; });
enable_partial_evaluation_ = recursively_evaluate_nonconstant_operands;
TF_RETURN_IF_ERROR(
EvaluateInternal(instruction, precomputed_analyses, {},
recursively_evaluate_nonconstant_operands));
const Literal& result = GetEvaluatedLiteralFor(instruction);
if (!result.IsKnown()) {
return MakeEvalErrorDueToParamOrInfeed(*instruction);
}
return result.Clone();
}
bool HloEvaluator::TryEvaluate(const HloInstruction* instruction,
Literal* result,
bool recursively_evaluate_nonconstant_operands) {
CHECK(result != nullptr);
  auto result_or = Evaluate(instruction, /*precomputed_analyses=*/{},
                            recursively_evaluate_nonconstant_operands);
if (!result_or.ok()) {
VLOG(1) << "TryEvaluate failed:" << result_or.status();
return false;
}
*result = std::move(result_or).value();
return true;
}
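// Evaluates `instruction` as if each operand listed in `substitutions` were
// replaced by the given literal: substituted operands are materialized as
// constant instructions, the instruction is cloned with the new operand
// list, and the clone is evaluated.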
absl::StatusOr<Literal> HloEvaluator::EvaluateWithSubstitutions(
const HloInstruction* instruction,
const absl::flat_hash_map<const HloInstruction*, const LiteralBase*>&
substitutions) {
std::vector<std::unique_ptr<HloInstruction>> owned_operands;
for (const HloInstruction* operand : instruction->operands()) {
auto it = substitutions.find(operand);
if (it == substitutions.end()) {
owned_operands.push_back(operand->Clone());
} else {
owned_operands.push_back(
HloInstruction::CreateConstant(it->second->Clone()));
}
}
std::vector<HloInstruction*> operands;
operands.reserve(owned_operands.size());
for (auto& operand : owned_operands) {
operands.push_back(operand.get());
}
std::unique_ptr<HloInstruction> cloned_instruction =
instruction->CloneWithNewOperands(instruction->shape(), operands);
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseBinaryOp(
HloOpcode opcode, const Literal& lhs, const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateBinary(lhs.shape(), opcode, lhs_instr.get(),
rhs_instr.get());
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseTernaryOp(
HloOpcode opcode, const Literal& lhs, const Literal& rhs,
const Literal& ehs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
std::unique_ptr<HloInstruction> ehs_instr =
HloInstruction::CreateConstant(ehs.Clone());
TF_ASSIGN_OR_RETURN(auto output_shape,
ShapeInference::InferTernaryOpShape(
opcode, lhs.shape(), rhs.shape(), ehs.shape()));
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateTernary(output_shape, opcode, lhs_instr.get(),
rhs_instr.get(), ehs_instr.get());
return Evaluate(cloned_instruction.get());
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseCompareOp(
ComparisonDirection direction, const Literal& lhs, const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(lhs.shape(), PRED), lhs_instr.get(),
rhs_instr.get(), direction);
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateElementwiseUnaryOp(
HloOpcode opcode, const Literal& operand) {
std::unique_ptr<HloInstruction> operand_instr =
HloInstruction::CreateConstant(operand.Clone());
TF_ASSIGN_OR_RETURN(Shape inferred_shape, ShapeInference::InferUnaryOpShape(
opcode, operand.shape()));
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateUnary(inferred_shape, opcode, operand_instr.get());
auto result = Evaluate(cloned_instruction.get());
return result;
}
absl::StatusOr<Literal> HloEvaluator::EvaluateDotOp(
const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config, const Literal& lhs,
const Literal& rhs) {
std::unique_ptr<HloInstruction> lhs_instr =
HloInstruction::CreateConstant(lhs.Clone());
std::unique_ptr<HloInstruction> rhs_instr =
HloInstruction::CreateConstant(rhs.Clone());
TF_ASSIGN_OR_RETURN(
Shape dot_shape,
      ShapeInference::InferDotOpShape(lhs.shape(), rhs.shape(), dim_numbers,
                                      /*preferred_element_type=*/std::nullopt));
std::unique_ptr<HloInstruction> cloned_instruction =
HloInstruction::CreateDot(dot_shape, lhs_instr.get(), rhs_instr.get(),
dim_numbers, precision_config);
return Evaluate(cloned_instruction.get());
}
absl::Status HloEvaluator::EvaluateParameterFromCallerArgument(
const HloInstruction* parameter, const ShapeIndex& shape_index,
PrecomputedAnalyses analyses) {
CHECK(!evaluated_.contains(parameter));
const HloComputation* parent_computation = parameter->parent();
std::vector<HloInstruction*> computation_callers =
analyses.call_graph->GetComputationCallers(parent_computation);
if (computation_callers.size() != 1) {
return tsl::errors::FailedPrecondition(
"The computation ", parent_computation->name(), " is called by ",
computation_callers.size(),
" callers and thus its argument value "
"cannot be determined statically.");
}
const HloInstruction* computation_caller = computation_callers[0];
const HloInstruction* caller_operand = computation_caller->operand(0);
if (computation_caller->opcode() != HloOpcode::kWhile &&
computation_caller->opcode() != HloOpcode::kCall) {
return tsl::errors::FailedPrecondition(
"The computation ", parent_computation->name(), " is called by ",
"instruction ", computation_caller->name(),
", which is not yet supported.");
}
if (computation_caller->opcode() == HloOpcode::kWhile) {
HloComputation* while_body = computation_caller->while_body();
TF_ASSIGN_OR_RETURN(
const LogicalBuffer* logical_buffer,
analyses.tuple_points_to->GetBufferDefinedAt(
while_body->parameter_instruction(parameter->parameter_number()),
shape_index));
const TuplePointsToAnalysis::BufferAliasVector& buffer_aliases =
analyses.tuple_points_to->GetBufferAliases(*logical_buffer);
bool unchanged_in_return = false;
for (const BufferAlias& buffer_alias : buffer_aliases) {
if (buffer_alias.instruction() == while_body->root_instruction() &&
buffer_alias.index() == shape_index) {
unchanged_in_return = true;
}
}
if (!unchanged_in_return) {
return MakeEvalErrorDueToParamOrInfeed(*parameter);
}
}
  TF_RETURN_IF_ERROR(EvaluateInternal(
      caller_operand, analyses, shape_index,
      /*recursively_evaluate_nonconstant_operands=*/true));
const Literal& caller_operand_literal =
GetEvaluatedLiteralFor(caller_operand);
evaluated_[parameter] =
Literal::CreateFromShapeWithUnknownLeafArrays(parameter->shape());
  TF_RETURN_IF_ERROR(evaluated_[parameter].CopyFrom(
      caller_operand_literal, /*dest_shape_index=*/shape_index,
      /*src_shape_index=*/shape_index));
return absl::OkStatus();
}
std::vector<int64_t> HloEvaluator::GetS64Indices(
absl::Span<HloInstruction* const> start_indices) {
auto get_first_s64 = [&](const Literal& index) -> int64_t {
return primitive_util::PrimitiveTypeSwitch<int64_t>(
[&](auto primitive_type_constant) -> int64_t {
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant)) {
return static_cast<int64_t>(
index.GetFirstElement<NativeTypeOf<primitive_type_constant>>());
}
LOG(FATAL) << "GetS64Indices: unhandled primitive type for "
<< PrimitiveType_Name(index.shape().element_type());
},
index.shape().element_type());
};
std::vector<int64_t> start;
start.reserve(start_indices.size());
for (HloInstruction* index : start_indices) {
start.push_back(get_first_s64(GetEvaluatedLiteralFor(index)));
}
return start;
}
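// Builds per-dimension multipliers for flattening a multi-dimensional index
// into a linear offset, walking dimensions from minor to major. Illustrative
// example (not from the source): a row-major shape [2, 3] yields multipliers
// {3, 1}, so index (i, j) maps to offset i * 3 + j.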
DimensionVector HloEvaluator::MakeDimMultipliers(const Shape& shape) {
DimensionVector v(shape.rank());
int64_t scale = 1;
for (auto dim : LayoutUtil::MinorToMajor(shape)) {
v[dim] = scale;
scale *= shape.dimensions(dim);
}
return v;
}
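// Core recursive evaluation routine. `shape_index` restricts evaluation to a
// sub-literal of `instruction`'s result. When recursive evaluation of
// non-constant operands is enabled, get-tuple-element, tuple, and parameter
// instructions are traced through to the values that feed them (parameters
// via the caller's argument); otherwise every operand must already be a
// constant.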
absl::Status HloEvaluator::EvaluateInternal(
const HloInstruction* instruction, PrecomputedAnalyses precomputed_analyses,
const ShapeIndex& shape_index,
bool recursively_evaluate_nonconstant_operands) {
if (IsAlreadyEvaluated(instruction, shape_index)) {
return absl::OkStatus();
}
if (!recursively_evaluate_nonconstant_operands) {
if (!hlo_query::AllOperandsAreConstants(*instruction)) {
return absl::FailedPreconditionError(
absl::StrCat("Not all operands are constants. Instruction: ",
instruction->ToString()));
}
} else {
if (instruction->opcode() == HloOpcode::kGetTupleElement) {
ShapeIndex new_shape_index = shape_index;
new_shape_index.push_front(instruction->tuple_index());
      TF_RETURN_IF_ERROR(EvaluateInternal(
          instruction->operand(0), precomputed_analyses, new_shape_index,
          /*recursively_evaluate_nonconstant_operands=*/true));
} else if (instruction->opcode() == HloOpcode::kTuple &&
!shape_index.empty()) {
ShapeIndex new_shape_index = shape_index;
int64_t tuple_index = new_shape_index.front();
new_shape_index.pop_front();
      TF_RETURN_IF_ERROR(EvaluateInternal(
          instruction->operand(tuple_index), precomputed_analyses,
          new_shape_index,
          /*recursively_evaluate_nonconstant_operands=*/true));
} else if (instruction->opcode() == HloOpcode::kParameter) {
CallGraph* call_graph =
(precomputed_analyses.call_graph != nullptr)
? precomputed_analyses.call_graph
: std::invoke([this, instruction]() -> CallGraph* {
call_graph_cache_ =
CallGraph::Build(instruction->GetModule());
return call_graph_cache_.get();
});
TuplePointsToAnalysis* tuple_points_to_analysis =
(precomputed_analyses.tuple_points_to != nullptr)
? precomputed_analyses.tuple_points_to
: std::invoke([this, instruction]() -> TuplePointsToAnalysis* {
absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>>
tuple_points_to_analysis =
TuplePointsToAnalysis::Run(instruction->GetModule());
if (!tuple_points_to_analysis.ok()) {
return nullptr;
}
tuple_points_to_analysis_cache_ =
*std::move(tuple_points_to_analysis);
return tuple_points_to_analysis_cache_.get();
});
if (call_graph && tuple_points_to_analysis) {
absl::Status argument_eval_status = EvaluateParameterFromCallerArgument(
instruction, shape_index, {tuple_points_to_analysis, call_graph});
if (!argument_eval_status.ok()) {
VLOG(4) << "Failed to evaluate parameter " << instruction->name()
<< " from caller. Reason: " << argument_eval_status.message();
} else {
VLOG(4) << "Successfully evaluated parameter: "
<< instruction->name();
}
}
} else {
for (HloInstruction* operand : instruction->operands()) {
        TF_RETURN_IF_ERROR(EvaluateInternal(
            operand, precomputed_analyses, /*shape_index=*/{},
            /*recursively_evaluate_nonconstant_operands=*/true));
if ((!GetEvaluatedLiteralFor(operand).IsKnown() &&
instruction->opcode() != HloOpcode::kCopy &&
instruction->opcode() != HloOpcode::kCopyStart &&
instruction->opcode() != HloOpcode::kCopyDone &&
instruction->opcode() != HloOpcode::kAsyncStart &&
instruction->opcode() != HloOpcode::kAsyncUpdate &&
instruction->opcode() != HloOpcode::kAsyncDone &&
instruction->opcode() != HloOpcode::kWhile)) {
evaluated_[instruction] =
Literal::CreateFromShapeWithUnknownLeafArrays(
instruction->shape());
return absl::OkStatus();
}
}
}
}
visitor_shape_index_ = shape_index;
TF_RETURN_IF_ERROR(Preprocess(instruction));
TF_RETURN_IF_ERROR(instruction->Visit(this));
TF_RETURN_IF_ERROR(Postprocess(instruction));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleBitcast(const HloInstruction* bitcast) {
const Literal& operand_literal = GetEvaluatedLiteralFor(bitcast->operand(0));
Literal result(bitcast->shape());
TF_RET_CHECK(operand_literal.size_bytes() >= result.size_bytes());
memcpy(result.untyped_data(), operand_literal.untyped_data(),
result.size_bytes());
evaluated_[bitcast] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleBitcastConvert(const HloInstruction* convert) {
const HloInstruction* operand = convert->operand(0);
TF_ASSIGN_OR_RETURN(
Literal result,
GetEvaluatedLiteralFor(operand).BitcastConvert(convert->shape()));
evaluated_[convert] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleGetDimensionSize(
const HloInstruction* get_dimension_size) {
const HloInstruction* operand = get_dimension_size->operand(0);
int64_t dim = get_dimension_size->dimension();
if (dynamic_dimension_inference_ == nullptr) {
return InvalidArgument(
"Evaluator cannot evaluate get_dimension_size without "
"set_dynamic_dimension_inference.");
}
const HloInstruction* dynamic_size =
dynamic_dimension_inference_->GetDynamicSize(operand, {}, dim);
if (dynamic_size != nullptr) {
evaluated_[get_dimension_size] =
GetEvaluatedLiteralFor(dynamic_size).Clone();
return absl::OkStatus();
}
const Shape& shape = get_dimension_size->operand(0)->shape();
Literal output(ShapeUtil::MakeShape(S32, {}));
output.PopulateWithValue(
static_cast<int32_t>(shape.dimensions(get_dimension_size->dimension())));
evaluated_[get_dimension_size] = std::move(output);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleSetDimensionSize(
const HloInstruction* set_dimension_size) {
const Literal& operand_literal =
GetEvaluatedLiteralFor(set_dimension_size->operand(0));
Literal result(set_dimension_size->shape());
memcpy(result.untyped_data(), operand_literal.untyped_data(),
operand_literal.size_bytes());
const Literal& size_literal =
GetEvaluatedLiteralFor(set_dimension_size->operand(1));
result.SetDynamicSize(set_dimension_size->dimension(),
size_literal.Get<int32_t>({}));
evaluated_[set_dimension_size] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleParameter(const HloInstruction* parameter) {
if (!IsAlreadyEvaluated(parameter, visitor_shape_index_)) {
if (!enable_partial_evaluation_) {
return tsl::errors::FailedPrecondition(
"Failed to evaluate instruction since its operands are unknown "
"or undetermined and partial evaluation is not enabled.");
}
evaluated_[parameter] =
Literal::CreateFromShapeWithUnknownLeafArrays(parameter->shape());
return absl::OkStatus();
}
if (!arg_literals_.empty()) {
CHECK_LT(parameter->parameter_number(), arg_literals_.size());
#ifndef NDEBUG
const Literal* input_literal = arg_literals_[parameter->parameter_number()];
VLOG(2) << "Parameter evaluated to: " << input_literal->ToString();
DCHECK(Shape::Equal().MinorToMajorOnlyInLayout()(parameter->shape(),
input_literal->shape()))
<< "parameter shape is: "
<< ShapeUtil::HumanStringWithLayout(parameter->shape())
<< ", but input literal shape is: "
<< ShapeUtil::HumanStringWithLayout(input_literal->shape());
#endif
}
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleInfeed(const HloInstruction* infeed) {
if (!enable_partial_evaluation_) {
return tsl::errors::FailedPrecondition(
"Failed to evaluate instruction since its operands are unknown "
"or undetermined and partial evaluation is not enabled.");
}
evaluated_[infeed] =
Literal::CreateFromShapeWithUnknownLeafArrays(infeed->shape());
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleConstant(const HloInstruction*) {
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleReshape(const HloInstruction* reshape) {
TF_ASSIGN_OR_RETURN(evaluated_[reshape],
GetEvaluatedLiteralFor(reshape->operand(0))
.Reshape(reshape->shape().dimensions()));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleTranspose(const HloInstruction* transpose) {
evaluated_[transpose] = GetEvaluatedLiteralFor(transpose->operand(0))
.Transpose(transpose->dimensions());
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleConcatenate(
const HloInstruction* concatenate) {
absl::Span<HloInstruction* const> operands(concatenate->operands());
const Shape& reference_shape = operands[0]->shape();
CHECK(reference_shape.IsArray());
const int64_t rank = reference_shape.rank();
const int64_t concat_dim = concatenate->dimensions()[0];
CHECK_GE(concat_dim, 0);
CHECK_LT(concat_dim, rank);
DimensionVector concat_dimensions(reference_shape.dimensions().begin(),
reference_shape.dimensions().end());
for (int64_t i = 1; i < operands.size(); ++i) {
const Shape& operand_shape = operands[i]->shape();
CHECK(operand_shape.IsArray());
concat_dimensions[concat_dim] +=
ShapeUtil::GetDimension(operand_shape, concat_dim);
}
auto result_literal = LiteralUtil::CreateFromDimensions(
reference_shape.element_type(), concat_dimensions);
DimensionVector source_indices(rank, 0);
DimensionVector dest_indices(concat_dimensions.size(), 0);
for (auto operand : operands) {
const Shape& operand_shape = operand->shape();
    TF_RETURN_IF_ERROR(result_literal.CopySliceFrom(
        GetEvaluatedLiteralFor(operand), /*src_base=*/source_indices,
        /*dest_base=*/dest_indices, /*copy_size=*/operand_shape.dimensions()));
dest_indices[concat_dim] +=
ShapeUtil::GetDimension(operand_shape, concat_dim);
}
evaluated_[concatenate] = std::move(result_literal);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleIsFinite(const HloInstruction* is_finite) {
auto operand = is_finite->operand(0);
auto elem_ty = operand->shape().element_type();
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or = ElementWiseUnaryOpImpl<bool, NativeT>(
is_finite,
[](NativeT elem_operand) {
return Eigen::numext::isfinite(elem_operand);
},
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[is_finite], std::move(result_or));
return absl::OkStatus();
}
return InvalidArgument(
"expected element type in shape to be floating point, but got: %s",
PrimitiveType_Name(elem_ty));
},
elem_ty);
}
absl::Status HloEvaluator::HandleReal(const HloInstruction* real) {
auto operand = real->operand(0);
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or = ElementWiseUnaryOpImpl<NativeT, NativeT>(
real, [](NativeT elem_operand) { return elem_operand; },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[real], std::move(result_or));
return absl::OkStatus();
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or =
ElementWiseUnaryOpImpl<typename NativeT::value_type, NativeT>(
real,
[](NativeT elem_operand) { return std::real(elem_operand); },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[real], std::move(result_or));
return absl::OkStatus();
}
LOG(FATAL) << "HandleReal: unknown/unhandled primitive type: "
<< PrimitiveType_Name(operand->shape().element_type());
},
operand->shape().element_type());
}
absl::Status HloEvaluator::HandleImag(const HloInstruction* imag) {
auto operand = imag->operand(0);
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or = ElementWiseUnaryOpImpl<NativeT, NativeT>(
imag, [](NativeT elem_operand) { return NativeT(0); },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[imag], std::move(result_or));
return absl::OkStatus();
}
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
auto result_or =
ElementWiseUnaryOpImpl<typename NativeT::value_type, NativeT>(
imag,
[](NativeT elem_operand) { return std::imag(elem_operand); },
GetEvaluatedLiteralFor(operand));
TF_ASSIGN_OR_RETURN(evaluated_[imag], std::move(result_or));
return absl::OkStatus();
}
LOG(FATAL) << "HandleImag: unknown/unhandled primitive type: "
<< PrimitiveType_Name(operand->shape().element_type());
},
operand->shape().element_type());
}
absl::Status HloEvaluator::HandleComplex(const HloInstruction* complex) {
const Literal& real = GetEvaluatedLiteralFor(complex->operand(0));
const Literal& imag = GetEvaluatedLiteralFor(complex->operand(1));
TF_RET_CHECK(ShapeUtil::Compatible(real.shape(), imag.shape()));
Literal result(complex->shape());
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
TF_RETURN_IF_ERROR(result.Populate<NativeT>(
[&](absl::Span<const int64_t> multi_index) {
return NativeT(
real.Get<typename NativeT::value_type>(multi_index),
imag.Get<typename NativeT::value_type>(multi_index));
}));
evaluated_[complex] = std::move(result);
return absl::OkStatus();
}
LOG(FATAL) << "HandleComplex: unknown/unhandled primitive type: "
<< PrimitiveType_Name(complex->shape().element_type());
},
complex->shape().element_type());
}
absl::Status HloEvaluator::HandleCompare(const HloInstruction* compare) {
ComparisonDirection direction = compare->comparison_direction();
ComparisonOrder order = compare->comparison_order();
auto lhs = compare->operand(0);
auto rhs = compare->operand(1);
DCHECK(ShapeUtil::SameDimensions(compare->shape(), rhs->shape()) &&
ShapeUtil::SameDimensions(lhs->shape(), rhs->shape()));
TF_RET_CHECK(lhs->shape().element_type() == rhs->shape().element_type());
auto element_type = lhs->shape().element_type();
Comparison comparison(direction, element_type, order);
const Literal& lhs_literal = GetEvaluatedLiteralFor(lhs);
const Literal& rhs_literal = GetEvaluatedLiteralFor(rhs);
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&](auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
TF_ASSIGN_OR_RETURN(evaluated_[compare],
Compare<NativeT>(compare->shape(), comparison,
lhs_literal, rhs_literal));
return absl::OkStatus();
}
LOG(FATAL) << "HandleCompare: unknown primitive type: "
<< PrimitiveType_Name(element_type);
},
element_type);
}
absl::Status HloEvaluator::HandleTuple(const HloInstruction* tuple) {
std::vector<const Literal*> operand_literals;
std::vector<Literal> operand_literal_values;
if (!visitor_shape_index_.empty()) {
int64_t tuple_index = visitor_shape_index_.front();
operand_literal_values.resize(tuple->operand_count());
for (int operand_index = 0; operand_index < tuple->operand_count();
++operand_index) {
if (operand_index == tuple_index) {
operand_literals.push_back(
&GetEvaluatedLiteralFor(tuple->operand(operand_index)));
} else {
operand_literal_values[operand_index] =
Literal::CreateFromShapeWithUndeterminedLeafArrays(
ShapeUtil::GetSubshape(tuple->shape(), {operand_index}));
operand_literals.push_back(&operand_literal_values[operand_index]);
}
}
} else {
for (auto operand : tuple->operands()) {
operand_literals.push_back(&GetEvaluatedLiteralFor(operand));
}
}
std::vector<const Shape*> element_shapes;
element_shapes.reserve(operand_literals.size());
for (const auto* element : operand_literals) {
element_shapes.push_back(&element->shape());
}
Literal new_result = Literal::CreateFromShapeWithUndeterminedLeafArrays(
ShapeUtil::MakeTupleShapeWithPtrs(element_shapes));
for (int i = 0, end = operand_literals.size(); i < end; ++i) {
    TF_RETURN_IF_ERROR(
        new_result.CopyFrom(*operand_literals[i], /*dest_shape_index=*/{i}));
}
if (evaluated_.contains(tuple)) {
CHECK(new_result.IsDetermined(visitor_shape_index_));
    TF_RETURN_IF_ERROR(
        evaluated_[tuple].CopyFrom(std::move(new_result),
                                   /*dest_shape_index=*/visitor_shape_index_,
                                   /*src_shape_index=*/visitor_shape_index_));
} else {
evaluated_[tuple] = std::move(new_result);
}
return absl::OkStatus();
}
namespace {
template <typename ToType, typename FromType>
struct TypeConverter {
static inline ToType GetAs(FromType value) {
return static_cast<ToType>(value);
}
};
template <typename FromType>
struct TypeConverter<float, FromType> {
static inline float GetAs(FromType value) {
return static_cast<float>(value.real());
}
};
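// Implements the four FFT variants (FFT, IFFT, RFFT, IRFFT) over the
// innermost fft_rank_ dimensions. The transform is separable, so it is
// computed as a sequence of 1D transforms along each FFT axis (see Sweep
// below). RFFT/IRFFT exploit the conjugate symmetry of real-input spectra by
// truncating or expanding the innermost axis to length / 2 + 1 elements.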
template <typename ComplexType>
class FftTransform {
public:
explicit FftTransform(const HloInstruction* fft)
: fft_type_(fft->fft_type()),
fft_rank_(fft->fft_length().size()),
fft_lengths_(fft->fft_length()) {
absl::c_reverse(fft_lengths_);
}
absl::Status ComputeFft(const HloInstruction* fft,
const Literal& input_literal,
Literal* output_literal) {
const Shape& input_shape = input_literal.shape();
const Shape& output_shape = fft->shape();
TF_RETURN_IF_ERROR(CheckParameters(input_shape, output_shape));
const auto fft_strides = ComputeStrides(fft_lengths_);
const int64_t fft_size = fft_strides[fft_rank_];
if (fft_size > 0) {
std::vector<ComplexType> data(fft_size);
int64_t buffer_size = 0;
for (auto len : fft_lengths_) {
int64_t size =
absl::has_single_bit(static_cast<uint64_t>(len)) ? len * 2 : len;
buffer_size = std::max(buffer_size, size);
}
std::vector<ComplexType> buffer(buffer_size);
const auto input_lengths = GetDimensionLengths(input_literal);
const auto output_lengths = GetDimensionLengths(*output_literal);
const auto input_strides = ComputeStrides(input_lengths, input_literal);
const auto output_strides =
ComputeStrides(output_lengths, *output_literal);
auto base_case = [&](int64_t axis, int64_t output_index,
int64_t input_index, bool within_src_bounds) {
if (axis == fft_rank_ - 1) {
CHECK(within_src_bounds);
bool input_is_zero = CopyDataFromInput(
input_literal, input_index, fft_size, fft_lengths_, fft_strides,
input_lengths, input_strides, absl::MakeSpan(data));
if (!input_is_zero) {
Sweep(fft_lengths_, fft_strides, absl::MakeSpan(data),
absl::MakeSpan(buffer));
}
CopyDataToOutput(absl::MakeSpan(data), output_index, fft_lengths_,
fft_strides, output_lengths, output_strides,
output_literal);
return true;
}
return false;
};
      GenerateIndices(output_lengths, output_strides, input_lengths,
                      input_strides, input_shape.rank(), /*dst_start=*/0,
                      /*src_start=*/0, base_case);
}
return absl::OkStatus();
}
private:
static bool GatherToBuffer(absl::Span<ComplexType> data, int64_t length,
int64_t start, int64_t stride, bool expand_input,
absl::Span<ComplexType> buffer) {
CHECK_GE(buffer.size(), length);
bool input_is_zero = true;
const int64_t ub = expand_input ? length / 2 + 1 : length;
CHECK_GE(data.size(), start + (ub - 1) * stride);
for (int64_t k = 0; k < ub; k++) {
ComplexType value = data[start + k * stride];
input_is_zero &= value == ComplexType(0.0, 0.0);
buffer[k] = value;
if (expand_input) {
if (k > 0 && k < (length - ub + 1)) {
buffer[length - k] = std::conj(value);
}
}
}
return input_is_zero;
}
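  // Returns the k-th twiddle factor exp(-2 * pi * i * k / length); the
  // inverse transform uses its complex conjugate, exp(+2 * pi * i * k /
  // length).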
static inline ComplexType Twiddle(int64_t k, int64_t length, bool inverse) {
auto coeff = std::exp(ComplexType(0.0, -2.0 * M_PI * k / length));
return inverse ? std::conj(coeff) : coeff;
}
static void NaiveDft1D(int64_t length, int64_t start, int64_t stride,
bool inverse, bool contract_output, bool expand_input,
absl::Span<ComplexType> data,
absl::Span<ComplexType> buffer) {
const bool input_is_zero =
GatherToBuffer(data, length, start, stride, expand_input, buffer);
if (!input_is_zero) {
const int64_t ub = contract_output ? length / 2 + 1 : length;
for (int64_t k = 0; k < ub; k++) {
ComplexType value = ComplexType(0.0, 0.0);
for (int n = 0; n < length; n++) {
value += buffer[n] * Twiddle(n * k, length, inverse);
}
data[start + k * stride] =
inverse ? value / ComplexType(length, 0.0) : value;
}
}
}
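  // Iterative radix-2 Cooley-Tukey FFT for power-of-two lengths. The two
  // halves of `buffer` serve as ping-pong input/output regions: each pass
  // doubles the number of blocks and combines (even, odd) pairs with the
  // butterfly even +/- twiddle * odd, giving O(length * log(length)) work.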
static void Fft1D(int64_t length, int64_t start, int64_t stride, bool inverse,
bool contract_output, bool expand_input,
absl::Span<ComplexType> data,
absl::Span<ComplexType> buffer) {
CHECK(absl::has_single_bit(static_cast<uint64_t>(length)));
const bool input_is_zero =
GatherToBuffer(data, length, start, stride, expand_input, buffer);
if (!input_is_zero) {
auto generate_twiddles = [](int64_t length, bool inverse) {
std::vector<ComplexType> twiddles;
twiddles.reserve(length / 2);
for (int64_t k = 0; k < length / 2; k++) {
twiddles.push_back(Twiddle(k, length, inverse));
}
return twiddles;
};
int64_t in_base = length;
int64_t out_base = 0;
for (int64_t num_blocks = 1; num_blocks < length; num_blocks *= 2) {
std::swap(in_base, out_base);
auto twiddles = generate_twiddles(num_blocks * 2, inverse);
const int64_t block_size = length / num_blocks;
const int64_t next_iteration_block_size = block_size / 2;
for (int64_t block = 0; block < num_blocks; block++) {
const int64_t in_offset = in_base + block * block_size;
const int64_t out_offset =
out_base + block * next_iteration_block_size;
for (int64_t pair = 0; pair < block_size / 2; pair++) {
const ComplexType even = buffer[in_offset + pair];
const ComplexType odd = buffer[in_offset + block_size / 2 + pair];
const ComplexType twiddled_odd = twiddles[block] * odd;
buffer[out_offset + pair] = even + twiddled_odd;
buffer[out_offset + length / 2 + pair] = even - twiddled_odd;
}
}
}
const int64_t ub = contract_output ? length / 2 + 1 : length;
for (int64_t k = 0; k < ub; k++) {
ComplexType value = buffer[out_base + k];
data[start + k * stride] =
inverse ? value / ComplexType(length, 0.0) : value;
}
}
}
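  // Dispatches a single 1D transform: power-of-two lengths take the
  // O(n log n) FFT above, all other lengths fall back to the naive O(n^2)
  // DFT.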
static void Dft1D(int64_t length, int64_t start, int64_t stride, bool inverse,
bool contract_output, bool expand_input,
absl::Span<ComplexType> data,
absl::Span<ComplexType> buffer) {
if (absl::has_single_bit(static_cast<uint64_t>(length))) {
Fft1D(length, start, stride, inverse, contract_output, expand_input, data,
buffer);
} else {
NaiveDft1D(length, start, stride, inverse, contract_output, expand_input,
data, buffer);
}
}
static std::vector<int64_t> GetDimensionLengths(const Literal& literal) {
auto dimensions = literal.shape().dimensions();
return std::vector<int64_t>(dimensions.rbegin(), dimensions.rend());
}
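  // Computes strides for the given dimension lengths under `layout`, storing
  // the total element count in the final slot. Lengths arrive in reverse
  // dimension order (see GetDimensionLengths above), so the transforms below
  // treat axis 0 as the innermost axis.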
static std::vector<int64_t> ComputeStrides(
const absl::Span<const int64_t> lengths, const Layout& layout) {
const int64_t num_dimensions = lengths.size();
CHECK_EQ(num_dimensions, layout.minor_to_major_size());
std::vector<int64_t> strides(num_dimensions + 1);
int64_t stride = 1;
for (int64_t i = 0; i < num_dimensions; i++) {
const int64_t index = (num_dimensions - 1) - layout.minor_to_major(i);
strides[index] = stride;
stride *= lengths[index];
}
strides[num_dimensions] = stride;
return strides;
}
static std::vector<int64_t> ComputeStrides(
const absl::Span<const int64_t> lengths) {
return ComputeStrides(lengths,
LayoutUtil::GetDefaultLayoutForRank(lengths.size()));
}
static std::vector<int64_t> ComputeStrides(
const absl::Span<const int64_t> lengths, const Literal& literal) {
return literal.shape().has_layout()
? ComputeStrides(lengths, literal.shape().layout())
: ComputeStrides(lengths);
}
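  // Runs the multi-dimensional transform as a series of 1D sweeps: for each
  // FFT axis, Dft1D is applied along that axis at every combination of
  // indices on the remaining axes. Only the truncated axis (axis 0 for
  // RFFT/IRFFT) uses contracted or expanded 1D transforms, and the sweeps
  // run in reverse axis order when the input is truncated (IRFFT).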
void Sweep(const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
absl::Span<ComplexType> data, absl::Span<ComplexType> buffer) {
const bool inverse =
fft_type_ == FftType::IFFT || fft_type_ == FftType::IRFFT;
const bool input_is_truncated = fft_type_ == FftType::IRFFT;
const bool output_is_truncated = fft_type_ == FftType::RFFT;
std::function<void(int64_t, int64_t, int64_t)> sweep =
[&](int64_t sweep_axis, int64_t axis, int64_t start) {
if (axis < 0) {
const int64_t length = fft_lengths[sweep_axis];
const int64_t stride = fft_strides[sweep_axis];
const bool expand_input = input_is_truncated && sweep_axis == 0;
            const bool contract_output =
                output_is_truncated && sweep_axis == 0;
            Dft1D(length, start, stride, inverse, contract_output,
                  expand_input, data, buffer);
} else if (axis == sweep_axis) {
sweep(sweep_axis, axis - 1, start);
} else {
const int64_t length = fft_lengths[axis];
const bool is_truncated = input_is_truncated || output_is_truncated;
const int64_t ub =
is_truncated && axis == 0 ? (length / 2) + 1 : length;
for (int64_t i = 0; i < ub; i++) {
sweep(sweep_axis, axis - 1, start + i * fft_strides[axis]);
}
}
};
if (input_is_truncated) {
for (int64_t sweep_axis = fft_rank_ - 1; sweep_axis >= 0; sweep_axis--) {
sweep(sweep_axis, fft_rank_ - 1, 0);
}
} else {
for (int64_t sweep_axis = 0; sweep_axis < fft_rank_; sweep_axis++) {
sweep(sweep_axis, fft_rank_ - 1, 0);
}
}
}
template <typename BaseFn>
static void GenerateIndices(const absl::Span<const int64_t> dst_lengths,
const absl::Span<const int64_t> dst_strides,
const absl::Span<const int64_t> src_lengths,
const absl::Span<const int64_t> src_strides,
int64_t rank, int64_t dst_start,
int64_t src_start, BaseFn&& base) {
CHECK_EQ(dst_lengths.size() + 1, dst_strides.size());
CHECK_GE(dst_lengths.size(), rank);
CHECK_EQ(src_lengths.size() + 1, src_strides.size());
CHECK_GE(src_lengths.size(), rank);
std::function<void(int64_t, int64_t, int64_t, bool)> generate =
[&](int64_t axis, int64_t dst_index, int64_t src_index,
bool within_src_bounds) {
if (!base(axis, dst_index, src_index, within_src_bounds)) {
for (int64_t i = 0; i < dst_lengths[axis]; i++) {
within_src_bounds &= i < src_lengths[axis];
generate(axis - 1, dst_index, src_index, within_src_bounds);
dst_index += dst_strides[axis];
src_index += src_strides[axis];
}
}
};
generate(rank - 1, dst_start, src_start, true);
}
template <typename InputType>
bool CopyDataFromInput(const Literal& input_literal, int64_t input_start,
int64_t fft_size,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> input_lengths,
const absl::Span<const int64_t> input_strides,
absl::Span<ComplexType> data) {
CHECK_GE(data.size(), fft_size);
const bool input_is_truncated = fft_type_ == FftType::IRFFT;
bool input_is_zero = true;
const InputType* input_data = input_literal.data<InputType>().data();
auto base_case = [&](int64_t axis, int64_t dst_index, int64_t src_index,
bool within_src_bounds) {
if (axis == 0) {
const int64_t length = fft_lengths[axis];
const int64_t ub = input_is_truncated ? (length / 2) + 1 : length;
for (int64_t i = 0; i < ub; i++) {
ComplexType value = ComplexType(0);
if (within_src_bounds && i < input_lengths[axis]) {
value = TypeConverter<ComplexType, InputType>::GetAs(
input_data[src_index + i * input_strides[axis]]);
input_is_zero &= value == ComplexType(0.0, 0.0);
}
data[dst_index + i * fft_strides[axis]] = value;
}
return true;
}
return false;
};
    GenerateIndices(fft_lengths, fft_strides, input_lengths, input_strides,
                    fft_rank_, /*dst_start=*/0, /*src_start=*/input_start,
                    base_case);
return input_is_zero;
}
template <typename OutputType>
void CopyDataToOutput(const absl::Span<ComplexType> data,
int64_t output_start,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> output_lengths,
const absl::Span<const int64_t> output_strides,
Literal* output_literal) {
const bool output_is_truncated = fft_type_ == FftType::RFFT;
OutputType* output_data = output_literal->data<OutputType>().data();
auto base_case = [&](int64_t axis, int64_t dst_index, int64_t src_index,
bool within_src_bounds) {
if (axis == 0) {
const int64_t length = fft_lengths[axis];
const int64_t ub = output_is_truncated ? (length / 2) + 1 : length;
for (int64_t i = 0; i < output_lengths[axis]; i++) {
OutputType value = OutputType(0);
if (within_src_bounds && i < ub) {
value = TypeConverter<OutputType, ComplexType>::GetAs(
data[src_index + i * fft_strides[axis]]);
}
output_data[dst_index + i * output_strides[axis]] = value;
}
return true;
}
return false;
};
    GenerateIndices(output_lengths, output_strides, fft_lengths, fft_strides,
                    fft_rank_, /*dst_start=*/output_start, /*src_start=*/0,
                    base_case);
}
bool CopyDataFromInput(const Literal& input_literal, int64_t input_start,
int64_t fft_size,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> input_lengths,
const absl::Span<const int64_t> input_strides,
absl::Span<ComplexType> data) {
const bool input_is_float = fft_type_ == FftType::RFFT;
if (input_is_float) {
return CopyDataFromInput<float>(input_literal, input_start, fft_size,
fft_lengths, fft_strides, input_lengths,
input_strides, data);
} else {
return CopyDataFromInput<complex64>(input_literal, input_start, fft_size,
fft_lengths, fft_strides,
input_lengths, input_strides, data);
}
}
void CopyDataToOutput(const absl::Span<ComplexType> data,
int64_t output_start,
const absl::Span<const int64_t> fft_lengths,
const absl::Span<const int64_t> fft_strides,
const absl::Span<const int64_t> output_lengths,
const absl::Span<const int64_t> output_strides,
Literal* output_literal) {
const bool output_is_float = fft_type_ == FftType::IRFFT;
if (output_is_float) {
CopyDataToOutput<float>(data, output_start, fft_lengths, fft_strides,
output_lengths, output_strides, output_literal);
} else {
CopyDataToOutput<complex64>(data, output_start, fft_lengths, fft_strides,
output_lengths, output_strides,
output_literal);
}
}
absl::Status CheckParameters(const Shape& input_shape,
const Shape& output_shape) {
if (fft_rank_ <= 0) {
return InvalidArgument("Zero or negative FFT rank.");
}
if (*absl::c_min_element(fft_lengths_) < 0) {
return InvalidArgument("Negative FFT length.");
}
TF_CHECK_OK(ShapeUtil::ValidateShape(input_shape));
if (!input_shape.IsArray()) {
return Unimplemented("Only array input shapes are supported.");
}
auto input_elt_type = input_shape.element_type();
if (fft_type_ == FftType::RFFT && input_elt_type != PrimitiveType::F32) {
return InvalidArgument("Invalid input type: %d, must be %d (float).",
input_elt_type, PrimitiveType::F32);
}
if (fft_type_ != FftType::RFFT && input_elt_type != PrimitiveType::C64) {
return InvalidArgument("Invalid input type: %d, must be %d (complex64).",
input_elt_type, PrimitiveType::C64);
}
const int64_t input_rank = input_shape.rank();
if (input_rank < fft_rank_) {
return InvalidArgument("Input shape rank is smaller than FFT rank.");
}
TF_CHECK_OK(ShapeUtil::ValidateShape(output_shape));
if (!output_shape.IsArray()) {
return Unimplemented("Only array output shapes are supported.");
}
auto output_elt_type = output_shape.element_type();
if (fft_type_ == FftType::IRFFT && output_elt_type != PrimitiveType::F32) {
return InvalidArgument("Invalid output type: %d, must be %d (float).",
output_elt_type, PrimitiveType::F32);
}
if (fft_type_ != FftType::IRFFT && output_elt_type != PrimitiveType::C64) {
return InvalidArgument("Invalid output type: %d, must be %d (complex64).",
output_elt_type, PrimitiveType::C64);
}
const int64_t output_rank = output_shape.rank();
if (output_rank < fft_rank_) {
return InvalidArgument("Output shape rank is smaller than FFT rank.");
}
if (input_rank != output_rank) {
return InvalidArgument(
"Ranks of input shape and output shape do not match.");
}
for (int64_t dim = 0; dim < input_rank - fft_rank_; dim++) {
if (ShapeUtil::GetDimension(input_shape, dim) !=
ShapeUtil::GetDimension(output_shape, dim)) {
return InvalidArgument(
"Higher dimension lengths of input shape and output shape do not "
"match.");
}
}
return absl::OkStatus();
}
private:
const FftType fft_type_;
const int64_t fft_rank_;
std::vector<int64_t> fft_lengths_;
};
}  // namespace
absl::Status HloEvaluator::HandleFft(const HloInstruction* fft) {
const Literal& input_literal = GetEvaluatedLiteralFor(fft->operand(0));
Literal output_literal = Literal::CreateFromShape(fft->shape());
FftTransform<complex128> transform(fft);
TF_RETURN_IF_ERROR(transform.ComputeFft(fft, input_literal, &output_literal));
evaluated_[fft] = std::move(output_literal);
return absl::OkStatus();
}
ShapeUtil::IndexIterationSpace IterationSpaceForOutputBatchIndices(
const Shape& output_shape, const GatherDimensionNumbers& dim_numbers) {
int64_t output_rank = output_shape.dimensions_size();
std::vector<int64_t> index_base(output_rank, 0);
std::vector<int64_t> index_count;
index_count.reserve(output_rank);
for (int64_t i = 0; i < output_rank; i++) {
bool is_output_batch_dim =
!absl::c_binary_search(dim_numbers.offset_dims(), i);
index_count.push_back(is_output_batch_dim ? output_shape.dimensions(i) : 1);
}
return {std::move(index_base), std::move(index_count),
std::vector<int64_t>(output_rank, 1)};
}
ShapeUtil::IndexIterationSpace IterationSpaceForOutputOffsetIndices(
int64_t output_rank, absl::Span<const int64_t> slice_sizes,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64_t> index_base(output_rank, 0);
std::vector<int64_t> index_count(output_rank, 1);
int64_t slice_sizes_idx = 0;
for (int64_t i = 0; i < output_rank; i++) {
bool is_output_window_dim =
absl::c_binary_search(dim_numbers.offset_dims(), i);
if (is_output_window_dim) {
while (absl::c_binary_search(dim_numbers.collapsed_slice_dims(),
slice_sizes_idx)) {
slice_sizes_idx++;
}
index_count[i] = slice_sizes[slice_sizes_idx++];
}
}
return {std::move(index_base), std::move(index_count),
std::vector<int64_t>(output_rank, 1)};
}
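// Maps the batch components of a gather output index to an input index: the
// batch components select an index vector from start_indices, which
// start_index_map then scatters into input dimensions. Input dimensions not
// covered by start_index_map remain zero and are filled in by
// OutputOffsetIndexToInputIndex below.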
class OutputBatchIndexToInputIndex {
public:
explicit OutputBatchIndexToInputIndex(
const GatherDimensionNumbers* dim_numbers, const Shape& input_shape,
const Shape& output_shape, const Literal* start_indices)
: dim_numbers_(*dim_numbers), start_indices_(*start_indices) {
for (int64_t i = 0; i < output_shape.dimensions_size(); i++) {
output_dim_is_batch_dims_.push_back(
!absl::c_binary_search(dim_numbers_.offset_dims(), i));
}
for (int64_t i = 0; i < input_shape.dimensions_size(); i++) {
int64_t index_of_input_dim_in_index_vector =
std::distance(dim_numbers_.start_index_map().begin(),
absl::c_find(dim_numbers_.start_index_map(), i));
if (index_of_input_dim_in_index_vector ==
dim_numbers_.start_index_map_size()) {
input_dim_value_to_index_vector_.push_back(-1);
} else {
input_dim_value_to_index_vector_.push_back(
index_of_input_dim_in_index_vector);
}
}
index_vector_index_.resize(start_indices_.shape().dimensions_size());
input_index_.resize(input_shape.dimensions_size());
int64_t index_vector_size =
start_indices_.shape().dimensions(dim_numbers_.index_vector_dim());
index_vector_.resize(index_vector_size);
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> output_index) {
PropagateOutputIndexGatherDimsToIndexVectorIndex(output_index);
TF_RETURN_IF_ERROR(FetchIndexVector());
PropagateIndexVectorToInputIndex();
return absl::Span<const int64_t>(input_index_);
}
private:
void PropagateOutputIndexGatherDimsToIndexVectorIndex(
absl::Span<const int64_t> output_index) {
int64_t index_vector_index_i = 0;
for (int64_t i = 0, e = output_index.size(); i < e; i++) {
if (!output_dim_is_batch_dims_[i]) {
continue;
}
if (index_vector_index_i == dim_numbers_.index_vector_dim()) {
index_vector_index_i++;
}
index_vector_index_[index_vector_index_i++] = output_index[i];
}
}
absl::Status FetchIndexVector() {
int64_t index_vector_dim = dim_numbers_.index_vector_dim();
for (int64_t i = 0, e = index_vector_.size(); i < e; i++) {
index_vector_index_[index_vector_dim] = i;
auto start_index = start_indices_.GetIntegralAsS64(index_vector_index_);
TF_RET_CHECK(start_index.has_value());
index_vector_[i] = *start_index;
}
return absl::OkStatus();
}
void PropagateIndexVectorToInputIndex() {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_index_vector_[i] != -1) {
input_index_[i] = index_vector_[input_dim_value_to_index_vector_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_index_vector_;
std::vector<bool> output_dim_is_batch_dims_;
std::vector<int64_t> index_vector_index_;
std::vector<int64_t> index_vector_;
std::vector<int64_t> input_index_;
const GatherDimensionNumbers& dim_numbers_;
const Literal& start_indices_;
};
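// Maps the offset (window) components of a gather output index to offsets
// within the gathered slice, skipping input dimensions that were collapsed
// out of the slice shape.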
class OutputOffsetIndexToInputIndex {
public:
explicit OutputOffsetIndexToInputIndex(
const GatherDimensionNumbers& dim_numbers, const Shape& input_shape,
const Shape& output_shape) {
std::vector<int64_t> window_index_to_output_index;
int64_t output_index_count = 0;
for (int64_t i = 0; i < output_shape.dimensions_size(); i++) {
if (absl::c_binary_search(dim_numbers.offset_dims(), i)) {
window_index_to_output_index.push_back(output_index_count++);
} else {
output_index_count++;
}
}
int64_t window_dim_count = 0;
for (int64_t i = 0; i < input_shape.dimensions_size(); i++) {
if (absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
input_dim_value_to_output_index_.push_back(-1);
} else {
input_dim_value_to_output_index_.push_back(
window_index_to_output_index[window_dim_count++]);
}
}
input_index_.resize(input_shape.dimensions_size());
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> output_index) {
PropagateOutputIndexWindowDimsToInputIndex(output_index);
return absl::Span<const int64_t>(input_index_);
}
int64_t input_dim_value_to_output_index(int64_t input_dim) {
return input_dim_value_to_output_index_[input_dim];
}
private:
void PropagateOutputIndexWindowDimsToInputIndex(
absl::Span<const int64_t> output_index) {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_output_index_[i] != -1) {
input_index_[i] = output_index[input_dim_value_to_output_index_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_output_index_;
std::vector<int64_t> input_index_;
};
static absl::StatusOr<std::reference_wrapper<const Literal>>
ReshapedGatherIndices(int64_t index_vector_dim, const Literal& start_indices,
Literal* reshaped_start_indices) {
if (start_indices.shape().dimensions_size() != index_vector_dim) {
return std::cref(start_indices);
}
std::vector<int64_t> new_shape(start_indices.shape().dimensions().begin(),
start_indices.shape().dimensions().end());
new_shape.push_back(1);
if (start_indices.shape().is_dynamic()) {
TF_ASSIGN_OR_RETURN(*reshaped_start_indices,
start_indices.ToStatic().Reshape(new_shape));
} else {
TF_ASSIGN_OR_RETURN(*reshaped_start_indices,
start_indices.Reshape(new_shape));
}
return std::cref(*reshaped_start_indices);
}
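// Gather is evaluated with two nested index loops: the outer loop visits
// every combination of batch indices in the output shape and resolves it to
// a starting input index; the inner loop visits the offset indices, clamps
// the start so the whole slice stays in bounds, and copies one input element
// per output element.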
absl::Status HloEvaluator::HandleGather(const HloInstruction* gather) {
Literal result = Literal::CreateFromShape(gather->shape());
const Shape& shape = gather->shape();
const GatherDimensionNumbers& dim_numbers =
gather->gather_dimension_numbers();
const Literal& operand = GetEvaluatedLiteralFor(gather->operand(0));
Literal reshaped_start_indices;
TF_ASSIGN_OR_RETURN(
const Literal& start_indices,
ReshapedGatherIndices(dim_numbers.index_vector_dim(),
GetEvaluatedLiteralFor(gather->operand(1)),
&reshaped_start_indices));
ShapeUtil::IndexIterationSpace start_indices_iteration_space =
IterationSpaceForOutputBatchIndices(shape, dim_numbers);
ShapeUtil::IndexIterationSpace offset_indices_iteration_space =
IterationSpaceForOutputOffsetIndices(
shape.dimensions_size(), gather->gather_slice_sizes(), dim_numbers);
std::vector<int64_t> input_index(operand.shape().dimensions_size());
std::vector<int64_t> output_index(gather->shape().dimensions_size());
std::vector<int64_t> input_index_clamped(operand.shape().dimensions_size());
  OutputBatchIndexToInputIndex output_batch_index_to_input_index(
      &gather->gather_dimension_numbers(), /*input_shape=*/operand.shape(),
      /*output_shape=*/shape, &start_indices);
  OutputOffsetIndexToInputIndex output_offset_index_to_input_index(
      gather->gather_dimension_numbers(), /*input_shape=*/operand.shape(),
      /*output_shape=*/shape);
const Shape& operand_shape = operand.shape();
if (ShapeUtil::IsZeroElementArray(operand_shape)) {
evaluated_[gather] = std::move(result);
return absl::OkStatus();
}
auto gather_inner_loop_body =
[&](absl::Span<const int64_t> output_window_index,
absl::Span<const int64_t> input_gather_index,
absl::Span<const int64_t> output_gather_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
absl::Span<const int64_t> input_window_index,
output_offset_index_to_input_index(output_window_index));
for (int i = 0, e = output_index.size(); i < e; i++) {
output_index[i] = output_gather_index[i] + output_window_index[i];
DCHECK_LT(output_index[i], shape.dimensions(i));
}
for (int i = 0, e = input_gather_index.size(); i < e; i++) {
int64_t output_dim =
output_offset_index_to_input_index.input_dim_value_to_output_index(i);
int64_t output_dim_size =
output_dim == -1 ? 1 : shape.dimensions(output_dim);
input_index_clamped[i] =
std::min(operand_shape.dimensions(i) - output_dim_size,
std::max(int64_t{0}, input_gather_index[i]));
}
for (int i = 0, e = input_index.size(); i < e; i++) {
input_index[i] = input_index_clamped[i] + input_window_index[i];
DCHECK_GE(input_index[i], 0);
DCHECK_LT(input_index[i], operand_shape.dimensions(i));
}
result.CopyElementFrom(operand, input_index, output_index);
return true;
};
auto gather_outer_loop_body =
[&](absl::Span<const int64_t> output_gather_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(absl::Span<const int64_t> input_gather_index,
output_batch_index_to_input_index(output_gather_index));
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
shape, offset_indices_iteration_space,
std::bind(gather_inner_loop_body, std::placeholders::_1,
input_gather_index, output_gather_index)));
return true;
};
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
shape, start_indices_iteration_space, gather_outer_loop_body));
evaluated_[gather] = std::move(result);
return absl::OkStatus();
}
namespace {
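// Reshapes the scatter indices to have a trailing degenerate dimension of
// size 1 when index_vector_dim equals the indices rank, so the rest of the
// scatter evaluation can assume an explicit index vector dimension. The
// reshaped literal (if any) is stored in *reshaped_indices.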
absl::StatusOr<std::reference_wrapper<const Literal>> ReshapedScatterIndices(
int64_t index_vector_dim, const Literal& indices,
Literal* reshaped_indices) {
if (indices.shape().dimensions_size() != index_vector_dim) {
return std::cref(indices);
}
std::vector<int64_t> new_shape(indices.shape().dimensions().begin(),
indices.shape().dimensions().end());
new_shape.push_back(1);
if (indices.shape().is_dynamic()) {
TF_ASSIGN_OR_RETURN(*reshaped_indices,
indices.ToStatic().Reshape(new_shape));
} else {
TF_ASSIGN_OR_RETURN(*reshaped_indices, indices.Reshape(new_shape));
}
return std::cref(*reshaped_indices);
}
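// Returns an iteration space over the updates shape that covers either the
// update window dimensions (kForUpdateWindowIndices == true) or the scatter
// dimensions (kForUpdateWindowIndices == false); all other dimensions are
// pinned to a single index.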
template <bool kForUpdateWindowIndices>
ShapeUtil::IndexIterationSpace GetIterationSpaceImpl(
absl::Span<const int64_t> updates_dims,
const ScatterDimensionNumbers& dim_numbers) {
int64_t updates_rank = updates_dims.size();
std::vector<int64_t> index_base(updates_rank, 0);
std::vector<int64_t> index_count(updates_rank, 1);
for (int64_t i = 0; i < updates_rank; i++) {
if (kForUpdateWindowIndices) {
bool is_update_window_dim =
absl::c_binary_search(dim_numbers.update_window_dims(), i);
if (is_update_window_dim) {
index_count[i] = updates_dims[i];
}
} else {
bool is_update_scatter_dim =
!absl::c_binary_search(dim_numbers.update_window_dims(), i);
if (is_update_scatter_dim) {
index_count[i] = updates_dims[i];
}
}
}
return {std::move(index_base), std::move(index_count),
std::vector<int64_t>(updates_rank, 1)};
}
ShapeUtil::IndexIterationSpace IterationSpaceForUpdateScatterIndices(
absl::Span<const int64_t> updates_dims,
const ScatterDimensionNumbers& dim_numbers) {
return GetIterationSpaceImpl<false>(updates_dims,
dim_numbers);
}
ShapeUtil::IndexIterationSpace IterationSpaceForUpdateWindowIndices(
absl::Span<const int64_t> updates_dims,
const ScatterDimensionNumbers& dim_numbers) {
return GetIterationSpaceImpl<true>(updates_dims,
dim_numbers);
}
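// Computes the contribution of the scatter indices to an operand index for a
// given update index: it extracts the scatter dimensions of the update index,
// uses them to fetch an index vector from scatter_indices, and expands that
// vector into the operand space via scatter_dims_to_operand_dims.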
class UpdateScatterIndexToInputIndex {
public:
explicit UpdateScatterIndexToInputIndex(
const ScatterDimensionNumbers& dim_numbers, int64_t input_rank,
int64_t updates_rank, const Literal* scatter_indices)
: dim_numbers_(dim_numbers), scatter_indices_(*scatter_indices) {
for (int64_t i = 0; i < updates_rank; i++) {
update_dim_is_scatter_dims_.push_back(
!absl::c_binary_search(dim_numbers_.update_window_dims(), i));
}
for (int64_t i = 0; i < input_rank; i++) {
int64_t index_of_input_dim_in_index_vector =
FindIndex(dim_numbers_.scatter_dims_to_operand_dims(), i);
if (index_of_input_dim_in_index_vector ==
dim_numbers_.scatter_dims_to_operand_dims_size()) {
input_dim_value_to_index_vector_.push_back(-1);
} else {
input_dim_value_to_index_vector_.push_back(
index_of_input_dim_in_index_vector);
}
}
index_vector_index_.resize(scatter_indices_.shape().dimensions_size());
input_index_.resize(input_rank);
int64_t index_vector_size =
scatter_indices_.shape().dimensions(dim_numbers_.index_vector_dim());
index_vector_.resize(index_vector_size);
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> update_index) {
PropagateUpdateIndexScatterDimsToIndexVectorIndex(update_index);
TF_RETURN_IF_ERROR(FetchIndexVector());
PropagateIndexVectorToInputIndex();
return absl::Span<const int64_t>(input_index_);
}
private:
void PropagateUpdateIndexScatterDimsToIndexVectorIndex(
absl::Span<const int64_t> update_index) {
int64_t index_vector_index_i = 0;
for (int64_t i = 0, e = update_index.size(); i < e; i++) {
if (!update_dim_is_scatter_dims_[i]) {
continue;
}
if (index_vector_index_i == dim_numbers_.index_vector_dim()) {
index_vector_index_i++;
}
index_vector_index_[index_vector_index_i++] = update_index[i];
}
}
absl::Status FetchIndexVector() {
int64_t index_vector_dim = dim_numbers_.index_vector_dim();
for (int64_t i = 0, e = index_vector_.size(); i < e; i++) {
index_vector_index_[index_vector_dim] = i;
index_vector_[i] =
*scatter_indices_.GetIntegralAsS64(index_vector_index_);
}
return absl::OkStatus();
}
void PropagateIndexVectorToInputIndex() {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_index_vector_[i] != -1) {
input_index_[i] = index_vector_[input_dim_value_to_index_vector_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_index_vector_;
std::vector<bool> update_dim_is_scatter_dims_;
std::vector<int64_t> index_vector_index_;
std::vector<int64_t> index_vector_;
std::vector<int64_t> input_index_;
const ScatterDimensionNumbers& dim_numbers_;
const Literal& scatter_indices_;
};
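// Computes the contribution of the update window dimensions to an operand
// index: given an update index, it picks out the window dimensions and maps
// them onto the non-inserted dimensions of the operand shape.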
class UpdateWindowIndexToInputIndex {
public:
explicit UpdateWindowIndexToInputIndex(
const ScatterDimensionNumbers& dim_numbers, int64_t input_rank,
int64_t update_rank) {
std::vector<int64_t> window_index_to_update_index;
int64_t update_index_count = 0;
for (int64_t i = 0; i < update_rank; i++) {
if (absl::c_binary_search(dim_numbers.update_window_dims(), i)) {
window_index_to_update_index.push_back(update_index_count++);
} else {
update_index_count++;
}
}
int64_t window_dim_count = 0;
for (int64_t i = 0; i < input_rank; i++) {
if (absl::c_binary_search(dim_numbers.inserted_window_dims(), i)) {
input_dim_value_to_update_index_.push_back(-1);
} else {
input_dim_value_to_update_index_.push_back(
window_index_to_update_index[window_dim_count++]);
}
}
input_index_.resize(input_rank);
}
absl::StatusOr<absl::Span<const int64_t>> operator()(
absl::Span<const int64_t> update_index) {
PropagateUpdateIndexWindowDimsToInputIndex(update_index);
return absl::Span<const int64_t>(input_index_);
}
int64_t input_dim_value_to_update_index(int64_t input_dim) {
return input_dim_value_to_update_index_[input_dim];
}
private:
void PropagateUpdateIndexWindowDimsToInputIndex(
absl::Span<const int64_t> update_index) {
for (int64_t i = 0, e = input_index_.size(); i < e; i++) {
if (input_dim_value_to_update_index_[i] != -1) {
input_index_[i] = update_index[input_dim_value_to_update_index_[i]];
}
}
}
std::vector<int64_t> input_dim_value_to_update_index_;
std::vector<int64_t> input_index_;
};
}  // namespace
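// Evaluates a kScatter with a nested loop: the outer loop walks the scatter
// indices in the updates shape, the inner loop walks the update window.
// Out-of-bounds scatter indices are skipped, and in-bounds elements are
// combined into the result via the scatter's to_apply computation.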
absl::Status HloEvaluator::HandleScatter(const HloInstruction* hlo) {
auto* scatter = DynCast<HloScatterInstruction>(hlo);
const ScatterDimensionNumbers& dim_numbers =
scatter->scatter_dimension_numbers();
absl::InlinedVector<const Literal*, 1> operands;
operands.reserve(scatter->scatter_operand_count());
for (const HloInstruction* operand_inst : scatter->scatter_operands()) {
operands.push_back(&GetEvaluatedLiteralFor(operand_inst));
}
Literal reshaped_scatter_indices;
TF_ASSIGN_OR_RETURN(
const Literal& scatter_indices,
ReshapedScatterIndices(dim_numbers.index_vector_dim(),
GetEvaluatedLiteralFor(scatter->scatter_indices()),
&reshaped_scatter_indices));
absl::InlinedVector<const Literal*, 1> updates;
updates.reserve(operands.size());
for (const HloInstruction* updates_inst : scatter->scatter_updates()) {
updates.push_back(&GetEvaluatedLiteralFor(updates_inst));
}
auto updates_dims = updates[0]->shape().dimensions();
auto operand_dims = operands[0]->shape().dimensions();
ShapeUtil::IndexIterationSpace scatter_indices_iteration_space =
IterationSpaceForUpdateScatterIndices(updates_dims, dim_numbers);
ShapeUtil::IndexIterationSpace window_indices_iteration_space =
IterationSpaceForUpdateWindowIndices(updates_dims, dim_numbers);
std::vector<int64_t> input_index(operand_dims.size());
std::vector<int64_t> update_index(updates_dims.size());
UpdateScatterIndexToInputIndex update_scatter_index_to_input_index(
scatter->scatter_dimension_numbers(),
operand_dims.size(), updates_dims.size(),
&scatter_indices);
UpdateWindowIndexToInputIndex update_window_index_to_input_index(
scatter->scatter_dimension_numbers(),
operand_dims.size(), updates_dims.size());
Literal result = operands.size() > 1 ? LiteralUtil::MakeTuple(operands)
: operands[0]->Clone();
auto maybe_slice = [](MutableLiteralBase& literal, int idx) {
if (literal.shape().IsTuple()) {
return MutableBorrowingLiteral(&literal, {idx});
}
DCHECK_EQ(idx, 0);
return MutableBorrowingLiteral(&literal);
};
HloEvaluator embedded_evaluator;
auto scatter_inner_loop_body =
[&](absl::Span<const int64_t> update_window_index,
absl::Span<const int64_t> input_scatter_index,
absl::Span<const int64_t> update_scatter_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
absl::Span<const int64_t> input_window_index,
update_window_index_to_input_index(update_window_index));
for (int i = 0, e = update_index.size(); i < e; i++) {
update_index[i] = update_scatter_index[i] + update_window_index[i];
DCHECK_LT(update_index[i], updates_dims[i]);
}
for (int i = 0, e = input_scatter_index.size(); i < e; i++) {
int64_t update_dim =
update_window_index_to_input_index.input_dim_value_to_update_index(i);
int64_t update_dim_size = update_dim == -1 ? 1 : updates_dims[update_dim];
if ((input_scatter_index[i] < 0) ||
(input_scatter_index[i] > operand_dims[i] - update_dim_size)) {
return true;
}
}
for (int i = 0, e = input_index.size(); i < e; i++) {
input_index[i] = input_scatter_index[i] + input_window_index[i];
}
absl::InlinedVector<Literal, 2> to_apply_args;
to_apply_args.reserve(operands.size() + updates.size());
for (int i = 0, n = operands.size(); i < n; ++i) {
to_apply_args.push_back(
LiteralUtil::GetScalarLiteral(maybe_slice(result, i), input_index));
}
for (int i = 0, n = operands.size(); i < n; ++i) {
to_apply_args.push_back(
LiteralUtil::GetScalarLiteral(*updates[i], update_index));
}
Literal updated_result =
embedded_evaluator.Evaluate(*scatter->to_apply(), to_apply_args)
.value();
embedded_evaluator.ResetVisitStates();
for (int i = 0, n = operands.size(); i < n; ++i) {
auto result_slice = maybe_slice(result, i);
LiteralUtil::SetScalarLiteral(result_slice, input_index,
maybe_slice(updated_result, i));
}
return true;
};
auto scatter_outer_loop_body =
[&](absl::Span<const int64_t> update_scatter_index)
-> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(
absl::Span<const int64_t> input_scatter_index,
update_scatter_index_to_input_index(update_scatter_index));
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
updates[0]->shape(), window_indices_iteration_space,
[&](absl::Span<const int64_t> update_window_index) {
return scatter_inner_loop_body(
update_window_index, input_scatter_index, update_scatter_index);
}));
return true;
};
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
updates[0]->shape(), scatter_indices_iteration_space,
scatter_outer_loop_body));
evaluated_[scatter] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleBroadcast(const HloInstruction* broadcast) {
const Literal& operand = GetEvaluatedLiteralFor(broadcast->operand(0));
TF_RET_CHECK(broadcast->shape().element_type() ==
operand.shape().element_type())
<< " broadcast from a different data type is not supported";
TF_RET_CHECK(broadcast->dimensions().size() == operand.shape().rank())
<< "broadcast dimensions is of size: " << broadcast->dimensions().size()
<< " and rank of operand_to_broadcast is: " << operand.shape().rank();
for (int64_t i = 0; i < broadcast->dimensions().size(); ++i) {
auto operand_dim_size = operand.shape().dimensions(i);
auto broadcast_dim_size =
broadcast->shape().dimensions(broadcast->dimensions(i));
TF_RET_CHECK(operand_dim_size == broadcast_dim_size) << absl::StreamFormat(
"Operand dimension %d is broadcast to output dimension %d, but the "
"sizes of these two dims do not match (%d vs %d): %s",
i, broadcast->dimensions(i), operand_dim_size, broadcast_dim_size,
broadcast->ToString());
}
TF_ASSIGN_OR_RETURN(
evaluated_[broadcast],
operand.Broadcast(broadcast->shape(), broadcast->dimensions()));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAfterAll(const HloInstruction* after_all) {
evaluated_[after_all] = LiteralUtil::CreateToken();
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAddDependency(
const HloInstruction* add_dependency) {
evaluated_[add_dependency] =
GetEvaluatedLiteralFor(add_dependency->operand(0)).Clone();
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleGetTupleElement(
const HloInstruction* get_tuple_element) {
const auto result_shape = get_tuple_element->shape();
const int64_t index = get_tuple_element->tuple_index();
auto operand = get_tuple_element->operand(0);
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferGetTupleElementShape(operand->shape(), index));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
const Literal& operand_tuple_literal = GetEvaluatedLiteralFor(operand);
evaluated_[get_tuple_element] =
Literal(ShapeUtil::GetTupleElementShape(operand->shape(), index));
return evaluated_[get_tuple_element].CopyFrom(operand_tuple_literal,
{},
{index});
}
absl::Status HloEvaluator::HandleCopy(const HloInstruction* copy) {
if (copy->shape().element_type() !=
copy->operand(0)->shape().element_type()) {
TF_ASSIGN_OR_RETURN(Literal result,
GetEvaluatedLiteralFor(copy->operand(0))
.Convert(copy->shape().element_type()));
TF_RET_CHECK(ShapeUtil::Compatible(copy->shape(), result.shape()));
evaluated_[copy] = std::move(result);
} else {
TF_RET_CHECK(
ShapeUtil::Compatible(copy->shape(), copy->operand(0)->shape()));
evaluated_[copy] = GetEvaluatedLiteralFor(copy->operand(0)).Clone();
}
return absl::OkStatus();
}
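// Evaluates the async computation eagerly: the wrapped computation is run to
// completion here, and the async-start value is a tuple whose element {0}
// holds the operands and whose element {1} holds the result that
// HandleAsyncDone later extracts.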
absl::Status HloEvaluator::HandleAsyncStart(const HloInstruction* async_start) {
std::vector<const Literal*> arg_literals;
arg_literals.reserve(async_start->operands().size());
for (auto operand : async_start->operands()) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(&arg_literal);
}
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(
Literal result,
embedded_evaluator->Evaluate(*async_start->async_wrapped_computation(),
arg_literals));
evaluated_[async_start] = Literal(async_start->shape());
for (int i = 0; i < arg_literals.size(); ++i) {
TF_RETURN_IF_ERROR(evaluated_[async_start].CopyFrom(
*arg_literals[i], {0, i},
{}));
}
TF_RETURN_IF_ERROR(evaluated_[async_start].MoveFrom(
std::move(result), {1}));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAsyncUpdate(
const HloInstruction* async_update) {
const Literal& operand_tuple_literal =
GetEvaluatedLiteralFor(async_update->operand(0));
evaluated_[async_update] = Literal(async_update->shape());
TF_RETURN_IF_ERROR(evaluated_[async_update].CopyFrom(operand_tuple_literal,
{},
{}));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleAsyncDone(const HloInstruction* async_done) {
const Literal& operand_tuple_literal =
GetEvaluatedLiteralFor(async_done->operand(0));
evaluated_[async_done] = Literal(async_done->shape());
TF_RETURN_IF_ERROR(evaluated_[async_done].CopyFrom(operand_tuple_literal,
{},
{1}));
return absl::OkStatus();
}
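// A kCopyStart can only be evaluated when its sole user is the matching
// kCopyDone; the in-flight copy is modeled as a (dest, source, context)
// tuple whose element {0} is extracted by HandleCopyDone.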
absl::Status HloEvaluator::HandleCopyStart(const HloInstruction* copy_start) {
if (copy_start->user_count() != 1 ||
copy_start->users().at(0)->opcode() != HloOpcode::kCopyDone) {
return absl::FailedPreconditionError(
absl::StrCat("Cannot evaluate a kCopyStart that doesn't have a single "
"kCopyDone user. Instruction: ",
copy_start->ToString()));
}
const Literal context_literal = LiteralUtil::CreateR0<uint32_t>(0);
evaluated_[copy_start] = LiteralUtil::MakeTuple(
{&GetEvaluatedLiteralFor(copy_start->operand(0)),
&GetEvaluatedLiteralFor(copy_start->operand(0)), &context_literal});
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleCopyDone(const HloInstruction* copy_done) {
const HloInstruction* operand = copy_done->operand(0);
if (operand->opcode() != HloOpcode::kCopyStart) {
return absl::FailedPreconditionError(
absl::StrCat("Cannot evaluate a kCopyDone that doesn't have a "
"kCopyStart as operand. Instruction: ",
copy_done->ToString()));
}
const Literal& operand_tuple_literal = GetEvaluatedLiteralFor(operand);
evaluated_[copy_done] =
Literal(ShapeUtil::GetTupleElementShape(operand->shape(), 0));
TF_RETURN_IF_ERROR(evaluated_[copy_done].CopyFrom(operand_tuple_literal,
{},
{0}));
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleCall(const HloInstruction* call) {
auto* computation = call->to_apply();
auto operands = call->operands();
std::vector<const Literal*> arg_literals;
arg_literals.reserve(operands.size());
for (auto operand : operands) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(&arg_literal);
}
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(Literal result,
embedded_evaluator->Evaluate(*computation, arg_literals));
evaluated_[call] = std::move(result);
return absl::OkStatus();
}
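// Evaluates a fusion by cloning the fused computation into a fresh module,
// assigning default layouts to any instructions that lack one, and then
// evaluating the clone like an ordinary called computation.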
absl::Status HloEvaluator::HandleFusion(const HloInstruction* fusion) {
HloModuleConfig config;
HloModule empty_hlo_module("EmptyModuleForFusion", config,
std::make_unique<CompilationEnvironments>(
fusion->GetModule()->comp_envs()));
HloCloneContext context(&empty_hlo_module);
auto cloned_fused_computation =
fusion->fused_instructions_computation()->Clone(
"clone_with_layout", &context);
for (auto* instruction : cloned_fused_computation->instructions()) {
if (!LayoutUtil::HasLayout(instruction->shape())) {
LayoutUtil::SetToDefaultLayout(instruction->mutable_shape());
}
}
auto readded_computation =
empty_hlo_module.AddEntryComputation(std::move(cloned_fused_computation));
auto operands = fusion->operands();
std::vector<const Literal*> arg_literals;
arg_literals.reserve(operands.size());
for (auto operand : operands) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(&arg_literal);
}
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(Literal result, embedded_evaluator->Evaluate(
*readded_computation, arg_literals));
evaluated_[fusion] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleConditional(
const HloInstruction* conditional) {
const auto& branch_index_literal =
GetEvaluatedLiteralFor(conditional->operand(0));
int branch_index;
if (conditional->operand(0)->shape().element_type() == PRED) {
branch_index = branch_index_literal.Get<bool>({}) ? 0 : 1;
} else {
branch_index = branch_index_literal.Get<int32_t>({});
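    // An out-of-range branch index selects the default branch, which is the
    // last one.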
if (branch_index < 0 || branch_index >= conditional->branch_count()) {
branch_index = conditional->branch_count() - 1;
}
}
const auto& branch_computation_arg =
GetEvaluatedLiteralFor(conditional->operand(1 + branch_index));
std::unique_ptr<HloEvaluator> embedded_evaluator =
CreateEmbedded(max_loop_iterations_);
embedded_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
TF_ASSIGN_OR_RETURN(Literal result,
embedded_evaluator->Evaluate(
*conditional->branch_computation(branch_index),
{&branch_computation_arg}));
evaluated_[conditional] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleConvert(const HloInstruction* convert) {
const HloInstruction* operand = convert->operand(0);
TF_RET_CHECK(ShapeUtil::SameDimensions(operand->shape(), convert->shape()));
TF_ASSIGN_OR_RETURN(Literal result, GetEvaluatedLiteralFor(operand).Convert(
convert->shape().element_type()));
evaluated_[convert] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleDynamicSlice(
const HloInstruction* dynamic_slice) {
auto operand = dynamic_slice->operand(0);
auto start_indices = dynamic_slice->operand(1);
auto result_shape = dynamic_slice->shape();
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferDynamicSliceShape(
operand->shape(),
Cast<HloDynamicSliceInstruction>(dynamic_slice)->index_shapes(),
dynamic_slice->dynamic_slice_sizes()));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape is set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
TF_RET_CHECK(
primitive_util::IsIntegralType(start_indices->shape().element_type()));
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
std::vector<int64_t> start =
GetS64Indices(absl::MakeConstSpan(dynamic_slice->operands()).subspan(1));
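  // Clamp the start indices so the slice is always in bounds, per the HLO
  // DynamicSlice semantics.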
for (int64_t i = 0; i < start.size(); ++i) {
start[i] = std::min<int64_t>(
std::max(int64_t{0}, start[i]),
operand_literal.shape().dimensions(i) - result_shape.dimensions(i));
}
std::vector<int64_t> operand_index(start.size());
Literal result(result_shape);
const size_t element_byte_size =
primitive_util::ByteWidth(result_shape.element_type());
auto* operand_base = static_cast<const char*>(operand_literal.untyped_data());
auto func = [&](void* dest, absl::Span<const int64_t> result_index) {
for (int64_t i = 0; i < operand_index.size(); ++i) {
CHECK_GE(result_index[i] + start[i], 0);
operand_index[i] = result_index[i] + start[i];
}
auto* src = operand_base + (element_byte_size *
IndexUtil::MultidimensionalIndexToLinearIndex(
operand_literal.shape(), operand_index));
std::memcpy(dest, src, element_byte_size);
return true;
};
TF_RETURN_IF_ERROR(result.PopulateInplace(func));
evaluated_[dynamic_slice] = std::move(result);
return absl::OkStatus();
}
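// Evaluates a kDynamicUpdateSlice by cloning the operand and copying the
// update into it; start indices are clamped so the update always fits inside
// the operand, per the HLO DynamicUpdateSlice semantics.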
absl::Status HloEvaluator::HandleDynamicUpdateSlice(const HloInstruction* dus) {
auto operand = dus->operand(0);
auto update = dus->operand(1);
auto start_indices = dus->operand(2);
auto result_shape = dus->shape();
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferDynamicUpdateSliceShape(
operand->shape(), update->shape(),
Cast<HloDynamicUpdateSliceInstruction>(dus)->index_shapes()));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape is set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
TF_RET_CHECK(
primitive_util::IsIntegralType(start_indices->shape().element_type()));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, operand->shape()));
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const Literal& update_literal = GetEvaluatedLiteralFor(update);
auto result = operand_literal.Clone();
const auto rank = result.shape().rank();
std::vector<int64_t> start =
GetS64Indices(absl::MakeConstSpan(dus->operands()).subspan(2));
for (int64_t i = 0; i < rank; ++i) {
start[i] = std::min<int64_t>(
std::max<int64_t>(0, start[i]),
result.shape().dimensions(i) - update_literal.shape().dimensions(i));
}
std::vector<int64_t> result_index(rank, 0);
auto func = [&](absl::Span<const int64_t> update_index) {
std::transform(update_index.begin(), update_index.end(), start.begin(),
result_index.begin(), std::plus<int64_t>());
result.CopyElementFrom(update_literal, update_index, result_index);
return true;
};
std::vector<int64_t> base(update_literal.shape().dimensions_size(), 0);
std::vector<int64_t> step(update_literal.shape().dimensions_size(), 1);
ShapeUtil::ForEachIndexNoStatus(update_literal.shape(), base,
update_literal.shape().dimensions(), step,
func);
evaluated_[dus] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleSelect(const HloInstruction* select) {
const auto& pred = GetEvaluatedLiteralFor(select->operand(0));
const auto& on_true = GetEvaluatedLiteralFor(select->operand(1));
const auto& on_false = GetEvaluatedLiteralFor(select->operand(2));
if (ShapeUtil::IsScalar(pred.shape())) {
if (pred.Get<bool>({})) {
evaluated_[select] = on_true.Clone();
} else {
evaluated_[select] = on_false.Clone();
}
return absl::OkStatus();
}
return DefaultAction(select);
}
namespace {
absl::StatusOr<Literal> CreateScalarLiteral(int64_t value,
PrimitiveType element_type) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
return LiteralUtil::CreateR0(
static_cast<NativeTypeOf<primitive_type_constant>>(value));
}
return InvalidArgument("Unsupported element type.");
},
element_type);
}
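// If the while loop matches a known static pattern, computes the final value
// of the induction variable in closed form (init_value + trip_count * step)
// and returns a result tuple in which all other elements are marked as
// unknown leaf arrays.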
absl::StatusOr<Literal> TryParseAndEvaluateWhileInductionVar(
const HloInstruction* while_hlo) {
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_hlo, {});
if (!parsed_while_loop.has_value() || parsed_while_loop->is_dynamic()) {
return FailedPrecondition(
"Cannot evaluate a while loop's induction variable since the loop "
"does not match a known loop pattern or the loop is not static.");
}
int64_t induction_var_value =
parsed_while_loop->static_while_loop->induction_var_init_value +
parsed_while_loop->static_while_loop->trip_count *
parsed_while_loop->static_while_loop->step_size;
Shape result_shape = while_hlo->shape().tuple_shapes(
parsed_while_loop->static_while_loop->induction_var_index);
TF_ASSIGN_OR_RETURN(
Literal result,
CreateScalarLiteral(induction_var_value, result_shape.element_type()));
std::vector<Literal*> while_result_element_ptrs;
while_result_element_ptrs.reserve(while_hlo->shape().tuple_shapes_size());
std::vector<Literal> while_result_elements(
while_hlo->shape().tuple_shapes_size());
for (int i = 0; i < while_hlo->shape().tuple_shapes_size(); ++i) {
if (i == parsed_while_loop->static_while_loop->induction_var_index) {
while_result_element_ptrs.push_back(&result);
} else {
const Shape& shape = while_hlo->shape().tuple_shapes(i);
while_result_elements[i] =
Literal::CreateFromShapeWithUnknownLeafArrays(shape);
while_result_element_ptrs.push_back(&while_result_elements[i]);
}
}
return LiteralUtil::MakeTuple(while_result_element_ptrs);
}
}  // namespace
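// Evaluates a kWhile by repeatedly evaluating the condition and body on the
// loop carry value. If the carry contains unknown values or the iteration
// limit is exceeded, falls back to pattern-matching the loop to recover the
// induction variable statically.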
absl::Status HloEvaluator::HandleWhile(const HloInstruction* while_hlo) {
const HloComputation* cond_comp = while_hlo->while_condition();
const HloComputation* body_comp = while_hlo->while_body();
auto lcv = GetEvaluatedLiteralFor(while_hlo->operand(0)).Clone();
if (!lcv.IsKnown()) {
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_hlo,
{});
evaluated_[while_hlo] =
Literal::CreateFromShapeWithUnknownLeafArrays(while_hlo->shape());
if (!parsed_while_loop.has_value() || parsed_while_loop->is_dynamic() ||
visitor_shape_index_.size() != 1 ||
parsed_while_loop->static_while_loop->induction_var_index !=
visitor_shape_index_[0]) {
return absl::OkStatus();
}
Shape induction_var_shape =
ShapeUtil::GetSubshape(while_hlo->shape(), visitor_shape_index_);
int64_t trip_count = parsed_while_loop->static_while_loop->trip_count;
TF_ASSIGN_OR_RETURN(
Literal induction_var_val,
CreateScalarLiteral(trip_count, induction_var_shape.element_type()));
TF_RETURN_IF_ERROR(evaluated_[while_hlo].CopyFrom(
induction_var_val, visitor_shape_index_,
{}));
return absl::OkStatus();
}
bool keep_going = true;
int64_t iteration_count = 0;
std::unique_ptr<HloEvaluator> cond_evaluator =
CreateEmbedded(max_loop_iterations_);
cond_evaluator->set_dynamic_dimension_inference(dynamic_dimension_inference_);
std::unique_ptr<HloEvaluator> loop_body_evaluator =
CreateEmbedded(max_loop_iterations_);
loop_body_evaluator->set_dynamic_dimension_inference(
dynamic_dimension_inference_);
while (keep_going) {
if (max_loop_iterations_ >= 0 && iteration_count++ > max_loop_iterations_) {
absl::StatusOr<Literal> result =
TryParseAndEvaluateWhileInductionVar(while_hlo);
if (result.ok()) {
lcv = std::move(result).value();
break;
} else {
return InvalidArgument("Loop %s exceeded loop iteration limit (%d).",
while_hlo->name(), max_loop_iterations_);
}
}
TF_ASSIGN_OR_RETURN(auto cond_val,
cond_evaluator->Evaluate(*cond_comp, {&lcv}));
keep_going = cond_val.GetFirstElement<bool>();
if (keep_going) {
TF_ASSIGN_OR_RETURN(auto body_val,
loop_body_evaluator->Evaluate(*body_comp, {&lcv}));
VLOG(3) << "Loop iteration result: " << body_val.ToString();
lcv = std::move(body_val);
cond_evaluator->ResetVisitStates();
loop_body_evaluator->ResetVisitStates();
}
}
evaluated_[while_hlo] = std::move(lcv);
return absl::OkStatus();
}
namespace {
template <typename NativeT>
Literal ExtractLiteralFromIndexPositions(const Literal& from,
absl::Span<int64_t const> indices) {
absl::InlinedVector<NativeT, 10> values;
for (int64_t index : indices) {
values.push_back(from.Get<NativeT>({index}));
}
return LiteralUtil::CreateR1<NativeT>(values);
}
absl::StatusOr<Literal> ExtractFromIndexPositions(
const Literal& from, absl::Span<int64_t const> indices) {
PrimitiveType type = from.shape().element_type();
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return ExtractLiteralFromIndexPositions<
NativeTypeOf<primitive_type_constant>>(from, indices);
}
return InvalidArgument("Unsupported type for Sort: %s",
PrimitiveType_Name(type));
},
type);
}
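// Enumerates every position of a window placed at window_count_index and maps
// it back to an index into the base shape, honoring strides, window/base
// dilation, and padding; f is invoked only for in-bounds base indices.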
void IterateThroughWindow(
const Shape& window_shape, const Window& window, const Shape& base_shape,
const absl::Span<const int64_t> window_count_index,
const std::function<void(absl::Span<const int64_t>)>& f) {
const int64_t rank = base_shape.rank();
DimensionVector window_index(rank);
std::fill(window_index.begin(), window_index.end(), 0);
do {
DimensionVector base_index(rank);
bool out_of_bound = false;
for (int64_t i = 0; i < rank; ++i) {
base_index[i] = window_count_index[i] * window.dimensions(i).stride() +
window_index[i] * window.dimensions(i).window_dilation() -
window.dimensions(i).padding_low();
if (base_index[i] % window.dimensions(i).base_dilation() != 0) {
out_of_bound = true;
break;
}
base_index[i] /= window.dimensions(i).base_dilation();
if (base_index[i] < 0 || base_index[i] >= base_shape.dimensions(i)) {
out_of_bound = true;
break;
}
}
if (!out_of_bound) {
f(base_index);
}
} while (IndexUtil::BumpIndices(window_shape, absl::MakeSpan(window_index)));
}
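// Implements stochastic rounding for kStochasticConvert: the fractional part
// of the (absolute) operand is compared against a uniform random value and
// the result is rounded up exactly when random < fraction, with infinities
// and out-of-range operands saturated and NaN mapped to zero.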
template <typename Fp, typename Uint, typename ResultT>
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
std::function<ResultT(Fp, Uint)> stochastic_convert_op =
[](Fp operand, Uint random) -> ResultT {
bool is_negative = static_cast<bool>(Eigen::numext::signbit(operand));
if (Eigen::numext::isinf(operand)) {
return is_negative ? std::numeric_limits<ResultT>::min()
: std::numeric_limits<ResultT>::max();
}
if (Eigen::numext::isnan(operand)) {
return static_cast<ResultT>(0);
}
if (operand >= static_cast<Fp>(std::numeric_limits<ResultT>::max())) {
return std::numeric_limits<ResultT>::max();
}
if (operand <= static_cast<Fp>(std::numeric_limits<ResultT>::min())) {
return std::numeric_limits<ResultT>::min();
}
operand = Eigen::numext::abs(operand);
auto truncated = static_cast<ResultT>(operand);
Fp fractional = operand - static_cast<Fp>(truncated);
if (fractional == Fp{0}) {
return is_negative ? -truncated : truncated;
}
auto fixed_fractional = static_cast<Uint>(std::ldexp(
static_cast<double>(fractional), std::numeric_limits<Uint>::digits));
if (random < fixed_fractional) {
if (truncated == std::numeric_limits<ResultT>::max()) {
return std::numeric_limits<ResultT>::min();
}
truncated++;
}
return is_negative ? -truncated : truncated;
};
Literal result(result_shape);
TF_RETURN_IF_ERROR(
result.Populate<ResultT>([&](absl::Span<const int64_t> multi_index) {
return stochastic_convert_op(operand_literal.Get<Fp>(multi_index),
random_literal.Get<Uint>(multi_index));
}));
return std::move(result);
}
template <PrimitiveType operand_type, PrimitiveType random_type,
PrimitiveType result_type>
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
return StochasticConvertOp<
typename primitive_util::PrimitiveTypeToNative<operand_type>::type,
typename primitive_util::PrimitiveTypeToNative<random_type>::type,
typename primitive_util::PrimitiveTypeToNative<result_type>::type>(
operand_literal, random_literal, result_shape);
}
template <PrimitiveType operand_type, PrimitiveType random_type>
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsSignedIntegralType(
primitive_type_constant)) {
return StochasticConvertOp<operand_type, random_type,
primitive_type_constant>(
operand_literal, random_literal, result_shape);
}
return Unimplemented(
"Stochastically converting from type %s to type %s is not "
"implemented.",
PrimitiveType_Name(operand_literal.shape().element_type()),
PrimitiveType_Name(result_shape.element_type()));
},
result_shape.element_type());
}
absl::StatusOr<Literal> StochasticConvertOp(const Literal& operand_literal,
const Literal& random_literal,
const Shape& result_shape) {
return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<Literal> {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
return StochasticConvertOp<
primitive_type_constant,
primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(primitive_type_constant))>(
operand_literal, random_literal, result_shape);
}
return Unimplemented(
"Stochastically converting from type %s to type %s is not "
"implemented.",
PrimitiveType_Name(operand_literal.shape().element_type()),
PrimitiveType_Name(result_shape.element_type()));
},
operand_literal.shape().element_type());
}
}  // namespace
absl::Status HloEvaluator::HandleReverse(const HloInstruction* reverse) {
const Shape& result_shape = reverse->shape();
const auto reverse_dimensions = reverse->dimensions();
auto operand = reverse->operand(0);
TF_ASSIGN_OR_RETURN(
auto inferred_return_shape,
ShapeInference::InferReverseShape(operand->shape(), reverse_dimensions));
TF_RET_CHECK(ShapeUtil::Compatible(result_shape, inferred_return_shape))
<< "return shape set to: " << ShapeUtil::HumanString(result_shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
Literal result(result_shape);
const size_t element_byte_size =
primitive_util::ByteWidth(result_shape.element_type());
auto* operand_base = static_cast<const char*>(operand_literal.untyped_data());
TF_RETURN_IF_ERROR(result.PopulateInplaceParallel(
[&](void* dest, absl::Span<const int64_t> out_index, int) {
std::vector<int64_t> from_index(out_index.begin(), out_index.end());
for (const int64_t dim : reverse_dimensions) {
from_index[dim] = result_shape.dimensions(dim) - 1 - out_index[dim];
}
auto* src =
operand_base +
(element_byte_size * IndexUtil::MultidimensionalIndexToLinearIndex(
operand_literal.shape(), from_index));
std::memcpy(dest, src, element_byte_size);
}));
evaluated_[reverse] = std::move(result);
return absl::OkStatus();
}
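// Evaluates kSelectAndScatter: for each source element, the `select`
// computation picks one position inside the corresponding operand window,
// and the `scatter` computation then combines the source value into the
// result at that position.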
absl::Status HloEvaluator::HandleSelectAndScatter(
const HloInstruction* select_and_scatter) {
auto operand = select_and_scatter->operand(0);
auto source = select_and_scatter->operand(1);
const Window& window = select_and_scatter->window();
const Literal& init_literal =
GetEvaluatedLiteralFor(select_and_scatter->operand(2));
TF_RET_CHECK(ShapeUtil::IsScalar(init_literal.shape()));
TF_ASSIGN_OR_RETURN(Literal result,
init_literal.Broadcast(select_and_scatter->shape(), {}));
std::vector<int64_t> window_dimension_sizes;
for (const auto& window_dimension : window.dimensions()) {
window_dimension_sizes.push_back(window_dimension.size());
}
const Shape window_shape = ShapeUtil::MakeShape(
operand->shape().element_type(), window_dimension_sizes);
const HloComputation* select = select_and_scatter->select();
const HloComputation* scatter = select_and_scatter->scatter();
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const Literal& source_literal = GetEvaluatedLiteralFor(source);
int64_t rank = operand_literal.shape().rank();
HloEvaluator embedded_evaluator(max_loop_iterations_);
DimensionVector source_index(rank, 0);
do {
std::optional<Literal> selected_val;
std::optional<DimensionVector> selected_index;
IterateThroughWindow(
window_shape, window, operand_literal.shape(), source_index,
[&](absl::Span<const int64_t> operand_index) {
auto curr_val =
LiteralUtil::GetScalarLiteral(operand_literal, operand_index);
if (!selected_val.has_value()) {
selected_val.emplace(curr_val.Clone());
selected_index.emplace(operand_index.begin(), operand_index.end());
}
Literal computed_result =
embedded_evaluator
.Evaluate(*select, {&selected_val.value(), &curr_val})
.value();
bool selected = !computed_result.Get<bool>({});
if (selected) {
*selected_val = std::move(curr_val);
selected_index.emplace(operand_index.begin(), operand_index.end());
}
embedded_evaluator.ResetVisitStates();
});
IterateThroughWindow(
window_shape, window, operand_literal.shape(), source_index,
[&](absl::Span<const int64_t> operand_index) {
if (std::equal(operand_index.begin(), operand_index.end(),
selected_index->begin())) {
auto source =
LiteralUtil::GetScalarLiteral(source_literal, source_index);
auto scattered =
LiteralUtil::GetScalarLiteral(result, operand_index);
Literal computed_result =
embedded_evaluator.Evaluate(*scatter, {&source, &scattered})
.value();
LiteralUtil::SetScalarLiteral(result, operand_index,
computed_result);
embedded_evaluator.ResetVisitStates();
}
});
} while (
IndexUtil::BumpIndices(source->shape(), absl::MakeSpan(source_index)));
evaluated_[select_and_scatter] = std::move(result);
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleSlice(const HloInstruction* slice) {
auto operand = slice->operand(0);
const Shape& shape = slice->shape();
TF_ASSIGN_OR_RETURN(auto inferred_return_shape,
ShapeInference::InferSliceShape(
operand->shape(), slice->slice_starts(),
slice->slice_limits(), slice->slice_strides()));
TF_RET_CHECK(ShapeUtil::Compatible(shape, inferred_return_shape))
<< "return shape set to: " << ShapeUtil::HumanString(shape)
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
const int64_t rank = operand->shape().rank();
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const size_t element_byte_size =
primitive_util::ByteWidth(shape.element_type());
auto* operand_base = static_cast<const char*>(operand_literal.untyped_data());
auto func = [&](void* dest, absl::Span<const int64_t> out_index, int) {
DimensionVector operand_index(rank);
for (int64_t i = 0; i < rank; ++i) {
operand_index[i] =
slice->slice_starts(i) + out_index[i] * slice->slice_strides(i);
}
auto* src = operand_base + (element_byte_size *
IndexUtil::MultidimensionalIndexToLinearIndex(
operand_literal.shape(), operand_index));
std::memcpy(dest, src, element_byte_size);
};
Literal result(shape);
TF_RETURN_IF_ERROR(result.PopulateInplaceParallel(func));
evaluated_[slice] = std::move(result);
return absl::OkStatus();
}
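// Evaluates a kSort along sort->dimensions(0) using a (possibly parallel)
// merge sort whose comparator is the sort's to_apply computation; 1-D slices
// of all operands are permuted together, keyed by the computed index
// permutation.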
absl::Status HloEvaluator::HandleSort(const HloInstruction* sort) {
TF_RET_CHECK(sort->operand_count() >= 1)
<< "Expected at least 1 operand for sort";
for (int64_t i = 1; i < sort->operand_count(); ++i) {
TF_RET_CHECK(ShapeUtil::SameDimensions(sort->operand(0)->shape(),
sort->operand(i)->shape()))
<< "All Sort operands must have the same dimensions";
}
if (VLOG_IS_ON(3)) {
for (int64_t i = 0; i < sort->operand_count(); ++i) {
VLOG(3) << "HandleSort operand " << i << " literal: "
<< GetEvaluatedLiteralFor(sort->operand(i)).ToString();
}
}
Shape key_shape = sort->operand(0)->shape();
auto rank = key_shape.rank();
std::vector<Literal> result_literals;
result_literals.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
result_literals.emplace_back(sort->operand(i)->shape());
}
std::vector<int64_t> zero_base(rank, 0);
std::vector<int64_t> increment(rank, 1);
int64_t sort_dim = sort->dimensions(0);
int64_t sort_dim_elements = key_shape.dimensions(sort_dim);
TF_RET_CHECK(sort_dim >= 0 && sort_dim < increment.size())
<< "Unexpected out-of-bound sort dimension " << sort_dim
<< " accessing increment of size " << increment.size();
increment[sort_dim] = sort_dim_elements;
auto comparator =
[sort](absl::Span<const Literal> literals_to_sort, int64_t a, int64_t b,
HloEvaluator* embedded_evaluator) -> absl::StatusOr<bool> {
absl::InlinedVector<Literal, 8> literals;
literals.reserve(2 * sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
literals.push_back(
LiteralUtil::GetScalarLiteral(literals_to_sort[i], {a}));
literals.push_back(
LiteralUtil::GetScalarLiteral(literals_to_sort[i], {b}));
}
absl::InlinedVector<const Literal*, 8> literal_ptrs;
absl::c_transform(literals, std::back_inserter(literal_ptrs),
[](const Literal& literal) { return &literal; });
TF_ASSIGN_OR_RETURN(
auto computed_result,
embedded_evaluator->Evaluate(*sort->to_apply(), literal_ptrs));
embedded_evaluator->ResetVisitStates();
return computed_result.Get<bool>({});
};
auto less_than =
[&comparator](absl::Span<const Literal> literals_to_sort, int64_t a,
int64_t b,
HloEvaluator* embedded_evaluator) -> absl::StatusOr<bool> {
TF_ASSIGN_OR_RETURN(bool a_is_smaller,
comparator(literals_to_sort, a, b, embedded_evaluator));
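    // In debug builds, verify that the comparator defines a strict order:
    // it must be asymmetric and irreflexive.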
#ifndef NDEBUG
TF_ASSIGN_OR_RETURN(bool b_is_smaller,
comparator(literals_to_sort, b, a, embedded_evaluator));
TF_RET_CHECK(!(b_is_smaller && a_is_smaller));
TF_ASSIGN_OR_RETURN(bool b_is_reflexive,
comparator(literals_to_sort, b, b, embedded_evaluator));
TF_RET_CHECK(!b_is_reflexive);
TF_ASSIGN_OR_RETURN(bool a_is_reflexive,
comparator(literals_to_sort, a, a, embedded_evaluator));
TF_RET_CHECK(!a_is_reflexive);
#endif
return a_is_smaller;
};
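  // Merges two sorted spans of indices into `output` according to
  // `less_than`, using `tmp` as scratch space (a standard merge step).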
std::function<absl::Status(absl::Span<const Literal>, absl::Span<int64_t>,
absl::Span<int64_t>, absl::Span<int64_t>,
std::vector<int64_t>&, HloEvaluator*)>
merge = [&](absl::Span<const Literal> literals_to_sort,
absl::Span<int64_t> lhs, absl::Span<int64_t> rhs,
absl::Span<int64_t> output, std::vector<int64_t>& tmp,
HloEvaluator* embedded_evaluator) -> absl::Status {
tmp.clear();
tmp.reserve(output.size());
while (!lhs.empty() && !rhs.empty()) {
TF_ASSIGN_OR_RETURN(bool rhs_is_smaller,
less_than(literals_to_sort, rhs.front(), lhs.front(),
embedded_evaluator));
if (rhs_is_smaller) {
tmp.push_back(rhs.front());
rhs.remove_prefix(1);
} else {
tmp.push_back(lhs.front());
lhs.remove_prefix(1);
}
}
absl::c_copy(lhs, std::back_inserter(tmp));
absl::c_copy(rhs, std::back_inserter(tmp));
absl::c_copy(tmp, output.begin());
return absl::OkStatus();
};
auto* env = tsl::Env::Default();
const int max_parallelism = tsl::port::MaxParallelism();
constexpr size_t kMinElementsPerThread{1024};
const size_t useful_parallelism = std::min<size_t>(
sort_dim_elements / kMinElementsPerThread, max_parallelism);
const size_t work_per_thread = useful_parallelism > 1
? sort_dim_elements / useful_parallelism
: std::numeric_limits<size_t>::max();
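  // Recursive merge sort over index spans. Sufficiently large spans sort
  // their left half on a separate thread; spans below kMinElementsForMergesort
  // fall back to a binary insertion sort to limit comparator invocations.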
std::function<absl::Status(absl::Span<const Literal>, absl::Span<int64_t>,
std::vector<int64_t>*, HloEvaluator*)>
mergesort = [&merge, &mergesort, &less_than, this, env, work_per_thread](
absl::Span<const Literal> literals_to_sort,
absl::Span<int64_t> to_sort,
std::vector<int64_t>* scratch,
HloEvaluator* embedded_evaluator) -> absl::Status {
if (to_sort.size() < 2) {
return absl::OkStatus();
}
size_t halfway = to_sort.size() / 2;
auto lhs = to_sort.subspan(0, halfway);
auto rhs = to_sort.subspan(halfway);
std::unique_ptr<HloEvaluator> thread_local_embedded_evaluator;
if (embedded_evaluator == nullptr) {
thread_local_embedded_evaluator = CreateEmbedded(max_loop_iterations_);
embedded_evaluator = thread_local_embedded_evaluator.get();
}
constexpr size_t kMinElementsForMergesort{9};
if (to_sort.size() >= kMinElementsForMergesort) {
std::unique_ptr<std::vector<int64_t>> thread_local_scratch;
if (!scratch) {
thread_local_scratch = std::make_unique<std::vector<int64_t>>();
scratch = thread_local_scratch.get();
}
absl::Status lhs_status;
if (to_sort.size() >= work_per_thread) {
std::unique_ptr<tsl::Thread> thread = absl::WrapUnique(env->StartThread(
tsl::ThreadOptions(), "XLA_mergesort",
[literals_to_sort, lhs, &mergesort, &lhs_status] {
lhs_status = mergesort(literals_to_sort, lhs, nullptr, nullptr);
}));
TF_RETURN_IF_ERROR(
mergesort(literals_to_sort, rhs, scratch, embedded_evaluator));
thread.reset();
} else {
TF_RETURN_IF_ERROR(
mergesort(literals_to_sort, rhs, scratch, embedded_evaluator));
lhs_status =
mergesort(literals_to_sort, lhs, scratch, embedded_evaluator);
}
TF_RETURN_IF_ERROR(lhs_status);
TF_RETURN_IF_ERROR(merge(literals_to_sort, lhs, rhs, to_sort, *scratch,
embedded_evaluator));
} else {
for (auto i = to_sort.begin(); i != to_sort.end(); ++i) {
auto len = i - to_sort.begin();
auto ub = to_sort.begin();
auto needle = *i;
while (len != 0) {
auto half_len = len / 2;
auto midpoint = ub + half_len;
TF_ASSIGN_OR_RETURN(bool is_smaller,
less_than(literals_to_sort, needle, *midpoint,
embedded_evaluator));
if (is_smaller) {
len = half_len;
} else {
ub = midpoint + 1;
len -= half_len + 1;
}
}
std::rotate(ub, i, i + 1);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
key_shape, zero_base, key_shape.dimensions(), increment,
[&](absl::Span<const int64_t> indices) -> absl::StatusOr<bool> {
std::vector<int64_t> limit_indices(indices.begin(), indices.end());
absl::c_for_each(limit_indices, [](int64_t& index) { ++index; });
limit_indices[sort_dim] = sort_dim_elements;
std::vector<Literal> literals_to_sort;
literals_to_sort.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
TF_ASSIGN_OR_RETURN(auto literal_to_sort,
GetEvaluatedLiteralFor(sort->operand(i))
.Slice(indices, limit_indices)
.Reshape({sort_dim_elements}));
literals_to_sort.push_back(std::move(literal_to_sort));
}
std::vector<int64_t> indices_to_sort(sort_dim_elements);
std::iota(indices_to_sort.begin(), indices_to_sort.end(), 0);
TF_RETURN_IF_ERROR(mergesort(literals_to_sort,
absl::MakeSpan(indices_to_sort), nullptr,
nullptr));
std::vector<int64_t> slice_dimensions(rank, 1);
slice_dimensions[sort_dim] = sort_dim_elements;
std::vector<int64_t> start_indices(rank, 0);
for (int64_t i = 0; i < sort->operand_count(); ++i) {
TF_ASSIGN_OR_RETURN(
Literal sorted_literal,
ExtractFromIndexPositions(literals_to_sort[i], indices_to_sort));
TF_ASSIGN_OR_RETURN(auto sorted_literal_reshaped,
sorted_literal.Reshape(slice_dimensions));
TF_RETURN_IF_ERROR(result_literals[i].CopySliceFrom(
sorted_literal_reshaped, start_indices, indices,
slice_dimensions));
}
return true;
}));
if (sort->operand_count() == 1) {
evaluated_[sort] = std::move(result_literals[0]);
} else {
std::vector<const Literal*> literal_ptrs;
absl::c_transform(result_literals, std::back_inserter(literal_ptrs),
[](const Literal& literal) { return &literal; });
Literal result_tuple = LiteralUtil::MakeTuple(literal_ptrs);
VLOG(3) << "HandleSort result_tuple: " << result_tuple.ToString();
evaluated_[sort] = std::move(result_tuple);
}
return absl::OkStatus();
}
absl::Status HloEvaluator::HandleStochasticConvert(
const HloInstruction* stochastic_convert) {
const HloInstruction* operand = stochastic_convert->operand(0);
const HloInstruction* random = stochastic_convert->operand(1);
const Shape& result_shape = stochastic_convert->shape();
TF_RET_CHECK(ShapeUtil::SameDimensions(operand->shape(), random->shape()));
TF_RET_CHECK(ShapeUtil::SameDimensions(operand->shape(), result_shape));
const Literal& operand_literal = GetEvaluatedLiteralFor(operand);
const Literal& random_literal = GetEvaluatedLiteralFor(random);
TF_ASSIGN_OR_RETURN(
evaluated_[stochastic_convert],
StochasticConvertOp(operand_literal, random_literal, result_shape));
return absl::OkStatus();
}
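// Returns true if `computation` is a scalar addition of its two distinct
// parameters, i.e. equivalent to this illustrative HLO (not taken from any
// particular module):
//   add { p0 = f32[] parameter(0); p1 = f32[] parameter(1);
//         ROOT r = f32[] add(p0, p1) }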
static bool IsScalarAdd(HloComputation* computation) {
HloInstruction* instruction = computation->root_instruction();
if (instruction->opcode() == HloOpcode::kAdd &&
computation->num_parameters() == 2) {
const HloInstruction* lhs = instruction->operand(0);
const HloInstruction* rhs = instruction->operand(1);
return lhs->opcode() == HloOpcode::kParameter &&
ShapeUtil::IsScalar(lhs->shape()) &&
rhs->opcode() == HloOpcode::kParameter &&
ShapeUtil::IsScalar(rhs->shape()) && lhs != rhs;
}
return false;
}
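// Performs one step of the reduction at `output_index`: packs the current
// accumulators and the input element at `input_index` into scalar literals,
// evaluates the reduction computation on them, and writes the (possibly
// tuple-shaped) result back into `results`.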
static absl::StatusOr<bool> PerformReductionStep(
bool is_tuple, absl::Span<const int64_t> input_index,
absl::Span<const int64_t> output_index,
absl::Span<const Literal* const> input_args, absl::Span<Literal> results,
HloComputation* computation, HloEvaluator* embedded_evaluator) {
int num_args = results.size();
absl::InlinedVector<Literal, 1> arg_values;
arg_values.reserve(num_args);
absl::InlinedVector<Literal, 1> accumulators;
accumulators.reserve(num_args);
for (int64_t i = 0; i < num_args; ++i) {
arg_values.emplace_back(
ShapeUtil::MakeShape(input_args[i]->shape().element_type(), {}));
accumulators.emplace_back(
ShapeUtil::MakeShape(input_args[i]->shape().element_type(), {}));
arg_values[i].CopyElementFrom(*input_args[i], input_index, {});
accumulators[i].CopyElementFrom(results[i], output_index, {});
}
absl::InlinedVector<Literal*, 2> embedded_operands;
for (Literal& accumulator : accumulators) {
embedded_operands.push_back(&accumulator);
}
for (Literal& local_input : arg_values) {
embedded_operands.push_back(&local_input);
}
TF_ASSIGN_OR_RETURN(
Literal computed_result,
embedded_evaluator->Evaluate(*computation, embedded_operands));
embedded_evaluator->ResetVisitStates();
if (is_tuple) {
std::vector<Literal> computed_results = computed_result.DecomposeTuple();
for (int64_t i = 0; i < num_args; ++i) {
results[i].CopyElementFrom(computed_results[i], {}, output_index);
}
} else {
results[0].CopyElementFrom(computed_result, {}, output_index);
}
return true;
}
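// Computes one output element of a reduce by folding every input element that
// maps onto `output_index`. When the reducer is a scalar floating-point add,
// a fast path batches linear indices and sums them in chunks instead of
// invoking the embedded evaluator per element.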
static absl::StatusOr<bool> GenerateReduceOutputElement(
bool is_tuple, bool use_fast_path, absl::Span<const int64_t> output_index,
absl::Span<const Literal* const> init_values,
absl::Span<const Literal* const> input_args, absl::Span<Literal> results,
HloComputation* function, HloEvaluator* embedded_evaluator,
absl::Span<const int64_t> arg_dim_steps,
absl::Span<const int64_t> arg_dim_counts,
absl::Span<const int64_t> result_to_arg_index) {
bool use_fast_add = use_fast_path &&
ShapeUtil::ElementIsFloating(init_values[0]->shape()) &&
IsScalarAdd(function) && !is_tuple;
const Shape& arg_shape = input_args[0]->shape();
absl::Span<const int64_t> arg_dimensions = arg_shape.dimensions();
std::vector<int64_t> base(arg_dimensions.size());
for (int64_t i = 0; i < output_index.size(); ++i) {
base[result_to_arg_index[i]] = output_index[i];
}
for (int64_t i = 0; i < results.size(); ++i) {
results[i].CopyElementFrom(*init_values[i], {}, output_index);
}
if (use_fast_add) {
double computed_result = *init_values[0]->GetAsDouble({});
const Literal* input_arg0 = input_args[0];
const Shape& shape = input_arg0->shape();
absl::Span<const int64_t> minor_to_major = LayoutUtil::MinorToMajor(shape);
static constexpr int kChunkSize = 512;
int64_t linear_indices[kChunkSize];
int n_linear_indices = 0;
auto reduction_step = [&](absl::Span<const int64_t> input_index) -> bool {
linear_indices[n_linear_indices++] =
IndexUtil::MultidimensionalIndexToLinearIndex(shape, minor_to_major,
input_index);
if (n_linear_indices == kChunkSize) {
computed_result += *input_arg0->GetSumAsDouble(
absl::MakeConstSpan(linear_indices, n_linear_indices));
n_linear_indices = 0;
}
return true;
};
ShapeUtil::ForEachIndexNoStatus(arg_shape, base, arg_dim_counts,
arg_dim_steps, reduction_step);
if (n_linear_indices > 0) {
computed_result += *input_arg0->GetSumAsDouble(
absl::MakeConstSpan(linear_indices, n_linear_indices));
}
TF_RETURN_IF_ERROR(results[0].SetFromDouble(output_index, computed_result));
return true;
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexWithStatus(
arg_shape, base, arg_dim_counts, arg_dim_steps,
[&](absl::Span<const int64_t> input_index) {
return PerformReductionStep(is_tuple, input_index, output_index,
input_args, results, function,
embedded_evaluator);
}));
return true;
}
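// Evaluates a (possibly variadic) kReduce in parallel over the output shape,
// using one embedded evaluator per worker thread.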
absl::Status HloEvaluator::HandleReduce(const HloInstruction* hlo) {
const HloReduceInstruction* reduce = Cast<HloReduceInstruction>(hlo);
int64_t num_args = reduce->inputs().size();
absl::Span<const int64_t> dimensions_to_reduce(reduce->dimensions());
HloComputation* function = reduce->to_apply();
absl::InlinedVector<const Shape*, 1> operand_shapes;
for (const HloInstruction* operand : reduce->operands()) {
operand_shapes.push_back(&operand->shape());
}
TF_ASSIGN_OR_RETURN(auto inferred_return_shape,
ShapeInference::InferReduceShape(
operand_shapes, dimensions_to_reduce,
function->ComputeProgramShape()));
TF_RET_CHECK(ShapeUtil::CompatibleIgnoringFpPrecision(reduce->shape(),
inferred_return_shape))
<< "return shape is set to: " << ShapeUtil::HumanString(reduce->shape())
<< " but is inferred to be: "
<< ShapeUtil::HumanString(inferred_return_shape);
absl::InlinedVector<const Literal*, 1> input_args(num_args);
absl::InlinedVector<const Literal*, 1> init_values(num_args);
for (int64_t i = 0; i < num_args; ++i) {
input_args[i] = &GetEvaluatedLiteralFor(reduce->inputs()[i]);
VLOG(3) << "HandleReduce arg_literal: " << input_args[i]->ToString();
init_values[i] = &GetEvaluatedLiteralFor(reduce->init_values()[i]);
VLOG(3) << "HandleReduce init_literal: " << init_values[i]->ToString();
TF_RET_CHECK(ShapeUtil::IsScalar(init_values[i]->shape()));
}
const Shape& arg_shape = input_args[0]->shape();
const Shape& out_shape = inferred_return_shape;
bool is_tuple = out_shape.IsTuple();
const Shape& output_shape = inferred_return_shape.IsTuple()
? inferred_return_shape.tuple_shapes(0)
: inferred_return_shape;
absl::Span<const int64_t> arg_dimensions = arg_shape.dimensions();
std::vector<int64_t> arg_dim_steps(arg_dimensions.size());
std::vector<int64_t> arg_dim_counts(arg_dimensions.size());
for (const int64_t dim : dimensions_to_reduce) {
arg_dim_steps[dim] = 1;
arg_dim_counts[dim] = arg_dimensions[dim];
}
std::vector<int64_t> result_to_arg_index;
for (int64_t i = 0; i < arg_dimensions.size(); ++i) {
if (arg_dim_steps[i] == 0) {
result_to_arg_index.push_back(i);
}
}
const int num_threads = ShapeUtil::GetForEachIndexParallelThreadCount() + 1;
std::vector<std::unique_ptr<HloEvaluator>> embedded_evaluators;
embedded_evaluators.reserve(num_threads);
for (int i = 0; i < num_threads; ++i) {
embedded_evaluators.push_back(CreateEmbedded(max_loop_iterations_));
}
absl::InlinedVector<Literal, 1> results(num_args);
for (int64_t i = 0; i < num_args; ++i) {
results[i] = Literal(is_tuple ? out_shape.tuple_shapes(i) : out_shape);
}
TF_RETURN_IF_ERROR(ShapeUtil::ForEachIndexParallelWithStatus(
output_shape, [&](absl::Span<const int64_t> output_index, int thread_id) {
return GenerateReduceOutputElement(
is_tuple, use_fast_path_reduce_, output_index, init_values,
input_args, absl::Span<Literal>(results), function,
embedded_evaluators[thread_id + 1].get(), arg_dim_steps,
arg_dim_counts, result_to_arg_index);
}));
if (is_tuple) {
Literal tuple_result(inferred_return_shape);
for (int64_t i = 0; i < num_args; ++i) {
TF_CHECK_OK(tuple_result.MoveFrom(std::move(results[i]), {i}));
}
evaluated_[reduce] = std::move(tuple_result);
} else {
CHECK_EQ(results.size(), 1);
evaluated_[reduce] = std::move(results[0]);
}
if (!ShapeUtil::Compatible(reduce->shape(), inferred_return_shape)) {
TF_ASSIGN_OR_RETURN(evaluated_[reduce],
evaluated_[reduce].ConvertToShape(reduce->shape()));
}
return absl::OkStatus();
}
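// Evaluates a (possibly variadic) kReduceWindow: for every output index, the
// corresponding window of each input is folded into clones of the init values
// using the reduce_window's to_apply computation.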
absl::Status HloEvaluator::HandleReduceWindow(const HloInstruction* hlo) {
auto* reduce_window = Cast<HloReduceWindowInstruction>(hlo);
const Window& window = reduce_window->window();
HloComputation* function = reduce_window->to_apply();
TF_ASSIGN_OR_RETURN(auto inferred_return_shape,
ShapeInference::InferReduceWindowShape(
reduce_window->input_shapes(),
reduce_window->init_value_shapes(), window,
function->ComputeProgramShape()));
TF_RET_CHECK(
ShapeUtil::Compatible(reduce_window->shape(), inferred_return_shape))
<< "return shape is set to: "
<< ShapeUtil::HumanStringWithLayout(reduce_window->shape())
<< " but is inferred to be: "
<< ShapeUtil::HumanStringWithLayout(inferred_return_shape);
absl::InlinedVector<const Literal*, 2> input_literal_vec, init_literal_vec;
auto input_arrays = reduce_window->inputs();
auto init_values = reduce_window->init_values();
int64_t num_args = input_arrays.size();
for (int i = 0; i < num_args; ++i) {
const Literal& input_literal = GetEvaluatedLiteralFor(input_arrays[i]);
VLOG(3) << "HandleReduceWindow arg_literal: " << input_literal.ToString();
input_literal_vec.push_back(&input_literal);
const Literal& init_literal = GetEvaluatedLiteralFor(init_values[i]);
VLOG(3) << "HandleReduceWindow init_literal: " << init_literal.ToString();
TF_RET_CHECK(ShapeUtil::IsScalar(init_literal.shape()));
init_literal_vec.push_back(&init_literal);
}
absl::InlinedVector<int64_t, 2> window_dimension_sizes;
for (const auto& window_dimension : window.dimensions()) {
window_dimension_sizes.push_back(window_dimension.size());
}
const Shape window_shape = ShapeUtil::MakeShape(
input_arrays[0]->shape().element_type(), window_dimension_sizes);
const int num_threads = ShapeUtil::GetForEachIndexParallelThreadCount() + 1;
std::vector<std::unique_ptr<HloEvaluator>> embedded_evaluators;
embedded_evaluators.reserve(num_threads);
for (int i = 0; i < num_threads; ++i) {
embedded_evaluators.push_back(CreateEmbedded(max_loop_iterations_));
}
auto evaluate_impl = [&init_literal_vec, &window_shape, &window,
&input_literal_vec, &embedded_evaluators, function,
&inferred_return_shape](
absl::Span<const int64_t> output_index,
int thread_id) -> absl::InlinedVector<Literal, 2> {
const int embedded_evaluator_index = thread_id + 1;
CHECK_GE(embedded_evaluator_index, 0);
CHECK_LT(embedded_evaluator_index, embedded_evaluators.size());
HloEvaluator& embedded_evaluator =
*embedded_evaluators[embedded_evaluator_index];
absl::InlinedVector<Literal, 2> computed_result;
computed_result.reserve(init_literal_vec.size());
for (const auto* init : init_literal_vec) {
computed_result.push_back(init->Clone());
}
IterateThroughWindow(
window_shape, window, input_literal_vec[0]->shape(), output_index,
[&](absl::Span<const int64_t> operand_index) -> void {
absl::InlinedVector<const Literal*, 2> args;
for (auto& curr_result_val : computed_result) {
VLOG(2) << "Pushing:" << curr_result_val.ToString() << "\n";
args.push_back(&curr_result_val);
}
absl::InlinedVector<Literal, 2> curr_val_literal_vec;
curr_val_literal_vec.reserve(input_literal_vec.size());
for (const auto* input_literal : input_literal_vec) {
curr_val_literal_vec.push_back(Literal(ShapeUtil::MakeShape(
input_literal->shape().element_type(), {})));
curr_val_literal_vec.back().CopyElementFrom(*input_literal,
operand_index, {});
VLOG(2) << "Pushing:" << curr_val_literal_vec.back().ToString()
<< "\n";
args.push_back(&curr_val_literal_vec.back());
}
computed_result[0] =
embedded_evaluator.Evaluate(*function, args).value();
VLOG(2) << "Computed result:" << computed_result[0].ToString()
<< "\n";
embedded_evaluator.ResetVisitStates();
if (inferred_return_shape.IsTuple()) {
auto decomposed = computed_result[0].DecomposeTuple();
computed_result.clear();
computed_result.reserve(decomposed.size());
for (int i = 0; i < decomposed.size(); ++i) {
computed_result.push_back(std::move(decomposed[i]));
}
}
});
VLOG(2) << "Final result size:" << computed_result.size() << "\n";
for (const auto& res : computed_result) {
VLOG(2) << res.ToString() << "\n";
}
return computed_result;
};
Literal result(inferred_return_shape);
if (inferred_return_shape.IsTuple()) {
absl::InlinedVector<Literal, 1> results(num_args);
for (int64_t i = 0; i < num_args; ++i) {
results[i] = Literal(inferred_return_shape.tuple_shapes(i));
}
ShapeUtil::ForEachIndexParallel(
inferred_return_shape.tuple_shapes(0),
[&results, &evaluate_impl](absl::Span<const int64_t> output_index,
int thread_id) -> bool {
absl::InlinedVector<Literal, 2> computed_result_vec =
evaluate_impl(output_index, thread_id);
for (int i = 0; i < computed_result_vec.size(); ++i) {
results[i].CopyElementFrom(computed_result_vec[i], {},
output_index);
}
return true;
});
result = Literal::MoveIntoTuple(absl::MakeSpan(results));
VLOG(2) << "Final result is:" << result.ToString() << "\n";
} else {
TF_RETURN_IF_ERROR(Apply<PopulateParallelImpl>(
result, [&evaluate_impl](absl::Span<const int64_t> output_index,
int thread_id) {
return std::move(evaluate_impl(output_index, thread_id)[0]);
}));
}
VLOG(2) << "Final result is:" << result.ToString() << "\n";
evaluated_[reduce_window] = std::move(result);
return absl::OkStatus();
}
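// Evaluates map by applying the mapped computation element-wise to scalar
// slices of the operands.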
absl::Status HloEvaluator::HandleMap(const HloInstruction* map) {
auto operands = map->operands();
const HloComputation* computation = map->to_apply();
Literal result(map->shape());
HloEvaluator embedded_evaluator(max_loop_iterations_);
TF_RETURN_IF_ERROR(
Apply<PopulateImpl>(result, [&](absl::Span<const int64_t> multi_index) {
std::vector<Literal> arg_literals;
arg_literals.reserve(operands.size());
for (auto operand : operands) {
const Literal& arg_literal = GetEvaluatedLiteralFor(operand);
arg_literals.push_back(
LiteralUtil::GetScalarLiteral(arg_literal, multi_index));
}
Literal computed_result =
embedded_evaluator.Evaluate(*computation, arg_literals).value();
embedded_evaluator.ResetVisitStates();
return computed_result;
}));
evaluated_[map] = std::move(result);
return absl::OkStatus();
}
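// Custom-calls can only be evaluated if the client registered a custom-call
// handler; otherwise defer to DefaultAction.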
absl::Status HloEvaluator::HandleCustomCall(const HloInstruction* custom_call) {
if (!custom_call_handler_) {
return DefaultAction(custom_call);
}
std::vector<const Literal*> operands;
operands.reserve(custom_call->operand_count());
for (const HloInstruction* operand : custom_call->operands()) {
operands.push_back(&GetEvaluatedLiteralFor(operand));
}
TF_ASSIGN_OR_RETURN(
auto output, custom_call_handler_(custom_call, absl::MakeSpan(operands)));
evaluated_[custom_call] = std::move(output);
return absl::OkStatus();
}
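// Invoked before visiting each instruction: unless partial evaluation is
// enabled, every operand must already have a known evaluated literal.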
absl::Status HloEvaluator::Preprocess(const HloInstruction* hlo) {
VLOG(3) << "About to visit HLO: " << hlo->ToString();
if (!enable_partial_evaluation_) {
for (const HloInstruction* operand : hlo->operands()) {
if (!IsAlreadyEvaluated(operand) ||
!GetEvaluatedLiteralFor(operand).IsKnown()) {
return tsl::errors::FailedPrecondition(
"Failed to evaluate instruction since its operands are unknown "
"or undetermined and partial evaluation is not enabled.");
}
}
}
return ShapeUtil::ValidateShape(hlo->shape());
}
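// Invoked after visiting each instruction: relayouts the evaluated literal if
// its layout differs from the instruction's (possibly defaulted) layout.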
absl::Status HloEvaluator::Postprocess(const HloInstruction* hlo) {
VLOG(3) << "Finished visiting " << hlo->ToString()
<< "; evaluated value is: " << GetEvaluatedLiteralFor(hlo).ToString();
auto evaluated_shape = GetEvaluatedLiteralFor(hlo).shape();
xla::Shape hlo_shape = hlo->shape();
if (hlo_shape.IsArray() && !hlo_shape.has_layout()) {
*hlo_shape.mutable_layout() =
LayoutUtil::GetDefaultLayoutForShape(hlo_shape);
}
if (evaluated_shape.has_layout() && hlo_shape.has_layout() &&
!Layout::Equal().MinorToMajorOnly()(evaluated_shape.layout(),
hlo_shape.layout())) {
evaluated_.at(hlo) = evaluated_.at(hlo).Relayout(hlo_shape);
}
return absl::OkStatus();
}
namespace {
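// Common driver for the MatmulArray2D overloads below: validates that the
// contracted dimensions agree, allocates the m x n result, and dispatches to
// the single-threaded Eigen runtime kernel passed as impl_fn.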
template <typename T>
std::unique_ptr<Array2D<T>> MatmulArray2DImpl(
const Array2D<T>& lhs, const Array2D<T>& rhs,
const std::function<void(const void* run_options_ptr, T* out, T* lhs,
T* rhs, int64_t m, int64_t n, int64_t k,
int32_t transpose_lhs, int32_t transpose_rhs)>&
impl_fn) {
CHECK_EQ(lhs.width(), rhs.height());
int m = lhs.height();
int n = rhs.width();
int k = lhs.width();
auto result = std::make_unique<Array2D<T>>(m, n);
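  // Note: the operand pointers and the m/n extents below are swapped relative
  // to impl_fn's declared parameter order; this evaluates the product in
  // transposed form to match the layout the Eigen runtime kernels expect.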
  impl_fn(
      /*run_options_ptr=*/nullptr, result->data(), rhs.data(), lhs.data(),
      n, m, k,
      /*transpose_lhs=*/0, /*transpose_rhs=*/0);
return result;
}
}
std::unique_ptr<Array2D<Eigen::half>> HloEvaluator::MatmulArray2D(
const Array2D<Eigen::half>& lhs, const Array2D<Eigen::half>& rhs) {
return MatmulArray2DImpl<Eigen::half>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF16);
}
std::unique_ptr<Array2D<float>> HloEvaluator::MatmulArray2D(
const Array2D<float>& lhs, const Array2D<float>& rhs) {
return MatmulArray2DImpl<float>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF32);
}
std::unique_ptr<Array2D<double>> HloEvaluator::MatmulArray2D(
const Array2D<double>& lhs, const Array2D<double>& rhs) {
return MatmulArray2DImpl<double>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF64);
}
std::unique_ptr<Array2D<std::complex<float>>> HloEvaluator::MatmulArray2D(
const Array2D<std::complex<float>>& lhs,
const Array2D<std::complex<float>>& rhs) {
return MatmulArray2DImpl<std::complex<float>>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulC64);
}
std::unique_ptr<Array2D<std::complex<double>>> HloEvaluator::MatmulArray2D(
const Array2D<std::complex<double>>& lhs,
const Array2D<std::complex<double>>& rhs) {
return MatmulArray2DImpl<std::complex<double>>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulC128);
}
std::unique_ptr<Array2D<int32_t>> HloEvaluator::MatmulArray2D(
const Array2D<int32_t>& lhs, const Array2D<int32_t>& rhs) {
return MatmulArray2DImpl<int32_t>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulS32);
}
std::unique_ptr<Array2D<uint8_t>> HloEvaluator::MatmulArray2D(
const Array2D<uint8_t>& lhs, const Array2D<uint8_t>& rhs) {
return MatmulArray2DImpl<uint8_t>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulU8);
}
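// Widening/narrowing helpers so the F8 MatmulArray2D overloads can be
// computed via the F32 kernel.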
std::unique_ptr<Array2D<float>> Array2DF8E5M2ToF32(
const Array2D<tsl::float8_e5m2>& input) {
auto result = std::make_unique<Array2D<float>>(input.height(), input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) = static_cast<float>(input(rowno, colno));
}
}
return result;
}
std::unique_ptr<Array2D<float>> Array2DF8E4M3FNToF32(
const Array2D<tsl::float8_e4m3fn>& input) {
auto result = std::make_unique<Array2D<float>>(input.height(), input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) = static_cast<float>(input(rowno, colno));
}
}
return result;
}
std::unique_ptr<Array2D<tsl::float8_e5m2>> Array2DF32ToF8E5M2(
const Array2D<float>& input) {
auto result = std::make_unique<Array2D<tsl::float8_e5m2>>(input.height(),
input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) =
static_cast<tsl::float8_e5m2>(input(rowno, colno));
}
}
return result;
}
std::unique_ptr<Array2D<tsl::float8_e4m3fn>> Array2DF32ToF8E4M3FN(
const Array2D<float>& input) {
auto result = std::make_unique<Array2D<tsl::float8_e4m3fn>>(input.height(),
input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) =
static_cast<tsl::float8_e4m3fn>(input(rowno, colno));
}
}
return result;
}
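// When set, F8 matmuls are computed by widening the operands to F32,
// multiplying, and narrowing the product back to F8 instead of calling the
// native F8 kernels.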
static bool promote_f8_to_f32 = true;
std::unique_ptr<Array2D<tsl::float8_e5m2>> HloEvaluator::MatmulArray2D(
const Array2D<tsl::float8_e5m2>& lhs,
const Array2D<tsl::float8_e5m2>& rhs) {
if (promote_f8_to_f32) {
auto lhs_float = Array2DF8E5M2ToF32(lhs);
auto rhs_float = Array2DF8E5M2ToF32(rhs);
auto result = MatmulArray2D(*lhs_float, *rhs_float);
return Array2DF32ToF8E5M2(*result);
} else {
return MatmulArray2DImpl<tsl::float8_e5m2>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF8E5M2);
}
}
std::unique_ptr<Array2D<tsl::float8_e4m3fn>> HloEvaluator::MatmulArray2D(
const Array2D<tsl::float8_e4m3fn>& lhs,
const Array2D<tsl::float8_e4m3fn>& rhs) {
if (promote_f8_to_f32) {
auto lhs_float = Array2DF8E4M3FNToF32(lhs);
auto rhs_float = Array2DF8E4M3FNToF32(rhs);
auto result = MatmulArray2D(*lhs_float, *rhs_float);
return Array2DF32ToF8E4M3FN(*result);
} else {
return MatmulArray2DImpl<tsl::float8_e4m3fn>(
lhs, rhs, __xla_cpu_runtime_EigenSingleThreadedMatMulF8E4M3FN);
}
}
} | #include "xla/hlo/evaluator/hlo_evaluator.h"
#include <array>
#include <complex>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/xla_builder.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/hlo_element_type_converter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
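// Each TEST_P below is instantiated twice via use_bf16_params: once on the
// plain F32 module and once after converting F32 ops to BF16.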
static std::array<bool, 2> use_bf16_params{true, false};
class HloEvaluatorTest : public HloTestBase {
public:
HloEvaluatorTest() : use_bfloat16_(false) { InitializeFftData(); }
absl::StatusOr<Literal> Evaluate(
absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
HloElementTypeConverter(F32, BF16).Run(m_.get()).value();
}
return evaluator_.Evaluate(*m_->entry_computation(), arg_literals);
}
Literal EvaluateWithModule(
HloModule* module, absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
HloElementTypeConverter(F32, BF16).Run(m_.get()).value();
}
return evaluator_.Evaluate(*module->entry_computation(), arg_literals)
.value();
}
void TestUnaryOp(HloOpcode opcode, Literal expected, Literal input,
float aabs = 0) {
HloComputation::Builder b(TestName());
auto c1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
b.AddInstruction(HloInstruction::CreateUnary(expected.shape(), opcode, c1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto element_type = expected.shape().element_type();
if (element_type == F32 || element_type == F64) {
ErrorSpec error(aabs);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, error));
} else {
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
}
void TestBinaryOp(HloOpcode opcode, Literal expected, Literal lhs,
Literal rhs) {
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs)));
b.AddInstruction(
HloInstruction::CreateBinary(expected.shape(), opcode, c1, c2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestTernaryOp(HloOpcode opcode, Literal expected, Literal src0,
Literal src1, Literal src2) {
HloComputation::Builder b(TestName());
auto operand0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src0)));
auto operand1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src1)));
auto operand2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src2)));
b.AddInstruction(HloInstruction::CreateTernary(
expected.shape(), opcode, operand0, operand1, operand2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestEvaluateInstruction(HloInstruction* instruction,
const Literal& expected) {
TF_ASSERT_OK_AND_ASSIGN(Literal result, evaluator_.Evaluate(instruction));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestEvaluationFailure(HloInstruction* instruction) {
absl::StatusOr<Literal> result = evaluator_.Evaluate(instruction);
EXPECT_TRUE(!result.ok());
}
void TestRecursivelyEvaluateInstruction(HloInstruction* instruction,
const Literal& expected) {
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
        evaluator_.Evaluate(
            instruction, {},
            /*recursively_evaluate_nonconstant_operands=*/true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestRecursiveEvaluationFailure(HloInstruction* instruction) {
absl::StatusOr<Literal> result =
        evaluator_.Evaluate(instruction, {},
                            /*recursively_evaluate_nonconstant_operands=*/true);
EXPECT_TRUE(!result.ok());
}
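  // Builds a scalar F32 computation returning max(lhs, rhs), used as the
  // reduction function in the reduce-window tests.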
std::unique_ptr<HloComputation> MaxComputationScalarF32() {
HloComputation::Builder max_computation("max");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = max_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = max_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
max_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMaximum, param_lhs, param_rhs));
return max_computation.Build();
}
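  // Runs a max reduce-window over a 4x4 iota input with the given window
  // parameters and compares against `expected`.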
void ReduceWindowMaxIotaTest(int window_size, int padding, int stride,
int window_dilation, int base_dilation,
const Literal& expected) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(4, 4);
arg_array->FillIota(0);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
auto max_func = m_->AddEmbeddedComputation(MaxComputationScalarF32());
Window window;
WindowDimension dim;
dim.set_size(window_size);
dim.set_stride(stride);
dim.set_padding_low(padding);
dim.set_padding_high(padding);
dim.set_window_dilation(window_dilation);
dim.set_base_dilation(base_dilation);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
int dim0 = expected.shape().dimensions(0);
int dim1 = expected.shape().dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {dim0, dim1});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, max_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
protected:
explicit HloEvaluatorTest(bool use_bfloat16) : use_bfloat16_(use_bfloat16) {
InitializeFftData();
}
void InitializeFftData();
HloEvaluator evaluator_;
const bool use_bfloat16_;
std::unique_ptr<HloModule> m_ = CreateNewVerifiedModule();
ErrorSpec fft_error_ = ErrorSpec(1e-4, 1e-5);
Literal fft_c64x2x4x8_;
Literal fft_c64x2x4x8_1d_;
Literal fft_c64x2x4x8_2d_;
Literal fft_c64x2x4x8_3d_;
};
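// Parameterized variant of HloEvaluatorTest; GetParam() selects whether the
// module is converted to BF16 before evaluation.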
class HloEvaluatorBf16Test : public ::testing::WithParamInterface<bool>,
public HloEvaluatorTest {
protected:
HloEvaluatorBf16Test() : HloEvaluatorTest(GetParam()) {}
};
INSTANTIATE_TEST_SUITE_P(HloEvaluatorTest_Instantiation, HloEvaluatorBf16Test,
::testing::ValuesIn(use_bf16_params));
TEST_P(HloEvaluatorBf16Test, DoesClamp) {
auto low = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
auto value = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
auto high = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
Shape shape = low.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{0, 4}, {2, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DoesClampInt64) {
auto ones = [](int bits) { return (int64_t{1} << bits) - 1; };
auto low =
LiteralUtil::CreateR2<int64_t>({{0, ones(54)}, {ones(54), ones(58)}});
auto value = LiteralUtil::CreateR2<int64_t>({{0, ones(56)}, {0, ones(58)}});
auto high = LiteralUtil::CreateR2<int64_t>(
{{ones(54), ones(55)}, {ones(56), ones(58)}});
Shape shape = low.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected =
LiteralUtil::CreateR2<int64_t>({{0, ones(55)}, {ones(54), ones(58)}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DISABLED_DoesClampSpecialBroadcast) {
auto low = LiteralUtil::CreateR0<float>(0.f);
auto value = LiteralUtil::CreateR2<float>({{-1.f, 0.f}, {1.f, 2.f}});
auto high = LiteralUtil::CreateR0<float>(1.f);
Shape shape = value.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {1, 1}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DoesSelect) {
auto pred = LiteralUtil::CreateR2<bool>({{true, false}, {false, true}});
auto on_true = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
auto on_false = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
Shape shape = on_true.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(pred)));
auto c2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(on_true)));
auto c3 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(on_false)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kSelect, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
auto expected = LiteralUtil::CreateR2<float>({{2, 5}, {0, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesAdd) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-96, 8}});
TestBinaryOp(HloOpcode::kAdd, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_P(HloEvaluatorBf16Test, DoesAnd) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{0, 0}, {4, 4}});
TestBinaryOp(HloOpcode::kAnd, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesOr) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-100, 4}});
TestBinaryOp(HloOpcode::kOr, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesXor) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-104, 0}});
TestBinaryOp(HloOpcode::kXor, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesMultiply) {
auto lhs = LiteralUtil::CreateR2<int32_t>({{-1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int32_t>(
{{std::numeric_limits<int32_t>::min(), 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int32_t>(
{{std::numeric_limits<int32_t>::min(), 0}, {-400, 16}});
TestBinaryOp(HloOpcode::kMultiply, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesDivideInt64) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{0, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesClampS64) {
auto low = LiteralUtil::CreateR1<int64_t>(
{-8616761059752331528LL, 6780561065411491190LL, -8616761059752331528LL});
auto value = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491190LL, 6780561065411491180LL, 4241131823772864090LL});
auto high = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491180LL, 8616761059752331528LL, 3832151243857508051LL});
auto expected = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491190LL, 6780561065411491190LL, 3832151243857508051LL});
TestTernaryOp(HloOpcode::kClamp, std::move(expected), std::move(low),
std::move(value), std::move(high));
}
TEST_P(HloEvaluatorBf16Test, DoesDivideDouble) {
auto lhs = LiteralUtil::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}});
auto rhs = LiteralUtil::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}});
auto expected =
LiteralUtil::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesAbsR2) {
auto operand = LiteralUtil::CreateR2<int64_t>({{1, -20}, {-100, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{1, 20}, {100, 4}});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesAbsR0) {
auto operand = LiteralUtil::CreateR0<float>(-1.0f);
auto expected = LiteralUtil::CreateR0<float>(1.0f);
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesAbsR1WithZeroSize) {
auto operand = LiteralUtil::CreateR1<float>({});
auto expected = LiteralUtil::CreateR1<float>({});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_F(HloEvaluatorTest, DoesAbsC128) {
auto x = LiteralUtil::CreateR0<complex128>({1, 2});
auto expected_real = LiteralUtil::CreateR0<double>(2.23607);
TestUnaryOp(HloOpcode::kAbs, std::move(expected_real), std::move(x), 3e-06);
}
TEST_F(HloEvaluatorTest, DoesNegateR2) {
auto operand = LiteralUtil::CreateR2<int32_t>(
{{0, std::numeric_limits<int32_t>::min()}, {-1, 4}});
auto expected = LiteralUtil::CreateR2<int32_t>(
{{0, std::numeric_limits<int>::min()}, {1, -4}});
TestUnaryOp(HloOpcode::kNegate, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesCosR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{1, -1}, {-1, 1}});
TestUnaryOp(HloOpcode::kCos, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorBf16Test, DoesSinR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kSin, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorBf16Test, DoesTanR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kTan, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_F(HloEvaluatorTest, DoesNotR2) {
auto operand =
LiteralUtil::CreateR2<int32_t>({{0, std::numeric_limits<int>::min()},
{-1, std::numeric_limits<int>::max()}});
auto expected =
LiteralUtil::CreateR2<int32_t>({{-1, std::numeric_limits<int>::max()},
{0, std::numeric_limits<int>::min()}});
TestUnaryOp(HloOpcode::kNot, std::move(expected), std::move(operand));
}
TEST_F(HloEvaluatorTest, DoesRealC128) {
auto x = LiteralUtil::CreateR1<complex128>({{1, 0}, {-100, 4}});
auto expected_real = LiteralUtil::CreateR1<double>({1, -100});
TestUnaryOp(HloOpcode::kReal, std::move(expected_real), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesImagC128) {
auto x = LiteralUtil::CreateR1<complex128>({{1, 0}, {-100, 4}});
auto expected_imag = LiteralUtil::CreateR1<double>({0, 4});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_P(HloEvaluatorBf16Test, DoesImagF32AndBf16) {
auto x = LiteralUtil::CreateR1<float>({1, -100});
auto expected_imag = LiteralUtil::CreateR1<float>({0, 0});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesImagF64) {
auto x = LiteralUtil::CreateR1<double>({1, -100});
auto expected_imag = LiteralUtil::CreateR1<double>({0, 0});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesTraverseInstructions) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto rhs2 = LiteralUtil::CreateR2<int64_t>({{1, -20}, {-100, 4}});
std::vector<const Literal*> args = {&lhs, &rhs, &rhs2};
Shape shape = ShapeUtil::MakeShape(S64, {2, 2});
HloComputation::Builder b(TestName());
auto param_lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "lhs"));
auto param_rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "rhs"));
auto lhs_instruction = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto param_rhs2 =
b.AddInstruction(HloInstruction::CreateParameter(2, shape, "rhs2"));
b.AddInstruction(HloInstruction::CreateBinary(shape, HloOpcode::kAdd,
lhs_instruction, param_rhs2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate(args));
auto expected = LiteralUtil::CreateR2<int64_t>({{4, -16}, {-196, 12}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesReshape) {
HloComputation::Builder b(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal.Clone();
HloInstruction* literal_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
const int64_t permutation[] = {1, 2, 0, 4, 3};
b.AddInstruction(
HloInstruction::CreateTranspose(shape, literal_instruction, permutation));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
result.EachCell<NativeT>(
[&](absl::Span<const int64_t> indices, NativeT value) {
std::vector<int64_t> rindexes = PermuteInverse(indices, permutation);
EXPECT_NEAR(value, literal_clone.Get<NativeT>(rindexes), 0.031250);
});
}
TEST_F(HloEvaluatorTest, DoesBroadcast) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}, {5, 6}});
auto output_literal = LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}}, {{1, 2}, {3, 4}, {5, 6}}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateBroadcast(
output_literal.shape(), literal_instruction, {1, 2}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, output_literal));
}
TEST_F(HloEvaluatorTest, DoesBroadcastScalar) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR0<int32_t>(111);
auto output_literal = LiteralUtil::CreateR2<int32_t>(
{{111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
  b.AddInstruction(HloInstruction::CreateBroadcast(
      output_literal.shape(), literal_instruction,
      /*broadcast_dimensions=*/{}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, output_literal));
}
TEST_F(HloEvaluatorTest, DoesConcatenateSimple) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int64_t>({{-1, -2}, {100, 200}})));
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int64_t>({{-2, -3}, {-100, -200}})));
std::vector<HloInstruction*> operands = {operand1, operand2};
Shape shape = ShapeUtil::MakeShape(S64, {4, 2});
b.AddInstruction(HloInstruction::CreateConcatenate(shape, operands, 0));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<int64_t>(
{{-1, -2}, {100, 200}, {-2, -3}, {-100, -200}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, ConcatenateHandlesShapeWithZeroElement) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int64_t>({100, 200})));
HloInstruction* operand2 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({})));
std::vector<HloInstruction*> operands = {operand1, operand2};
Shape shape = ShapeUtil::MakeShape(S64, {2});
b.AddInstruction(HloInstruction::CreateConcatenate(shape, operands, 0));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<int64_t>({100, 200});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ConvertWithSameLayout) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}, {5, 6}});
auto expected =
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
ASSERT_TRUE(LayoutUtil::LayoutsInShapesEqual(input_literal.shape(),
expected.shape()));
HloInstruction* constant = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateConvert(expected.shape(), constant));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_P(HloEvaluatorBf16Test, ConvertWithDifferentLayout) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2}, {3, 4}, {5, 6}}, LayoutUtil::MakeLayout({0, 1}));
auto expected = LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, LayoutUtil::MakeLayout({1, 0}));
ASSERT_FALSE(LayoutUtil::LayoutsInShapesEqual(input_literal.shape(),
expected.shape()));
HloInstruction* constant = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateConvert(expected.shape(), constant));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
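// Builds a PaddingConfig from {edge_padding_low, edge_padding_high,
// interior_padding} triples, one per dimension.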
PaddingConfig CreatePaddingConfig(
std::initializer_list<std::array<int64_t, 3>> padding_dimensions) {
PaddingConfig padding_config;
for (auto& paddings_per_dim : padding_dimensions) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(paddings_per_dim[0]);
dimension->set_edge_padding_high(paddings_per_dim[1]);
dimension->set_interior_padding(paddings_per_dim[2]);
}
return padding_config;
}
TEST_F(HloEvaluatorTest, Pad2DIntegerArrayWithZeroDimension) {
auto operand = LiteralUtil::CreateR2<int32_t>({{}, {}});
HloComputation::Builder b(TestName());
auto operand_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(operand)));
constexpr int32_t kPadValue = 10;
auto pad_value = LiteralUtil::CreateR0<int32_t>(kPadValue);
auto padding_value_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
auto padding_config = CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}});
Shape shape = ShapeUtil::MakeShape(S32, {5, 2});
b.AddInstruction(HloInstruction::CreatePad(
shape, operand_instruction, padding_value_instruction, padding_config));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<int32_t>(
{{10, 10}, {10, 10}, {10, 10}, {10, 10}, {10, 10}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Pad4DFloatArrayWithInteriorPadding) {
HloComputation::Builder b(TestName());
Array4D<float> input_array(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
constexpr float kPadValue = 1.5;
auto pad_value = LiteralUtil::CreateR0<float>(kPadValue);
HloInstruction* pad_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 5, 1, 1});
auto r4_padding_on_dim0_dim1 =
CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}, {{0, 0, 0}}, {{0, 0, 0}}});
b.AddInstruction(HloInstruction::CreatePad(
shape, input_instruction, pad_instruction, r4_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array4D<float>>(8, 5, 1, 1);
expected_array->Fill(kPadValue);
(*expected_array)(1, 0, 0, 0) = 1.0f;
(*expected_array)(1, 2, 0, 0) = 2.0f;
(*expected_array)(4, 0, 0, 0) = 3.0f;
(*expected_array)(4, 2, 0, 0) = 4.0f;
(*expected_array)(7, 0, 0, 0) = 5.0f;
(*expected_array)(7, 2, 0, 0) = 6.0f;
auto expected = LiteralUtil::CreateR4FromArray4D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, NegativePadding2D) {
HloComputation::Builder b(TestName());
auto input_array = std::make_unique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
auto r2_padding_on_dim0_dim1 =
CreatePaddingConfig({{{-1, -2, 0}}, {{-2, 4, 0}}});
Shape shape = ShapeUtil::MakeShape(F32, {1, 5});
b.AddInstruction(HloInstruction::CreatePad(shape, input_instruction,
pad_value_instruction,
r2_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array2D<float>>(1, 5);
(*expected_array)(0, 0) = 7.0f;
(*expected_array)(0, 1) = 2.718f;
(*expected_array)(0, 2) = 2.718f;
(*expected_array)(0, 3) = 2.718f;
(*expected_array)(0, 4) = 2.718f;
auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(0.031250)));
}
TEST_P(HloEvaluatorBf16Test, NegativeAndInteriorPadding2D) {
HloComputation::Builder b(TestName());
auto input_array = std::make_unique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
PaddingConfig padding_config = MakeNoPaddingConfig(2);
auto r2_padding_on_dim0_dim1 =
CreatePaddingConfig({{{-2, -5, 1}}, {{-2, 4, 2}}});
Shape shape = ShapeUtil::MakeShape(F32, {0, 9});
b.AddInstruction(HloInstruction::CreatePad(shape, input_instruction,
pad_value_instruction,
r2_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array2D<float>>(0, 9);
auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, Pad2DFloatArrayDifferentTypes) {
HloComputation::Builder b(TestName());
b.AddInstruction(HloInstruction::CreatePad(
ShapeUtil::MakeShape(BF16, {5, 2}),
b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<bfloat16>({{}, {}}))),
b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(10.0f))),
CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}})));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
bfloat16 bf16_c(10.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<bfloat16>({{bf16_c, bf16_c},
{bf16_c, bf16_c},
{bf16_c, bf16_c},
{bf16_c, bf16_c},
{bf16_c, bf16_c}}),
result));
}
TEST_P(HloEvaluatorBf16Test, DotRank2AndRank1) {
HloComputation::Builder b(TestName());
auto lhs_array = std::make_unique<Array2D<float>>(4, 1);
lhs_array->FillUnique(1.0f);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_literal = LiteralUtil::CreateR2<float>({{1, 2}});
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {4, 2});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = Array2D<float>({
{1.f, 2.f},
{2.f, 4.f},
{3.f, 6.f},
{4.f, 8.f},
});
auto expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DotRank1AndRank2) {
HloComputation::Builder b(TestName());
auto lhs_literal = LiteralUtil::CreateR1<float>({1, 2, 3});
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_array = std::make_unique<Array2D<float>>(3, 2);
rhs_array->FillUnique(1.0f);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {2});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(0);
dot_dnums.add_rhs_contracting_dimensions(0);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<float>({22.f, 28.f});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DotRank2AndRank2) {
HloComputation::Builder b(TestName());
auto lhs_array = std::make_unique<Array2D<float>>(4, 3);
lhs_array->FillUnique(1.0f);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_array = std::make_unique<Array2D<float>>(3, 2);
rhs_array->FillUnique(1.0f);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {4, 2});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = Array2D<float>({
{22.f, 28.f},
{58.f, 76.f},
{94.f, 124.f},
{130.f, 172.f},
});
auto expected = LiteralUtil::CreateR2FromArray2D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DotRank4AndRank4) {
HloComputation::Builder b(TestName());
auto lhs_array = std::make_unique<Array4D<float>>(2, 2, 3, 1);
lhs_array->FillIota(1.0f);
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(*lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_array = std::make_unique<Array4D<float>>(2, 2, 3, 1);
rhs_array->FillIota(2.0f);
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(*rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 1, 1});
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_batch_dimensions(0);
dot_dnums.add_rhs_batch_dimensions(0);
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_lhs_contracting_dimensions(2);
dot_dnums.add_rhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(2);
b.AddInstruction(HloInstruction::CreateDot(shape, lhs_instruction,
rhs_instruction, dot_dnums,
DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
float expected_1 = 0;
for (float i = 1.0f; i < 7.0f; ++i) {
expected_1 += i * i + i;
}
float expected_2 = 0;
for (float i = 7.0f; i < 13.0f; ++i) {
expected_2 += i * i + i;
}
auto expected_array = Array3D<float>({{{expected_1}}, {{expected_2}}});
auto expected = LiteralUtil::CreateR3FromArray3D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, SimpleConv1D) {
HloComputation::Builder b(TestName());
Array3D<float> lhs_array = {{{1, 2, 3}}};
auto lhs_literal = LiteralUtil::CreateR3FromArray3D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array3D<float> rhs_array = {{{3.f, 4.f}}};
auto rhs_literal = LiteralUtil::CreateR3FromArray3D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.set_input_feature_dimension(1);
dnums.set_output_feature_dimension(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(1);
dnums.add_kernel_spatial_dimensions(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 3});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array3D<float> expected_array = {{{11.f, 18.f, 9.f}}};
auto expected = LiteralUtil::CreateR3FromArray3D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Simple4x4Conv2DWith2x2Kernel) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 4, 4});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 4, 4);
expected_array.FillWithYX(Array2D<float>({
{100, 126, 152, 76},
{204, 230, 256, 124},
{308, 334, 360, 172},
{149, 160, 171, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Conv2DGeneralDimensionsReversed) {
HloComputation::Builder b(TestName());
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 7, 13},
{4, 10, 16}},
{{2, 8, 14},
{5, 11, 17}},
{{3, 9, 15},
{6, 12, 18}}
}});
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(weight);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
rhs_instruction = b.AddInstruction(HloInstruction::CreateReverse(
rhs_instruction->shape(), rhs_instruction, {3, 1}));
Window window;
WindowDimension dim;
dim.set_size(3);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
dim.set_window_reversal(true);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(2);
dnums.set_output_batch_dimension(2);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.add_kernel_spatial_dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2688}}}});
auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Conv2DGeneralDimensions) {
HloComputation::Builder b(TestName());
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 7, 13},
{4, 10, 16}},
{{2, 8, 14},
{5, 11, 17}},
{{3, 9, 15},
{6, 12, 18}}
}});
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(weight);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(3);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(2);
dnums.set_output_batch_dimension(2);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_kernel_output_feature_dimension(0);
dnums.set_kernel_input_feature_dimension(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.add_kernel_spatial_dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 2});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array({{{{2514, 2685}}}});
Array4D<float> expected_array_bf16({{{{2512, 2688}}}});
auto expected = LiteralUtil::CreateR4FromArray4D<float>(
use_bfloat16_ ? expected_array_bf16 : expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DilatedBaseConv2DWithHighPadding) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(2);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 7, 7});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 7, 7);
expected_array.FillWithYX(Array2D<float>({
{5, 12, 10, 18, 15, 24, 20},
{35, 48, 42, 56, 49, 64, 56},
{25, 36, 30, 42, 35, 48, 40},
{63, 80, 70, 88, 77, 96, 84},
{45, 60, 50, 66, 55, 72, 60},
{91, 112, 98, 120, 105, 128, 112},
{65, 84, 70, 90, 75, 96, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DilatedBaseConv2DWithLowAndHighPadding) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(1);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(2);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 8, 8});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 8, 8);
expected_array.FillWithYX(Array2D<float>({
{8, 7, 16, 14, 24, 21, 32, 28},
{6, 5, 12, 10, 18, 15, 24, 20},
{40, 35, 48, 42, 56, 49, 64, 56},
{30, 25, 36, 30, 42, 35, 48, 40},
{72, 63, 80, 70, 88, 77, 96, 84},
{54, 45, 60, 50, 66, 55, 72, 60},
{104, 91, 112, 98, 120, 105, 128, 112},
{78, 65, 84, 70, 90, 75, 96, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test,
DilatedWindowAndBaseConv2DWithDifferentLowAndHighPaddingAndStrides) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 3);
rhs_array.FillWithYX(Array2D<float>({
{5, 6, 7},
{8, 9, 10},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(2);
dim.set_padding_high(2);
dim.set_window_dilation(2);
dim.set_base_dilation(2);
*window.add_dimensions() = dim;
dim.set_size(3);
dim.set_stride(3);
dim.set_padding_low(2);
dim.set_padding_high(-1);
dim.set_window_dilation(1);
dim.set_base_dilation(3);
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 9, 3});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
      /*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 9, 3);
expected_array.FillWithYX(Array2D<float>({
{10, 20, 30},
{0, 0, 0},
{57, 74, 91},
{0, 0, 0},
{125, 142, 159},
{0, 0, 0},
{193, 210, 227},
{0, 0, 0},
{91, 98, 105},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Conv2DGroupedConvolution) {
HloComputation::Builder b(TestName());
std::vector<int64_t> input_dims = {1, 2, 2, 4};
std::vector<int64_t> filter_dims = {2, 2, 2, 8};
Shape input_shape = ShapeUtil::MakeShapeWithType<float>(input_dims);
Shape filter_shape = ShapeUtil::MakeShapeWithType<float>(filter_dims);
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
std::vector<float> input_elems(ShapeUtil::ElementsIn(input_shape));
std::iota(input_elems.begin(), input_elems.end(), -7);
auto input_r1 = LiteralUtil::CreateR1<float>(input_elems);
auto input_r4 = input_r1.Reshape(input_dims).value();
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input_r4)));
std::vector<float> filter_elems(ShapeUtil::ElementsIn(filter_shape));
std::iota(filter_elems.begin(), filter_elems.end(), -31);
auto filter_r1 = LiteralUtil::CreateR1<float>(filter_elems);
auto filter_r4 = filter_r1.Reshape(filter_dims).value();
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(filter_r4)));
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 1, 8});
  b.AddInstruction(HloInstruction::CreateConvolve(
      shape, lhs_instruction, rhs_instruction,
      /*feature_group_count=*/2, /*batch_group_count=*/1, window, dnums,
      DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 1, 8);
expected_array.FillWithYX(
Array2D<float>({{668, 664, 660, 656, 668, 680, 692, 704}}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
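// Shared literals for the FFT tests below: a c64[2, 4, 8] input
// (fft_c64x2x4x8_) and its expected transforms along the last one, two, and
// three dimensions (fft_c64x2x4x8_1d_, _2d_, _3d_ respectively).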
void HloEvaluatorTest::InitializeFftData() {
fft_c64x2x4x8_ = LiteralUtil::CreateR3<complex64>({
{{{0.0, 0.0}, {1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0},
{4.0, 0.0}, {5.0, 0.0}, {6.0, 0.0}, {7.0, 0.0}},
{{0.0, 0.0}, {0.0, 1.0}, {0.0, 2.0}, {0.0, 3.0},
{0.0, 4.0}, {0.0, 5.0}, {0.0, 6.0}, {0.0, 7.0}},
{{0.0, 7.0}, {1.0, 6.0}, {2.0, 5.0}, {3.0, 4.0},
{4.0, 3.0}, {5.0, 2.0}, {6.0, 1.0}, {7.0, 0.0}},
{{7.0, 0.0}, {6.0, 1.0}, {5.0, 2.0}, {4.0, 3.0},
{3.0, 4.0}, {2.0, 5.0}, {1.0, 6.0}, {0.0, 7.0}}},
{{{-4.0, 0.0}, {-3.0, 0.0}, {-2.0, 0.0}, {-1.0, 0.0},
{1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0}, {4.0, 0.0}},
{{0.0, -4.0}, {0.0, -3.0}, {0.0, -2.0}, {0.0, -1.0},
{0.0, 1.0}, {0.0, 2.0}, {0.0, 3.0}, {0.0, 4.0}},
{{3.5, 3.5}, {-1.707107, -0.707107}, {-1.0, -0.0}, {-0.707107, 0.292893},
{-0.5, 0.5}, {-0.292893, 0.707107}, {0.0, 1.0}, {0.707107, 1.707107}},
{{3.5, 3.5}, {1.707107, 0.707107}, {1.0, 0.0}, {0.707107, -0.292893},
{0.5, -0.5}, {0.292893, -0.707107}, {-0.0, -1.0}, {-0.707107, -1.707107}}}
});
fft_c64x2x4x8_1d_ = LiteralUtil::CreateR3<complex64>({
{{{28.0, 0.0}, {-4.0, 9.656854}, {-4.0, 4.0}, {-4.0, 1.656854},
{-4.0, 0.0}, {-4.0, -1.656854}, {-4.0, -4.0}, {-4.0, -9.656854}},
{{0.0, 28.0}, {-9.656854, -4.0}, {-4.0, -4.0}, {-1.656854, -4.0},
{0.0, -4.0}, {1.656854, -4.0}, {4.0, -4.0}, {9.656854, -4.0}},
{{28.0, 28.0}, {5.656854, 13.656854}, {0.0, 8.0}, {-2.343146, 5.656854},
{-4.0, 4.0}, {-5.656854, 2.343146}, {-8.0, -0.0}, {-13.656854, -5.656854}},
{{28.0, 28.0}, {-5.656854, -13.656854}, {-0.0, -8.0}, {2.343146, -5.656854},
{4.0, -4.0}, {5.656854, -2.343146}, {8.0, 0.0}, {13.656854, 5.656854}}},
{{{0.0, 0.0}, {-5.0, 12.071068}, {-4.0, 4.0}, {-5.0, 2.071068},
{-4.0, 0.0}, {-5.0, -2.071068}, {-4.0, -4.0}, {-5.0, -12.071068}},
{{0.0, 0.0}, {-12.071068, -5.0}, {-4.0, -4.0}, {-2.071068, -5.0},
{0.0, -4.0}, {2.071068, -5.0}, {4.0, -4.0}, {12.071068, -5.0}},
{{0.0, 7.0}, {1.0, 6.0}, {2.0, 5.0}, {3.0, 4.0},
{4.0, 3.0}, {5.0, 2.0}, {6.0, 1.0}, {7.0, 0.0}},
{{7.0, 0.0}, {6.0, 1.0}, {5.0, 2.0}, {4.0, 3.0},
{3.0, 4.0}, {2.0, 5.0}, {1.0, 6.0}, {0.0, 7.0}}}
});
fft_c64x2x4x8_2d_ = LiteralUtil::CreateR3<complex64>({
{{{84.0, 84.0}, {-13.656854, 5.656854}, {-8.0, 0.0}, {-5.656854, -2.343146},
{-4.0, -4.0}, {-2.343146, -5.656854}, {0.0, -8.0}, {5.656854, -13.656854}},
{{0.0, 0.0}, {0.0, -0.0}, {0.0, 0.0}, {0.0, 0.0},
{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{28.0, -28.0}, {16.970562, 40.970562}, {0.0, 24.0}, {-7.029438, 16.970562},
{-12.0, 12.0}, {-16.970562, 7.029438}, {-24.0, 0.0}, {-40.970562, -16.970562}},
{{0.0, -56.0}, {-19.313708, -8.0}, {-8.0, -8.0}, {-3.313708, -8.0},
{0.0, -8.0}, {3.313708, -8.0}, {8.0, -8.0}, {19.313708, -8.0}}},
{{{7.0, 7.0}, {-10.071068, 14.071068}, {-1.0, 7.0}, {-0.071068, 4.071068},
{3.0, 3.0}, {4.071068, -0.071068}, {7.0, -1.0}, {14.071068, -10.071068}},
{{0.0, 0.0}, {-12.0, 24.142136}, {-12.0, 8.0}, {-16.0, 4.142136},
{-16.0, 0.0}, {-20.0, -4.142136}, {-20.0, -8.0}, {-24.0, -24.142136}},
{{-7.0, 7.0}, {2.071068, 22.071068}, {-3.0, 11.0}, {-3.928932, 8.071068},
{-3.0, 3.0}, {-4.071068, -0.071068}, {-3.0, -5.0}, {-10.071068, -14.071068}},
{{0.0, -14.0}, {0.0, -12.0}, {0.0, -10.0}, {0.0, -8.0},
{0.0, -6.0}, {0.0, -4.0}, {0.0, -2.0}, {0.0, 0.0}}}
});
fft_c64x2x4x8_3d_ = LiteralUtil::CreateR3<complex64>({
{{{91.0, 91.0}, {-23.727922, 19.727922}, {-9.0, 7.0}, {-5.727922, 1.727922},
{-1.0, -1.0}, {1.727922, -5.727922}, {7.0, -9}, {19.727922, -23.727922}},
{{0.0, 0.0}, {-12.0, 24.142136}, {-12.0, 8.0}, {-16.0, 4.142136},
{-16.0, 0.0}, {-20.0, -4.142136}, {-20.0, -8.0}, {-24.0, -24.142136}},
{{21.0, -21.0}, {19.041630, 63.041630}, {-3.0, 35.0}, {-10.958370, 25.041630},
{-15.0, 15.0}, {-21.041630, 6.958370}, {-27.0, -5.0}, {-51.041630, -31.041630}},
{{0.0, -70.0}, {-19.313708, -20.0}, {-8.0, -18.0}, {-3.313708, -16.0},
{0.0, -14.0}, {3.313708, -12.0}, {8.0, -10.0}, {19.313708, -8.0}}},
{{{77.0, 77.0}, {-3.585786, -8.414214}, {-7.0, -7.0}, {-5.585786, -6.414214},
{-7.0, -7.0}, {-6.414214, -5.585786}, {-7.0, -7.0}, {-8.414214, -3.585786}},
{{0.0, 0.0}, {12.0, -24.142136}, {12.0, -8.0}, {16.0, -4.142136},
{16.0, 0.0}, {20.0, 4.142136}, {20.0, 8.0}, {24.0, 24.142136}},
{{35.0, -35.0}, {14.899494, 18.899494}, {3.0, 13.0}, {-3.100506, 8.899494},
{-9.0, 9.0}, {-12.899494, 7.100506}, {-21.0, 5.0}, {-30.899494, -2.899494}},
{{0.0, -42.0}, {-19.313708, 4.0}, {-8.0, 2.0}, {-3.313708, 0.0},
{0.0, -2.0}, {3.313708, -4.0}, {8.0, -6.0}, {19.313708, -8.0}}}
});
}
TEST_F(HloEvaluatorTest, 1D_FFT_4_on_c64x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[4] parameter(0)
ROOT fft = c64[4] fft(operand), fft_type=FFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0}, {4.0, 0.0}});
auto expected = LiteralUtil::CreateR1<complex64>(
{{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}, {-2.0, -2.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IFFT_4_on_c64x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[4] parameter(0)
ROOT ifft = c64[4] fft(operand), fft_type=IFFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}, {-2.0, -2.0}});
auto expected = LiteralUtil::CreateR1<complex64>(
{{1.0, 0.0}, {2.0, 0.0}, {3.0, 0.0}, {4.0, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_RFFT_4_on_f32x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[4] parameter(0)
ROOT rfft = c64[3] fft(operand), fft_type=RFFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
auto expected =
LiteralUtil::CreateR1<complex64>({{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IRFFT_4_on_c64x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3] parameter(0)
ROOT irfft = f32[4] fft(operand), fft_type=IRFFT, fft_length={4}
}
)";
auto input =
LiteralUtil::CreateR1<complex64>({{10.0, 0.0}, {-2.0, 2.0}, {-2.0, 0.0}});
auto expected = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_FFT_8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT fft = c64[2, 4, 8] fft(operand), fft_type=FFT, fft_length={8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_1d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_1d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IFFT_8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT ifft = c64[2, 4, 8] fft(operand), fft_type=IFFT, fft_length={8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_1d_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_RFFT_8_on_f32x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[8] parameter(0)
ROOT rfft = c64[5] fft(operand), fft_type=RFFT, fft_length={8}
}
)";
auto input =
LiteralUtil::CreateR1<float>({1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1});
auto expected = LiteralUtil::CreateR1<complex64>({{39.6, 0.0},
{-3.6, 8.691169},
{-3.6, 3.6},
{-3.6, 1.491169},
{-3.6, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IRFFT_8_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT irfft = f32[8] fft(operand), fft_type=IRFFT, fft_length={8}
}
)";
auto input = LiteralUtil::CreateR1<complex64>({{39.6, 0.0},
{-3.6, 8.691169},
{-3.6, 3.6},
{-3.6, 1.491169},
{-3.6, 0.0}});
auto expected =
LiteralUtil::CreateR1<float>({1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_RFFT_9_on_f32x9) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[9] parameter(0)
ROOT rfft = c64[5] fft(operand), fft_type=RFFT, fft_length={9}
}
)";
auto input = LiteralUtil::CreateR1<float>(
{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1, 9.9});
auto expected = LiteralUtil::CreateR1<complex64>({{49.5, 0.0},
{-3.360560, 11.705792},
{-3.893717, 5.712929},
{-4.5, 3.117691},
{-4.895723, 1.021942}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IRFFT_9_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT irfft = f32[9] fft(operand), fft_type=IRFFT, fft_length={9}
}
)";
auto input = LiteralUtil::CreateR1<complex64>({{49.5, 0.0},
{-3.360560, 11.705792},
{-3.893717, 5.712929},
{-4.5, 3.117691},
{-4.895723, 1.021942}});
auto expected = LiteralUtil::CreateR1<float>(
{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1, 9.9});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_FFT_4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT fft = c64[2, 4, 8] fft(operand), fft_type=FFT, fft_length={4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_2d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_2d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IFFT_4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT ifft = c64[2, 4, 8] fft(operand), fft_type=IFFT, fft_length={4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_2d_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_RFFT_3x8_on_f32x3x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 8] parameter(0)
ROOT rfft = c64[3, 5] fft(operand), fft_type=RFFT, fft_length={3, 8}
}
)";
auto input =
LiteralUtil::CreateR2<float>({{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1},
{8.1, 7.2, 6.3, 5.4, 4.5, 3.6, 2.7, 1.8},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8}});
auto expected = LiteralUtil::CreateR2<complex64>({{{118.8, 0.0},
{-4.4, 10.622540},
{-4.4, 4.4},
{-4.4, 1.822540},
{-4.4, 0.0}},
{{0.0, 0.0},
{-19.926162, 0.797280},
{-10.128203, -3.728203},
{-6.069756, -5.602720},
{-3.2, -6.928203}},
{{0.0, 0.0},
{13.526162, 14.653687},
{3.728203, 10.128203},
{-0.330244, 8.253687},
{-3.2, 6.928203}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IRFFT_3x8_on_c64x3x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 5] parameter(0)
ROOT irfft = f32[3, 8] fft(operand), fft_type=IRFFT, fft_length={3, 8}
}
)";
auto input = LiteralUtil::CreateR2<complex64>({{{118.8, 0.0},
{-4.4, 10.622540},
{-4.4, 4.4},
{-4.4, 1.822540},
{-4.4, 0.0}},
{{0.0, 0.0},
{-19.926162, 0.797280},
{-10.128203, -3.728203},
{-6.069756, -5.602720},
{-3.2, -6.928203}},
{{0.0, 0.0},
{13.526162, 14.653687},
{3.728203, 10.128203},
{-0.330244, 8.253687},
{-3.2, 6.928203}}});
auto expected =
LiteralUtil::CreateR2<float>({{1.8, 2.7, 3.6, 4.5, 5.4, 6.3, 7.2, 8.1},
{8.1, 7.2, 6.3, 5.4, 4.5, 3.6, 2.7, 1.8},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_RFFT_3x9_on_f32x3x9) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 9] parameter(0)
ROOT rfft = c64[3, 5] fft(operand), fft_type=RFFT, fft_length={3, 9}
}
)";
auto input = LiteralUtil::CreateR2<float>(
{{1.9, 2.8, 3.7, 4.6, 5.5, 6.4, 7.3, 8.2, 9.1},
{9.1, 8.2, 7.3, 6.4, 5.5, 4.6, 3.7, 2.8, 1.9},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}});
auto expected = LiteralUtil::CreateR2<complex64>({{{148.5, 0.0},
{-4.95, 13.600013},
{-4.95, 5.899180},
{-4.95, 2.857884},
{-4.95, 0.872819}},
{{0.0, 0.0},
{-25.014467, 2.096690},
{-12.888800, -3.503916},
{-8.1, -5.715768},
{-4.974333, -7.159452}},
{{0.0, 0.0},
{17.814467, 17.685147},
{5.688800, 12.084542},
{0.9, 9.872690},
{-2.225667, 8.429006}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IRFFT_3x9_on_c64x3x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 5] parameter(0)
ROOT irfft = f32[3, 9] fft(operand), fft_type=IRFFT, fft_length={3, 9}
}
)";
auto input = LiteralUtil::CreateR2<complex64>({{{148.5, 0.0},
{-4.95, 13.600013},
{-4.95, 5.899180},
{-4.95, 2.857884},
{-4.95, 0.872819}},
{{0.0, 0.0},
{-25.014467, 2.096690},
{-12.888800, -3.503916},
{-8.1, -5.715768},
{-4.974333, -7.159452}},
{{0.0, 0.0},
{17.814467, 17.685147},
{5.688800, 12.084542},
{0.9, 9.872690},
{-2.225667, 8.429006}}});
auto expected = LiteralUtil::CreateR2<float>(
{{1.9, 2.8, 3.7, 4.6, 5.5, 6.4, 7.3, 8.2, 9.1},
{9.1, 8.2, 7.3, 6.4, 5.5, 4.6, 3.7, 2.8, 1.9},
{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_2x4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT fft = c64[2, 4, 8] fft(operand), fft_type=FFT, fft_length={2, 4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_3d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_3d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IFFT_2x4x8_on_c64x2x4x8) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8] parameter(0)
ROOT ifft = c64[2, 4, 8] fft(operand), fft_type=IFFT, fft_length={2, 4, 8}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&fft_c64x2x4x8_3d_}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_RFFT_3x3x4_on_f32x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 3, 4] parameter(0)
ROOT rfft = c64[3, 3, 3] fft(operand), fft_type=RFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<float>(
{{{1.8, 2.7, 3.6, 4.5}, {8.1, 7.2, 6.3, 5.4}, {1.1, 2.2, 3.3, 4.4}},
{{5.4, 6.3, 7.2, 8.1}, {4.5, 3.6, 2.7, 1.8}, {5.5, 6.6, 7.7, 8.8}},
{{-1.8, -2.7, -3.6, -4.5},
{-5.4, -6.3, -7.2, -8.1},
{1.9, 2.9, 3.9, 4.9}}});
auto expected = LiteralUtil::CreateR3<complex64>(
{{{{92.8, 0.0}, {-2.8, 2.8}, {-2.8, 0.0}},
{{-5.9, 35.160631}, {-11.519100, -8.919100}, {-1.3, -10.219100}},
{{-5.9, -35.160631}, {8.919100, 11.519100}, {-1.3, 10.219100}}},
{{{29.5, -81.579593}, {1.390897, 5.190897}, {-1.9, 3.290897}},
{{-25.1, -49.017038}, {1.044486, 4.844486}, {-1.9, 2.944486}},
{{11.8, 27.712813}, {1.517691, 4.717691}, {-1.6, 3.117691}}},
{{{29.5, 81.579593}, {-5.190897, -1.390897}, {-1.9, -3.290897}},
{{11.8, -27.712813}, {-4.717691, -1.517691}, {-1.6, -3.117691}},
{{-25.1, 49.017038}, {-4.844486, -1.044486}, {-1.9, -2.944486}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IRFFT_3x3x4_on_c64x3x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 3] parameter(0)
ROOT irfft = f32[3, 3, 4] fft(operand), fft_type=IRFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{92.8, 0.0}, {-2.8, 2.8}, {-2.8, 0.0}},
{{-5.9, 35.160631}, {-11.519100, -8.919100}, {-1.3, -10.219100}},
{{-5.9, -35.160631}, {8.919100, 11.519100}, {-1.3, 10.219100}}},
{{{29.5, -81.579593}, {1.390897, 5.190897}, {-1.9, 3.290897}},
{{-25.1, -49.017038}, {1.044486, 4.844486}, {-1.9, 2.944486}},
{{11.8, 27.712813}, {1.517691, 4.717691}, {-1.6, 3.117691}}},
{{{29.5, 81.579593}, {-5.190897, -1.390897}, {-1.9, -3.290897}},
{{11.8, -27.712813}, {-4.717691, -1.517691}, {-1.6, -3.117691}},
{{-25.1, 49.017038}, {-4.844486, -1.044486}, {-1.9, -2.944486}}}});
auto expected = LiteralUtil::CreateR3<float>(
{{{1.8, 2.7, 3.6, 4.5}, {8.1, 7.2, 6.3, 5.4}, {1.1, 2.2, 3.3, 4.4}},
{{5.4, 6.3, 7.2, 8.1}, {4.5, 3.6, 2.7, 1.8}, {5.5, 6.6, 7.7, 8.8}},
{{-1.8, -2.7, -3.6, -4.5},
{-5.4, -6.3, -7.2, -8.1},
{1.9, 2.9, 3.9, 4.9}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_RFFT_3x3x5_on_f32x3x3x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 3, 5] parameter(0)
ROOT rfft = c64[3, 3, 3] fft(operand), fft_type=RFFT, fft_length={3, 3, 5}
}
)";
auto input = LiteralUtil::CreateR3<float>({{{1.8, 2.7, 3.6, 4.5, 5.4},
{8.1, 7.2, 6.3, 5.4, 4.5},
{1.1, 2.2, 3.3, 4.4, 5.5}},
{{5.4, 6.3, 7.2, 8.1, 9.0},
{4.5, 3.6, 2.7, 1.8, 0.9},
{5.5, 6.6, 7.7, 8.8, 9.9}},
{{-1.8, -2.7, -3.6, -4.5, -5.4},
{-5.4, -6.3, -7.2, -8.1, -9.0},
{1.9, 2.9, 3.9, 4.9, 5.9}}});
auto expected = LiteralUtil::CreateR3<complex64>(
{{{{119.5, 0.0}, {-3.5, 4.817337}, {-3.5, 1.137219}},
{{-5.75, 56.724664}, {-19.206730, -10.537254}, {-5.775483, -12.245880}},
{{-5.75, -56.724664}, {15.956730, 15.010495}, {2.525483, 13.301869}}},
{{{39.25, -106.088112}, {3.286913, 7.382528}, {-1.038404, 4.885305}},
{{-29.0, -64.951905}, {2.690922, 6.949515}, {-1.179098, 4.452292}},
{{16.75, 30.743902}, {3.363918, 6.649878}, {-0.733751, 4.546954}}},
{{{39.25, 106.088112}, {-8.036913, -0.844714}, {-3.711596, -3.341936}},
{{16.75, -30.743902}, {-7.363918, -1.144350}, {-3.266249, -3.247275}},
{{-29.0, 64.951905}, {-7.440922, -0.411701}, {-3.570902, -2.908924}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IRFFT_3x3x5_on_c64x3x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 3] parameter(0)
ROOT irfft = f32[3, 3, 5] fft(operand), fft_type=IRFFT, fft_length={3, 3, 5}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{119.5, 0.0}, {-3.5, 4.817337}, {-3.5, 1.137219}},
{{-5.75, 56.724664}, {-19.206730, -10.537254}, {-5.775483, -12.245880}},
{{-5.75, -56.724664}, {15.956730, 15.010495}, {2.525483, 13.301869}}},
{{{39.25, -106.088112}, {3.286913, 7.382528}, {-1.038404, 4.885305}},
{{-29.0, -64.951905}, {2.690922, 6.949515}, {-1.179098, 4.452292}},
{{16.75, 30.743902}, {3.363918, 6.649878}, {-0.733751, 4.546954}}},
{{{39.25, 106.088112}, {-8.036913, -0.844714}, {-3.711596, -3.341936}},
{{16.75, -30.743902}, {-7.363918, -1.144350}, {-3.266249, -3.247275}},
{{-29.0, 64.951905}, {-7.440922, -0.411701}, {-3.570902, -2.908924}}}});
auto expected = LiteralUtil::CreateR3<float>({{{1.8, 2.7, 3.6, 4.5, 5.4},
{8.1, 7.2, 6.3, 5.4, 4.5},
{1.1, 2.2, 3.3, 4.4, 5.5}},
{{5.4, 6.3, 7.2, 8.1, 9.0},
{4.5, 3.6, 2.7, 1.8, 0.9},
{5.5, 6.6, 7.7, 8.8, 9.9}},
{{-1.8, -2.7, -3.6, -4.5, -5.4},
{-5.4, -6.3, -7.2, -8.1, -9.0},
{1.9, 2.9, 3.9, 4.9, 5.9}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
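// FFT tests with input and output literals in non-default layouts.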
TEST_F(HloEvaluatorTest, 1D_FFT_8_on_c64x2x4x8_with_layout) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8]{0, 2, 1} parameter(0)
ROOT fft = c64[2, 4, 8]{1, 2, 0} fft(operand), fft_type=FFT, fft_length={8}
}
)";
auto input = fft_c64x2x4x8_.Relayout(LayoutUtil::MakeLayout({0, 2, 1}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_1d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_1d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_FFT_4x8_on_c64x2x4x8_with_layout) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8]{2, 0, 1} parameter(0)
ROOT fft = c64[2, 4, 8]{1, 0, 2} fft(operand), fft_type=FFT, fft_length={4, 8}
}
)";
auto input = fft_c64x2x4x8_.Relayout(LayoutUtil::MakeLayout({2, 0, 1}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_2d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_2d_, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_2x4x8_on_c64x2x4x8_with_layout) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[2, 4, 8]{1, 2, 0} parameter(0)
ROOT fft =
c64[2, 4, 8]{0, 2, 1} fft(operand), fft_type=FFT, fft_length={2, 4, 8}
}
)";
auto input = fft_c64x2x4x8_.Relayout(LayoutUtil::MakeLayout({1, 2, 0}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), fft_c64x2x4x8_3d_.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(fft_c64x2x4x8_3d_, result, fft_error_));
}
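// FFT edge cases: zero-length and unit-length transforms and empty operands.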
TEST_F(HloEvaluatorTest, 1D_FFT_0_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={0}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
auto expected = LiteralUtil::CreateR4<complex64>({{{{{0.0, 0.0}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_FFT_1_on_c64x1x1x1x0) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 0] parameter(0)
ROOT fft = c64[1, 1, 1, 0] fft(operand), fft_type=FFT, fft_length={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto input,
LiteralUtil::CreateR4<complex64>({{{{}}}}).Reshape({1, 1, 1, 0}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_FFT_1_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_1x0x1_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={1, 0, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
auto expected = LiteralUtil::CreateR4<complex64>({{{{{0.0, 0.0}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_1x1x1_on_c64x0x1x0x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[0, 1, 0, 1] parameter(0)
ROOT fft = c64[0, 1, 0, 1] fft(operand), fft_type=FFT, fft_length={1, 1, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(
auto input,
LiteralUtil::CreateR4<complex64>({{{{}}}}).Reshape({0, 1, 0, 1}));
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_1x1x1_on_c64x1x1x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 1, 1, 1] parameter(0)
ROOT fft = c64[1, 1, 1, 1] fft(operand), fft_type=FFT, fft_length={1, 1, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_3x1x1_on_c64x1x3x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 3, 1, 1] parameter(0)
ROOT fft = c64[1, 3, 1, 1] fft(operand), fft_type=FFT, fft_length={3, 1, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>(
{{{{{42.24, 24.42}}}, {{{-42.24, 24.42}}}, {{{42.24, -24.42}}}}});
auto expected =
LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}},
{{{84.5367, 97.5818}}},
{{{-0.0566792, -48.7418}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IFFT_3x1x1_on_c64x1x3x1x1) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[1, 3, 1, 1] parameter(0)
ROOT ifft = c64[1, 3, 1, 1] fft(operand), fft_type=IFFT, fft_length={3, 1, 1}
}
)";
auto input = LiteralUtil::CreateR4<complex64>({{{{{42.24, 24.42}}},
{{{84.5367, 97.5818}}},
{{{-0.0566792, -48.7418}}}}});
auto expected = LiteralUtil::CreateR4<complex64>(
{{{{{42.24, 24.42}}}, {{{-42.24, 24.42}}}, {{{42.24, -24.42}}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
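// FFT tests with an odd (non-power-of-two) transform length.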
TEST_F(HloEvaluatorTest, 1D_FFT_5_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT fft = c64[5] fft(operand), fft_type=FFT, fft_length={5}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{1.0, 5.0}, {2.0, 4.0}, {3.0, 3.0}, {4.0, 2.0}, {5.0, 1.0}});
auto expected = LiteralUtil::CreateR1<complex64>({{15.0, 15.0},
{0.940955, 5.94095},
{-1.6877, 3.3123},
{-3.3123, 1.6877},
{-5.94095, -0.940955}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 1D_IFFT_5_on_c64x5) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[5] parameter(0)
ROOT ifft = c64[5] fft(operand), fft_type=IFFT, fft_length={5}
}
)";
auto input = LiteralUtil::CreateR1<complex64>({{15.0, 15.0},
{0.940955, 5.94095},
{-1.6877, 3.3123},
{-3.3123, 1.6877},
{-5.94095, -0.940955}});
auto expected = LiteralUtil::CreateR1<complex64>(
{{1.0, 5.0}, {2.0, 4.0}, {3.0, 3.0}, {4.0, 2.0}, {5.0, 1.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
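// FFT tests on all-zero inputs, whose transforms must also be all zeros.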
TEST_F(HloEvaluatorTest, 1D_FFT_4_on_zero_c64x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[4] parameter(0)
ROOT fft = c64[4] fft(operand), fft_type=FFT, fft_length={4}
}
)";
auto input = LiteralUtil::CreateR1<complex64>(
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_FFT_3x3x4_on_zero_c64x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 4] parameter(0)
ROOT fft = c64[3, 3, 4] fft(operand), fft_type=FFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IFFT_3x3x4_on_zero_c64x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 4] parameter(0)
ROOT ifft = c64[3, 3, 4] fft(operand), fft_type=IFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), input.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(input, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_RFFT_3x3x4_on_zero_f32x3x3x4) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = f32[3, 3, 4] parameter(0)
ROOT rfft = c64[3, 3, 3] fft(operand), fft_type=RFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<float>(
{{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}}});
auto expected = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 3D_IRFFT_3x3x4_on_zero_c64x3x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3, 3] parameter(0)
ROOT irfft = f32[3, 3, 4] fft(operand), fft_type=IRFFT, fft_length={3, 3, 4}
}
)";
auto input = LiteralUtil::CreateR3<complex64>(
{{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}},
{{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}},
{{0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}}}});
auto expected = LiteralUtil::CreateR3<float>(
{{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}},
{{0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}, {0.0, 0.0, 0.0, 0.0}}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
TEST_F(HloEvaluatorTest, 2D_IRFFT_3x4_on_c64x3x3) {
const char* hlo_text = R"(
HloModule Fft
ENTRY main {
operand = c64[3, 3] parameter(0)
ROOT irfft = f32[3, 4] fft(operand), fft_type=IRFFT, fft_length={3, 4}
}
)";
auto input =
LiteralUtil::CreateR2<complex64>({{{0.0, 0.0}, {1.0, 0.0}, {2.0, 0.0}},
{{3.0, 0.0}, {4.0, 0.0}, {5.0, 0.0}},
{{6.0, 0.0}, {7.0, 0.0}, {8.0, 0.0}}});
auto expected =
LiteralUtil::CreateR2<float>({{4.0, -0.5, 0.0, -0.5},
{-1.5, 0.433013, 0.0, -0.433013},
{-1.5, -0.433013, 0.0, 0.433013}});
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&input}));
EXPECT_TRUE(ShapeUtil::Compatible(result.shape(), expected.shape()));
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, fft_error_));
}
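// Verifies that the evaluator does not lose precision when summing 2^25 ones:
// a naive float32 accumulator would stop growing at 2^24 (1.0f + 16777216.0f
// == 16777216.0f), so getting the exact count back implies the reduction
// accumulates in higher precision.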
class HloEvaluatorPreciseReduceTest : public HloTestBase {};
TEST_F(HloEvaluatorPreciseReduceTest, AddReductionPrecisionTest) {
auto m = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
constexpr int kNumElements = 1 << 25;
std::vector<float> v(kNumElements, 1.0f);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(v)));
HloInstruction* init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m->AddEmbeddedComputation(add_computation.Build());
  HloInstruction* reduce_instruction = b.AddInstruction(
      HloInstruction::CreateReduce(scalar_shape, arg_instruction, init_value,
                                   /*dimensions_to_reduce=*/{0}, add_func));
m->AddEntryComputation(b.Build());
HloEvaluator hlo_eval;
Literal result = hlo_eval.Evaluate(reduce_instruction).value();
LiteralTestUtil::ExpectR0Equal<float>(kNumElements, result);
}
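// Benchmarks the same precise add reduction over 2^25 elements.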
void BM_ReducePrecisely(::testing::benchmark::State& state) {
HloComputation::Builder b("BM_ReducePrecisely");
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
HloModule module("BM_ReducePrecisely", config);
constexpr int kNumElements = 1 << 25;
std::vector<float> v(kNumElements, 1.0f);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(v)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = module.AddEmbeddedComputation(add_computation.Build());
  HloInstruction* reduce_instruction = b.AddInstruction(
      HloInstruction::CreateReduce(scalar_shape, arg_instruction, init_value,
                                   /*dimensions_to_reduce=*/{0}, add_func));
module.AddEntryComputation(b.Build());
for (auto s : state) {
HloEvaluator hlo_eval;
hlo_eval.Evaluate(reduce_instruction).value();
}
}
BENCHMARK(BM_ReducePrecisely);
TEST_P(HloEvaluatorBf16Test, ReduceAdd) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m_->AddEmbeddedComputation(add_computation.Build());
Shape shape = ShapeUtil::MakeShape(F32, {2});
  b.AddInstruction(
      HloInstruction::CreateReduce(shape, arg_instruction, init_value,
                                   /*dimensions_to_reduce=*/{1}, add_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<float>({6, 18});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMax) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
auto max_func = m_->AddEmbeddedComputation(MaxComputationScalarF32());
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
Shape shape = ShapeUtil::MakeShape(F32, {1, 2});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, max_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{6, 7}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
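// The following tests drive ReduceWindowMaxIotaTest, a fixture helper that
// max-reduces an iota input -- a 4x4 grid of values 0..15, judging by the
// expected results -- with the window size, padding, stride, and dilations
// passed below.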
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaWindowDilation) {
auto expected = LiteralUtil::CreateR2<float>({{10, 11}, {14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/1,
      /*window_dilation=*/2,
      /*base_dilation=*/1,
      expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaStrideWindowDilation) {
auto expected = LiteralUtil::CreateR2<float>({{10}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/2,
      /*window_dilation=*/2,
      /*base_dilation=*/1,
      expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaBaseDilation) {
auto expected = LiteralUtil::CreateR2<float>({{0, 1, 1, 2, 2, 3},
{4, 5, 5, 6, 6, 7},
{4, 5, 5, 6, 6, 7},
{8, 9, 9, 10, 10, 11},
{8, 9, 9, 10, 10, 11},
{12, 13, 13, 14, 14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/1,
      /*window_dilation=*/1,
      /*base_dilation=*/2,
      expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaStrideBaseDilation) {
auto expected =
LiteralUtil::CreateR2<float>({{0, 1, 2}, {4, 5, 6}, {8, 9, 10}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/2,
      /*window_dilation=*/1,
      /*base_dilation=*/2,
      expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaStrideBothDilation) {
auto expected =
LiteralUtil::CreateR2<float>({{5, 6, 7}, {9, 10, 11}, {13, 14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/2,
      /*padding=*/0,
      /*stride=*/2,
      /*window_dilation=*/2,
      /*base_dilation=*/2,
      expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowMaxIotaPaddingStrideBaseDilation) {
auto expected =
LiteralUtil::CreateR2<float>({{0, 2, 3}, {8, 10, 11}, {12, 14, 15}});
  ReduceWindowMaxIotaTest(
      /*window_size=*/3,
      /*padding=*/1,
      /*stride=*/3,
      /*window_dilation=*/1,
      /*base_dilation=*/2,
      expected);
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowAdd) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(2, 3);
arg_array->FillUnique(1.0f);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m_->AddEmbeddedComputation(add_computation.Build());
Window window;
WindowDimension dim;
dim.set_size(1);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(1);
dim.set_padding_high(0);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, add_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{1, 3, 5}, {5, 11, 13}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ReduceWindowAdd6D) {
HloComputation::Builder b(TestName());
std::vector<int64_t> input_dims(6, 4);
Literal arg_literal =
LiteralUtil::CreateFullWithDescendingLayout<float>(input_dims, 1.0f);
HloInstruction* arg_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
HloComputation::Builder add_computation("add");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = add_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
add_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto add_func = m_->AddEmbeddedComputation(add_computation.Build());
Window window;
WindowDimension trivial_dim;
trivial_dim.set_size(1);
trivial_dim.set_stride(1);
trivial_dim.set_padding_low(0);
trivial_dim.set_padding_high(0);
trivial_dim.set_window_dilation(1);
trivial_dim.set_base_dilation(1);
WindowDimension active_dim;
active_dim.set_size(2);
active_dim.set_stride(1);
active_dim.set_padding_low(0);
active_dim.set_padding_high(0);
active_dim.set_window_dilation(1);
active_dim.set_base_dilation(1);
*window.add_dimensions() = trivial_dim;
*window.add_dimensions() = active_dim;
*window.add_dimensions() = active_dim;
*window.add_dimensions() = active_dim;
*window.add_dimensions() = trivial_dim;
*window.add_dimensions() = trivial_dim;
Shape shape = ShapeUtil::MakeShape(F32, {4, 3, 3, 3, 4, 4});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, add_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
std::vector<int64_t> output_dims = {4, 3, 3, 3, 4, 4};
Literal result_literal =
LiteralUtil::CreateFullWithDescendingLayout<float>(output_dims, 8.0f);
EXPECT_TRUE(LiteralTestUtil::Equal(result_literal, result));
}
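// Variadic reduce-window: two input arrays are reduced together by a
// computation that returns a tuple of per-operand minima.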
TEST_P(HloEvaluatorBf16Test, Min3In5Stride2Tuple) {
HloComputation::Builder builder("main");
auto input1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1})));
auto input2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1})));
HloComputation::Builder bcompute("ComputeFunction");
auto shape1 = ShapeUtil::MakeShape(F32, {});
auto shape2 = ShapeUtil::MakeShape(F32, {});
auto p2 =
bcompute.AddInstruction(HloInstruction::CreateParameter(0, shape1, "x0"));
auto p3 =
bcompute.AddInstruction(HloInstruction::CreateParameter(1, shape2, "x1"));
auto p4 =
bcompute.AddInstruction(HloInstruction::CreateParameter(2, shape1, "y0"));
auto p5 =
bcompute.AddInstruction(HloInstruction::CreateParameter(3, shape2, "y1"));
std::vector<HloInstruction*> compute_vec = {
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kMinimum, p2, p4)),
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kMinimum, p3, p5))};
bcompute.AddInstruction(HloInstruction::CreateTuple(compute_vec));
auto compute_tuple = m_->AddEmbeddedComputation(bcompute.Build());
std::vector<HloInstruction*> input_vec = {input1, input2};
auto init1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(F32)));
auto init2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(F32)));
std::vector<HloInstruction*> init_vec = {init1, init2};
auto padding = std::pair<int64_t, int64_t>(0, 0);
  TF_ASSERT_OK_AND_ASSIGN(auto window,
                          ShapeInference::InferWindowFromDimensions(
                              {3}, {2}, absl::MakeSpan(&padding, 1),
                              /*lhs_dilation=*/{},
                              /*rhs_dilation=*/{}));
std::vector<const Shape*> input_shapes = {&input1->shape(), &input2->shape()};
std::vector<const Shape*> init_shapes = {&init1->shape(), &init2->shape()};
TF_ASSERT_OK_AND_ASSIGN(Shape shape,
ShapeInference::InferReduceWindowShape(
input_shapes, init_shapes, window,
compute_tuple->ComputeProgramShape()));
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, input_vec, init_vec, window, compute_tuple));
auto r1 = LiteralUtil::CreateR1<float>({100, 1});
auto expected = LiteralUtil::MakeTuple({&r1, &r1});
m_->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Min3In5Stride2TupleDiffInput) {
HloComputation::Builder builder("main");
auto input1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({10000, 1000, 100, 10, 1})));
auto input2 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int>({15, 28, 300, 107, 12})));
HloComputation::Builder bcompute("ComputeFunction");
auto shape1 = ShapeUtil::MakeShape(F32, {});
auto shape2 = ShapeUtil::MakeShape(S32, {});
auto p2 =
bcompute.AddInstruction(HloInstruction::CreateParameter(0, shape1, "x0"));
auto p3 =
bcompute.AddInstruction(HloInstruction::CreateParameter(1, shape2, "x1"));
auto p4 =
bcompute.AddInstruction(HloInstruction::CreateParameter(2, shape1, "y0"));
auto p5 =
bcompute.AddInstruction(HloInstruction::CreateParameter(3, shape2, "y1"));
std::vector<HloInstruction*> compute_vec = {
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape1, HloOpcode::kMinimum, p2, p4)),
bcompute.AddInstruction(
HloInstruction::CreateBinary(shape2, HloOpcode::kMinimum, p3, p5))};
bcompute.AddInstruction(HloInstruction::CreateTuple(compute_vec));
auto compute_tuple = m_->AddEmbeddedComputation(bcompute.Build());
std::vector<HloInstruction*> input_vec = {input1, input2};
auto init1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(F32)));
auto init2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(S32)));
std::vector<HloInstruction*> init_vec = {init1, init2};
auto padding = std::pair<int64_t, int64_t>(0, 0);
  TF_ASSERT_OK_AND_ASSIGN(auto window,
                          ShapeInference::InferWindowFromDimensions(
                              {3}, {2}, absl::MakeSpan(&padding, 1),
                              /*lhs_dilation=*/{},
                              /*rhs_dilation=*/{}));
std::vector<const Shape*> input_shapes = {&input1->shape(), &input2->shape()};
std::vector<const Shape*> init_shapes = {&init1->shape(), &init2->shape()};
TF_ASSERT_OK_AND_ASSIGN(Shape shape,
ShapeInference::InferReduceWindowShape(
input_shapes, init_shapes, window,
compute_tuple->ComputeProgramShape()));
builder.AddInstruction(HloInstruction::CreateReduceWindow(
shape, input_vec, init_vec, window, compute_tuple));
auto r1 = LiteralUtil::CreateR1<float>({100, 1});
auto r2 = LiteralUtil::CreateR1<int>({15, 12});
auto expected = LiteralUtil::MakeTuple({&r1, &r2});
m_->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, StridedSlice) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<float>>(3, 5);
operand_array->FillUnique(1.0f);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 1});
  b.AddInstruction(HloInstruction::CreateSlice(shape, operand,
                                               /*start_indices=*/{0, 2},
                                               /*limit_indices=*/{3, 5},
                                               /*strides=*/{2, 3}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({
{3},
{19},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DynamicSlice) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<float>>(2, 4);
operand_array->FillUnique(1.0f);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto zero = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
b.AddInstruction(
HloInstruction::CreateDynamicSlice(shape, operand, {zero, one}, {2, 3}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({
{2, 3, 4},
{6, 7, 8},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
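// Start indices that would read out of bounds are clamped: with a [2, 4]
// operand and a [2, 3] slice, the start {2, 1} behaves like {0, 1}, so the
// result matches the DynamicSlice test above.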
TEST_P(HloEvaluatorBf16Test, DynamicSliceModSlice) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<float>>(2, 4);
operand_array->FillUnique(1.0f);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<float>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto two = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
auto one = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
  b.AddInstruction(HloInstruction::CreateDynamicSlice(
      shape, operand, /*start_indices=*/{two, one}, /*slice_sizes=*/{2, 3}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({
{2, 3, 4},
{6, 7, 8},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
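// Verifies DynamicUpdateSlice: a 2x2 update written at offset (0, 1)
// overwrites only that window of the operand.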
TEST_P(HloEvaluatorBf16Test, DynamicSliceUpdate) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
auto zero = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto update = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}})));
Shape shape = ShapeUtil::MakeShape(F64, {2, 3});
b.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
shape, operand, update, {zero, one}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<double>({
{1, -2, -3},
{5, -6, -7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, SetAndGetTuples) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal2 =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
HloInstruction* operand2 = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal2)));
HloInstruction* operand1 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({0, 1})));
auto tuple =
b.AddInstruction(HloInstruction::CreateTuple({operand1, operand2}));
Shape shape = ShapeUtil::MakeShape(F64, {2, 3});
b.AddInstruction(HloInstruction::CreateGetTupleElement(shape, tuple, 1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<double>({
{1, 2, 3},
{5, 6, 7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, SetAndGetNestedTuples) {
HloComputation::Builder b(TestName());
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2FromArray2D<double>(*operand_array)));
HloInstruction* operand1 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({0, 1})));
auto tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({operand1, operand2}));
auto tuple2 =
b.AddInstruction(HloInstruction::CreateTuple({operand2, operand2}));
auto outer_tuple =
b.AddInstruction(HloInstruction::CreateTuple({tuple1, tuple2}));
b.AddInstruction(
HloInstruction::CreateGetTupleElement(tuple2->shape(), outer_tuple, 1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto result_inner_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
auto expected =
LiteralUtil::MakeTuple({&result_inner_literal, &result_inner_literal});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Reverse) {
HloComputation::Builder b(TestName());
Array4D<float> input({
{{{1.0f}, {2.0f}},
{{3.0f}, {4.0f}},
{{5.0f}, {6.0f}}},
{{{7.0f}, {8.0f}},
{{9.0f}, {10.0f}},
{{11.0f}, {12.0f}}},
{{{13.0f}, {14.0f}},
{{15.0f}, {16.0f}},
{{17.0f}, {18.0f}}},
{{{19.0f}, {20.0f}},
{{21.0f}, {22.0f}},
{{23.0f}, {24.0f}}},
});
auto operand_literal = LiteralUtil::CreateR4FromArray4D<float>(input);
HloInstruction* operand = b.AddInstruction(
HloInstruction::CreateConstant(std::move(operand_literal)));
const Shape shape = ShapeUtil::MakeShape(F32, {4, 3, 2, 1});
b.AddInstruction(HloInstruction::CreateReverse(shape, operand, {0, 1}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR4FromArray4D<float>({
{{{23.0f}, {24.0f}},
{{21.0f}, {22.0f}},
{{19.0f}, {20.0f}}},
{{{17.0f}, {18.0f}},
{{15.0f}, {16.0f}},
{{13.0f}, {14.0f}}},
{{{11.0f}, {12.0f}},
{{9.0f}, {10.0f}},
{{7.0f}, {8.0f}}},
{{{5.0f}, {6.0f}},
{{3.0f}, {4.0f}},
{{1.0f}, {2.0f}}},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
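// EvaluateWithSubstitutions replaces the listed instructions with the given
// literals, so `square` is read from the substitution map instead of being
// recomputed from `param0`.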
TEST_P(HloEvaluatorBf16Test, EvaluateWithSubstitutions) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
HloInstruction* add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param0, square));
HloEvaluator evaluator;
Literal param0_literal = LiteralUtil::CreateR1<float>({1, 2, 3, 4});
Literal square_literal = LiteralUtil::CreateR1<float>({10, 20, 30, 40});
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator.EvaluateWithSubstitutions(
          add, {{param0, &param0_literal}, {square, &square_literal}}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({11, 22, 33, 44}), result));
}
TEST_P(HloEvaluatorBf16Test, EvaluateWithSubstitutionsWithConstantOperand) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
HloInstruction* constant = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1, 2, 3, 4})));
HloInstruction* add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, constant, square));
HloEvaluator evaluator;
Literal square_literal = LiteralUtil::CreateR1<float>({10, 20, 30, 40});
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator.EvaluateWithSubstitutions(add, {{square, &square_literal}}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({11, 22, 33, 44}), result));
}
TEST_F(HloEvaluatorTest, EvaluateWithSubstitutionsLiteralBase) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(S64, {3});
HloInstruction* param0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param0"));
HloInstruction* square = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, param0, param0));
int64_t int64_values[] = {1, 2, 3};
const Shape literal_shape = ShapeUtil::MakeShape(S64, {3});
BorrowingLiteral literal(reinterpret_cast<const char*>(int64_values),
literal_shape);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(Literal result, evaluator.EvaluateWithSubstitutions(
square, {{param0, &literal}}));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<int64_t>({1, 4, 9}),
result));
}
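// The gather tests below exercise HLO gather evaluation with various
// dimension numbers: TensorFlow-style gathers, gather-nd, and
// dynamic-slice-like gathers.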
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherV1) {
const char* hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {7, 8, 9}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherV2) {
const char* hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 3}, {4, 6}, {7, 9}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherMultipleBatchDims) {
const char* hlo_text = R"(
HloModule TensorFlowGatherMultipleBatchDims
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,3,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}, {2, 1}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR3<int32_t>(
{{{1, 3}, {4, 6}, {7, 9}}, {{3, 2}, {6, 5}, {9, 8}}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_TensorFlowGatherNd) {
const char* hlo_text = R"(
HloModule TensorFlowGatherNd
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{-1, 1}, {-4, 4}}), result));
}
TEST_F(HloEvaluatorTest,
EvaluateGather_TensorFlowGatherNdNonDefaultIndexVectorDim) {
const char* hlo_text = R"(
HloModule TensorFlowGatherNd
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{-2, 2}, {-1, 1}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_DynamicSlice) {
const char* hlo_text = R"(
HloModule DynamicSlice
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[1,1] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>({{5}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_BatchDynamicSlice) {
const char* hlo_text = R"(
HloModule BatchDynamicSlice
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
ROOT gather = s32[2,1,1] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal start_indices = LiteralUtil::CreateR2<int32_t>({{2, 1}, {1, 1}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR3<int32_t>({{{8}}, {{5}}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_ZeroDimBounds) {
const char* hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,0] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,0] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR2<int32_t>({{}, {}, {}});
Literal start_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>({{}, {}}), result));
}
TEST_F(HloEvaluatorTest, EvaluateGather_NoOutputWindowDims) {
const std::string hlo_text = R"(
HloModule GatherXd
ENTRY main {
operand = s32[3] parameter(0)
indices = s32[2,2,1] parameter(1)
ROOT gather = s32[2,2] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2});
Literal start_indices =
LiteralUtil::CreateR3<int32_t>({{{0}, {1}}, {{2}, {1}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&operand, &start_indices}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{0, 1}, {2, 1}}), result));
}
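// The scatter tests below exercise HLO scatter evaluation: plain updates,
// combining computations (add/mul), multiple batch dims, variadic outputs,
// and out-of-bounds index handling.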
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatterV1_Update) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {4, 5, 6}, {70, 80, 90}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatterV2_Update) {
const char* hlo_text = R"(
HloModule TensorFlowScatterV2
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[3,2] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={0},
inserted_window_dims={1},
scatter_dims_to_operand_dims={1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 30}, {40, 60}, {70, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{10, 2, 30}, {40, 5, 60}, {70, 8, 90}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_Add) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{11, 22, 33}, {4, 5, 6}, {77, 88, 99}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_Mul) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
mul_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT mul = s32[] multiply(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=mul_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>(
{{10, 40, 90}, {4, 5, 6}, {490, 640, 810}}),
result));
}
TEST_P(HloEvaluatorBf16Test, EvaluateScatter_TensorFlowScatter_F32) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(f32[] lhs, f32[] rhs)
}
ENTRY main {
operand = f32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f32[2,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, updates),
to_apply=add_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR2<float>(
{{1.1, 2.2, 3.3}, {4.4, 5.5, 6.6}, {7.7, 8.8, 9.9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({2, 1});
Literal updates =
LiteralUtil::CreateR2<float>({{0.4, 1.1, 0.7}, {2.3, 3.1, 1.6}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Near(
LiteralUtil::CreateR2<float>(
{{1.1, 2.2, 3.3}, {6.7, 8.6, 8.2}, {8.1, 9.9, 10.6}}),
result, ErrorSpec{0.1, 0.01}));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_RepeatedIndices) {
const char* hlo_text = R"(
HloModule TensorFlowScatter
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {84, 105, 126}, {7, 8, 9}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatter_MultipleBatchDims) {
const char* hlo_text = R"(
HloModule TensorFlowScatterMultipleBatchDims
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,3,2] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={1},
scatter_dims_to_operand_dims={1},
index_vector_dim=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}, {2, 1}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10, 30}, {40, 60}, {70, 90}}, {{5, 5}, {5, 5}, {5, 5}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(
LiteralTestUtil::Equal(LiteralUtil::CreateR2<int32_t>(
{{11, 7, 38}, {44, 10, 71}, {77, 13, 104}}),
result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_TensorFlowScatterNd) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNd
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-40, 40}});
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-2, 2}, {-3, 3}},
{{-40, 40}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest,
EvaluateScatter_TensorFlowScatterNd_NonDefaultIndexVectorDim) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNdNonDefaultIndexVectorDim
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-20, 20}});
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{-20, 20}, {-10, 10}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_DynamicUpdateSlice) {
const char* hlo_text = R"(
HloModule DynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[1,1] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({1, 1});
Literal updates = LiteralUtil::CreateR2<int32_t>({{10}});
Literal expected =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 10, 6}, {7, 8, 9}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_BatchDynamicUpdateSlice) {
const char* hlo_text = R"(
HloModule BatchDynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
updates = s32[2,1,1] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{2, 1}, {1, 1}});
Literal updates = LiteralUtil::CreateR3<int32_t>({{{10}}, {{20}}});
Literal expected =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 20, 6}, {7, 10, 9}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_ZeroDimBounds) {
const char* hlo_text = R"(
HloModule TensorFlowScatter_ZeroDimBounds
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,0] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,0] parameter(2)
ROOT scatter = s32[3,0] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR2<int32_t>({{}, {}, {}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates = LiteralUtil::CreateR2<int32_t>({{}, {}});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(operand, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_NoUpdateWindowDims) {
const std::string hlo_text = R"(
HloModule Scatter_NoUpdateWindowDims
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3] parameter(0)
indices = s32[2,2,1] parameter(1)
updates = s32[2,2] parameter(2)
ROOT scatter = s32[3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2});
Literal scatter_indices =
LiteralUtil::CreateR3<int32_t>({{{0}, {1}}, {{2}, {1}}});
Literal updates = LiteralUtil::CreateR2<int32_t>({{10, 20}, {30, 40}});
Literal expected = LiteralUtil::CreateR1<int32_t>({10, 61, 32});
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&operand, &scatter_indices, &updates}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_NegativeIndices) {
const char* hlo_text = R"(
HloModule TensorFlowScatter_NegativeIndices
add_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(s32[] lhs, s32[] rhs)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({-1, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {77, 88, 99}}),
EvaluateWithModule(module.get(),
{&operand, &scatter_indices, &updates})));
}
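// Scatter updates whose index would place the update window out of bounds
// are discarded; only the in-bounds indices (2,1), (1,1), and (1,2) take
// effect here.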
TEST_F(HloEvaluatorTest, EvaluateScatter_OobIndices) {
const std::string hlo_text = R"(
HloModule BatchDynamicUpdateSlice
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3]{1,0} parameter(0)
indices = s32[6,2]{1,0} parameter(1)
updates = s32[6,1,1]{2,1,0} parameter(2)
ROOT scatter = s32[3,3]{1,0} scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>(
{{2, 7}, {2, 1}, {1, 1}, {5, 1}, {2147483647, 1}, {1, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>(
{{{10}}, {{20}}, {{30}}, {{40}}, {{50}}, {{60}}});
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 30, 60}, {7, 20, 9}}),
EvaluateWithModule(module.get(),
{&operand, &scatter_indices, &updates})));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_OobUpdateWindow) {
const char* hlo_text = R"(
HloModule TensorFlowScatterNd_OobUpdateWindow
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[1,2] parameter(1)
updates = s32[1,2,2] parameter(2)
ROOT scatter = s32[3,3,2] scatter(operand, indices, updates),
to_apply=update_s32,
update_window_dims={1,2},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 2}});
Literal updates = LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-40, 40}}});
Literal expected = operand.Clone();
EXPECT_TRUE(LiteralTestUtil::Equal(
expected, EvaluateWithModule(module.get(),
{&operand, &scatter_indices, &updates})));
}
TEST_F(HloEvaluatorTest, EvaluateScatter_Multioutput) {
const char* hlo_text = R"(
HloModule MultioutputScatter
update {
lhs0 = s32[] parameter(0)
lhs1 = f32[] parameter(1)
rhs0 = s32[] parameter(2)
rhs1 = f32[] parameter(3)
ROOT tuple = (s32[], f32[]) tuple(rhs0, rhs1)
}
ENTRY main {
operand0 = s32[3,3,2] parameter(0)
operand1 = f32[3,3,2] parameter(1)
indices = s32[2,2] parameter(2)
updates0 = s32[2,2] parameter(3)
updates1 = f32[2,2] parameter(4)
ROOT scatter = (s32[3,3,2], f32[3,3,2]) scatter(operand0, operand1, indices, updates0, updates1),
to_apply=update,
update_window_dims={1},
inserted_window_dims={0,1},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal operand0 =
LiteralUtil::CreateR3<int32_t>({{{-1, 1}, {-2, 2}, {-3, 3}},
{{-4, 4}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}});
Literal operand1 =
LiteralUtil::CreateR3<float>({{{-2, 2}, {-3, 3}, {-4, 4}},
{{-5, 5}, {-6, 6}, {-7, 7}},
{{-8, 8}, {-9, 9}, {-10, 10}}});
Literal scatter_indices = LiteralUtil::CreateR2<int32_t>({{0, 0}, {1, 0}});
Literal updates0 = LiteralUtil::CreateR2<int32_t>({{-10, 10}, {-40, 40}});
Literal updates1 = LiteralUtil::CreateR2<float>({{-11, 11}, {-41, 41}});
Literal expected = LiteralUtil::MakeTupleOwned(
LiteralUtil::CreateR3<int32_t>({{{-10, 10}, {-2, 2}, {-3, 3}},
{{-40, 40}, {-5, 5}, {-6, 6}},
{{-7, 7}, {-8, 8}, {-9, 9}}}),
LiteralUtil::CreateR3<float>({{{-11, 11}, {-3, 3}, {-4, 4}},
{{-41, 41}, {-6, 6}, {-7, 7}},
{{-8, 8}, {-9, 9}, {-10, 10}}}));
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
Evaluate({&operand0, &operand1, &scatter_indices, &updates0, &updates1}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesCompareBF16) {
auto lhs = LiteralUtil::CreateR2<bfloat16>(
{{bfloat16(0.25), bfloat16(0.35), bfloat16(0.125)},
{bfloat16(-0.25), bfloat16(-0.35), bfloat16(-0.125)}});
auto rhs = LiteralUtil::CreateR2<bfloat16>(
{{bfloat16(0.5), bfloat16(0.125), bfloat16(0.125)},
{bfloat16(0.25), bfloat16(-0.375), bfloat16(-0.127)}});
auto expected =
LiteralUtil::CreateR2<bool>({{false, true, true}, {false, true, true}});
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs)));
b.AddInstruction(HloInstruction::CreateCompare(expected.shape(), c1, c2,
ComparisonDirection::kGe));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Bf16Reduction) {
const std::string hlo_text = R"(
HloModule Bf16Reduction
add_bf16 (lhs: bf16[], rhs: bf16[]) -> bf16[] {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(bf16[] lhs, bf16[] rhs)
}
ENTRY main {
arg0 = bf16[4]{0} parameter(0)
init = bf16[] constant(0)
ROOT %reduce = bf16[] reduce(arg0, init), dimensions={0}, to_apply=add_bf16
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal arg = LiteralUtil::CreateR1<bfloat16>(
{bfloat16(1.0f), bfloat16(3.0f), bfloat16(-2.0f), bfloat16(42.0f)});
Literal expected = LiteralUtil::CreateR0<bfloat16>(bfloat16(44.0f));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&arg}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MixedPrecisionReduction) {
const std::string hlo_text = R"(
HloModule MixedPrecisionReduction
add_f32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
arg0 = f32[4]{0} parameter(0)
init = f32[] constant(0)
ROOT %reduce = bf16[] reduce(arg0, init), dimensions={0}, to_apply=add_f32
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal arg = LiteralUtil::CreateR1<float>({1.0f, 3.0f, -2.0f, 42.0f});
Literal expected = LiteralUtil::CreateR0<bfloat16>(bfloat16(44.0f));
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({&arg}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DontFailOnCallUnimplementedOps) {
const std::string hlo_text = R"(
HloModule DontFailOnCall
call {
token0 = token[] after-all()
constant = u32[3]{0} constant({1,2,3})
ROOT outfeed = token[] outfeed(constant, token0), outfeed_shape=u32[3]{0}
}
ENTRY main {
ROOT result = token[] call(), to_apply=call
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto statusor = Evaluate();
EXPECT_FALSE(statusor.status().ok());
}
TEST_F(HloEvaluatorTest, DontFailOnFusionWithUnimplementedOps) {
const std::string hlo_text = R"(
HloModule DontFailOnFusion
fused_computation {
token0 = token[] after-all()
constant = u32[3]{0} constant({1,2,3})
ROOT outfeed = token[] outfeed(constant, token0), outfeed_shape=u32[3]{0}
}
ENTRY main {
ROOT result = token[] fusion(), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto statusor = Evaluate();
EXPECT_FALSE(statusor.status().ok());
}
TEST_P(HloEvaluatorBf16Test, SliceWithDifferentLayout) {
const std::string hlo_text = R"(
HloModule SliceWithDifferentLayout
ENTRY main {
arg = f32[2,2,2]{0,1,2} parameter(0)
ROOT %slice = f32[2,2,2]{1,0,2} slice(f32[2,2,2]{0,1,2} %arg), slice={[0:2], [0:2], [0:2]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal arg = LiteralUtil::CreateR3WithLayout<float>(
{{{1.0f, 2.0f}, {3.0f, 4.0f}}, {{5.0f, 6.0f}, {7.0f, 8.0f}}},
LayoutUtil::MakeLayout({0, 1, 2}));
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&arg}));
EXPECT_TRUE(LiteralTestUtil::Equal(arg, actual));
}
TEST_P(HloEvaluatorBf16Test, Bitcast) {
const absl::string_view hlo_text_base = R"(
HloModule Bitcast
ENTRY main {
param = %s[32,121]{1,0} parameter(0)
ROOT bitcast = %s[121,32,1]{0,1,2} bitcast(%s[32,121]{1,0} param)
}
)";
std::string hlo_text;
if (use_bfloat16_) {
hlo_text = absl::StrFormat(hlo_text_base, "bf16", "bf16", "bf16");
} else {
hlo_text = absl::StrFormat(hlo_text_base, "f32", "f32", "f32");
}
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&args[0]}));
if (use_bfloat16_) {
EXPECT_TRUE(
absl::c_equal(args[0].data<bfloat16>(), actual.data<bfloat16>()));
} else {
EXPECT_TRUE(absl::c_equal(args[0].data<float>(), actual.data<float>()));
}
}
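// Integer arithmetic in the evaluator wraps modulo 2^32: signed
// add/subtract/multiply overflow two's-complement style, and the unsigned
// power 4294967295^33 (== (-1)^33 mod 2^32) wraps back to 4294967295.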
TEST_F(HloEvaluatorTest, Int32Overflow) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
c1 = s32[] constant(1073741824)
sum = s32[] add(c1, c1)
c2 = s32[] constant(-2147483648)
sub = s32[] subtract(c2, c1)
c3 = u32[] constant(4294967295)
c4 = u32[] constant(33)
mul = s32[] multiply(c1, c1)
pow = u32[] power(c3, c4)
ROOT tuple = (s32[], s32[], s32[], u32[]) tuple(sum, sub, mul, pow)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(auto literal, Evaluate({}));
std::vector<Literal> actual = literal.DecomposeTuple();
ASSERT_EQ(actual.size(), 4);
uint32_t pow30 = uint32_t{1} << 30;
uint32_t pow31 = uint32_t{1} << 31;
EXPECT_EQ(actual[0].GetFirstElement<int32_t>(), static_cast<int32_t>(pow31));
EXPECT_EQ(actual[1].GetFirstElement<int32_t>(),
static_cast<int32_t>(-(pow31 + pow30)));
EXPECT_EQ(actual[2].GetFirstElement<int32_t>(),
static_cast<int32_t>(pow31 * pow31));
EXPECT_EQ(actual[3].GetFirstElement<uint32_t>(), uint32_t{4294967295});
}
TEST_F(HloEvaluatorTest, GetDimensionSize) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
size = s32[] parameter(0)
data = s32[4] parameter(1)
data_dynamic = s32[<=4] set-dimension-size(data, size), dimensions={0}
sum = s32[<=4] add(data_dynamic, data)
ROOT dynamic_size = s32[] get-dimension-size(sum), dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(DynamicDimensionInference dynamic_dimension_inference,
DynamicDimensionInference::Run(m_.get()));
evaluator_.set_dynamic_dimension_inference(&dynamic_dimension_inference);
Literal size_arg = LiteralUtil::CreateR0<int32_t>(3);
Literal data_arg = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4});
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&size_arg, &data_arg}));
EXPECT_EQ(actual.GetFirstElement<int32_t>(), static_cast<int32_t>(3));
}
TEST_F(HloEvaluatorTest, EvaluateWithWrongInputShapes) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
p0 = s32[1] parameter(0)
ROOT sum = s32[1] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal input_wrong_shape = LiteralUtil::CreateR1<int32_t>({0, 1});
EXPECT_EQ(
HloEvaluator().Evaluate(*m_, {&input_wrong_shape}).status().message(),
"Shape mismatch at parameter 0. Computation expected s32[1]{0}, "
"but arg was s32[2]{0}.");
EXPECT_EQ(HloEvaluator()
.Evaluate(*m_->entry_computation(), {&input_wrong_shape})
.status()
.message(),
"Shape mismatch at parameter 0. Computation expected s32[1]{0}, "
"but arg was s32[2]{0}.");
}
TEST_F(HloEvaluatorTest, EvaluateWithWrongNumberOfInputs) {
const absl::string_view hlo_text = R"(
HloModule Test
ENTRY main {
p0 = s32[1] parameter(0)
ROOT sum = s32[1] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal input = LiteralUtil::CreateR1<int32_t>({0});
EXPECT_EQ(HloEvaluator().Evaluate(*m_, {&input, &input}).status().message(),
"Expected 1 argument, but got 2.");
EXPECT_EQ(HloEvaluator()
.Evaluate(*m_->entry_computation(), {&input, &input})
.status()
.message(),
"Expected 1 argument, but got 2.");
}
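// The next tests check that evaluation honors the layouts declared on a
// fusion boundary: bitcasts between {1,0} and {0,1} layouts must pass the
// data through unchanged.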
TEST_F(HloEvaluatorTest, PreserveFusionInputLayout) {
const absl::string_view hlo_text = R"(
HloModule FusionInputLayout
fused_computation {
param_0 = f32[20,20]{0,1} parameter(0)
ROOT bitcast = f32[20,20]{1,0} bitcast(param_0)
}
ENTRY kernel_entry {
parameter.0 = f32[20,20]{0,1} parameter(0)
ROOT fusion = f32[20,20]{1,0} fusion(parameter.0),
kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&args[0]}));
EXPECT_TRUE(absl::c_equal(args[0].data<float>(), actual.data<float>()));
}
TEST_F(HloEvaluatorTest, PreserveFusionOutputLayout) {
const absl::string_view hlo_text = R"(
HloModule FusionOutputLayout
fused_computation {
param_0 = f32[20,20]{1,0} parameter(0)
ROOT bitcast = f32[20,20]{0,1} bitcast(param_0)
}
ENTRY kernel_entry {
parameter.0 = f32[20,20]{1,0} parameter(0)
ROOT fusion = f32[20,20]{0,1} fusion(parameter.0),
kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual, Evaluate({&args[0]}));
EXPECT_TRUE(absl::c_equal(args[0].data<float>(), actual.data<float>()));
}
TEST_F(HloEvaluatorTest, PreserveMOFusionOutputLayout) {
const absl::string_view hlo_text = R"(
HloModule MOFusionOutputLayout
fused_computation {
param_0 = f32[20,20]{1,0} parameter(0)
bitcast = f32[20,20]{0,1} bitcast(param_0)
ROOT tuple = (f32[20,20]{0,1}) tuple(bitcast)
}
ENTRY kernel_entry {
parameter.0 = f32[20,20]{1,0} parameter(0)
ROOT fusion = (f32[20,20]{0,1}) fusion(parameter.0),
kind=kLoop, calls=fused_computation
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
TF_ASSERT_OK_AND_ASSIGN(Literal actual_tuple, Evaluate({&args[0]}));
std::vector<Literal> actual_literals = actual_tuple.DecomposeTuple();
EXPECT_TRUE(
absl::c_equal(args[0].data<float>(), actual_literals[0].data<float>()));
}
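// Custom calls have no built-in semantics: without a registered handler
// evaluation fails with UNIMPLEMENTED, and a handler's result (or error) is
// propagated as-is.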
TEST_F(HloEvaluatorTest, EvaluateCustomCall_NoHandler) {
const absl::string_view hlo_text = R"(
HloModule EvaluateCustomCall_NoHandler
ENTRY kernel_entry {
parameter.0 = u32[2,2]{1,0} parameter(0)
ROOT test_root = (u32[2,2]{1,0}) custom-call(parameter.0),
custom_call_target="_my_custom_call"
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
EXPECT_EQ(HloEvaluator().Evaluate(*m_, {&args[0]}).status().code(),
::tsl::error::UNIMPLEMENTED);
}
TEST_F(HloEvaluatorTest, EvaluateCustomCall_HandlerError) {
const absl::string_view hlo_text = R"(
HloModule EvaluateCustomCall_HandlerError
ENTRY kernel_entry {
parameter.0 = u32[2,2]{1,0} parameter(0)
ROOT test_root = (u32[2,2]{1,0}) custom-call(parameter.0),
custom_call_target="_my_custom_call"
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
HloEvaluator evaluator;
evaluator.set_custom_call_handler([](const HloInstruction* custom_call,
absl::Span<const Literal*> operands) {
return Internal("Test error");
});
EXPECT_EQ(evaluator.Evaluate(*m_, {&args[0]}).status().code(),
::tsl::error::INTERNAL);
}
TEST_F(HloEvaluatorTest, EvaluateCustomCall_ManyInputs) {
const absl::string_view hlo_text = R"(
HloModule EvaluateCustomCall_ManyInputs
ENTRY kernel_entry {
parameter.0 = u32[1]{0} parameter(0)
parameter.1 = u32[1]{0} parameter(1)
ROOT test_root = u32[1]{0} custom-call(parameter.0, parameter.1),
custom_call_target="_my_custom_call"
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto args = MakeFakeArguments(m_.get()).value();
HloEvaluator evaluator;
evaluator.set_custom_call_handler([](const HloInstruction* custom_call,
absl::Span<const Literal*> operands) {
EXPECT_EQ(HloOpcode::kCustomCall, custom_call->opcode());
EXPECT_EQ("_my_custom_call", custom_call->custom_call_target());
EXPECT_EQ(2, custom_call->operand_count());
EXPECT_EQ(2, operands.size());
auto output = Literal::CreateFromShape(custom_call->shape());
auto operand0_data = operands[0]->data<uint32_t>();
auto operand1_data = operands[1]->data<uint32_t>();
auto output_data = output.data<uint32_t>();
output_data[0] = operand0_data[0] + operand1_data[0];
return output;
});
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
evaluator.Evaluate(*m_->entry_computation(), {&args[0], &args[1]}));
auto arg0_data = args[0].data<uint32_t>();
auto arg1_data = args[1].data<uint32_t>();
std::vector<uint32_t> expected_data = {arg0_data[0] + arg1_data[0]};
EXPECT_TRUE(absl::c_equal(expected_data, actual_literal.data<uint32_t>()));
}
TEST_F(HloEvaluatorTest, EvaluateCustomCallInFusion) {
const absl::string_view hlo_text = R"(
fusion1 {
p = f32[] parameter(0)
ROOT c = f32[] custom-call(p), custom_call_target="__cchandler1"
}
ENTRY e {
p = f32[] parameter(0)
ROOT f = f32[] fusion(p), kind=kCustom, calls=fusion1
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
auto input = LiteralUtil::CreateR0<float>(0);
HloEvaluator evaluator;
evaluator.set_custom_call_handler([](const HloInstruction* custom_call,
absl::Span<const Literal*> operands) {
return LiteralUtil::CreateR0<float>(1 -
operands[0]->GetFirstElement<float>());
});
TF_ASSERT_OK_AND_ASSIGN(auto output, evaluator.Evaluate(*m_, {&input}));
EXPECT_EQ(output, LiteralUtil::CreateR0<float>(1));
}
TEST_F(HloEvaluatorTest, IsFiniteF16) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY IsFiniteTest {
c = f16[6] constant({nan, 7, nan, -1, inf, -inf})
ROOT is-finite = pred[6] is-finite(c)
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_THAT(actual_literal.data<bool>(),
::testing::ElementsAre(false, true, false, true, false, false));
}
TEST_F(HloEvaluatorTest, IsFiniteBf16) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY IsFiniteTest {
c = bf16[6] constant({nan, 7, nan, -1, inf, -inf})
ROOT is-finite = pred[6] is-finite(c)
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_THAT(actual_literal.data<bool>(),
::testing::ElementsAre(false, true, false, true, false, false));
}
TEST_F(HloEvaluatorTest, ZeroSizedIotaWithHugeDimension) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY t {
ROOT i = f32[1000000000000, 0] iota(), iota_dimension=0
})";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
Literal actual_literal,
HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_THAT(actual_literal.data<float>(), ::testing::IsEmpty());
}
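// A copy-start/copy-done pair evaluates to a plain copy of the operand.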
TEST_F(HloEvaluatorTest, CopyStartCopyDone) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY CopyStartCopyDone {
init = f32[] constant(42.0)
copy-start = (f32[]{:S(1)}, f32[], u32[]) copy-start(init)
ROOT copy-done = f32[] copy-done(copy-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR0<float>(42.0f);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, CopyDifferentTypes) {
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY CopyDifferentTypes {
c = bf16[3] constant({1, 2, 3})
ROOT copy = f32[3] copy(bf16[3] c)
}
)"));
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(
LiteralUtil::CreateR1<float>({1.f, 2.f, 3.f}), result));
}
TEST_F(HloEvaluatorTest, AsyncOps) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY AsyncOps {
init = f32[] constant(42.0)
async-start = ((f32[]), f32[], u32[]) negate-start(init)
async-update = ((f32[]), f32[], u32[]) negate-update(async-start)
ROOT async-done = f32[] negate-done(async-update)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR0<float>(-42.0f);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
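// The map tests below apply a scalar computation elementwise, covering bf16,
// s16, u16, and mixed operand element types.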
TEST_F(HloEvaluatorTest, MapBF16) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p = bf16[] parameter(0)
add = bf16[] add(p, p)
ROOT conv = f32[] convert(add)
}
ENTRY CopyStartCopyDone {
c = bf16[3] constant({1, 2, 3})
ROOT map = f32[3] map(c), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MapS16) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p = s16[] parameter(0)
add = s16[] add(p, p)
ROOT conv = f32[] convert(add)
}
ENTRY CopyStartCopyDone {
c = s16[3] constant({1, 2, 3})
ROOT map = f32[3] map(c), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MapU16) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p = u16[] parameter(0)
add = u16[] add(p, p)
ROOT conv = f32[] convert(add)
}
ENTRY CopyStartCopyDone {
c = u16[3] constant({1, 2, 3})
ROOT map = f32[3] map(c), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, MapMixed) {
const absl::string_view hlo_text = R"(
HloModule test
map_computation {
p0 = u16[] parameter(0)
p1 = f32[] parameter(1)
c0 = f32[] convert(p0)
ROOT add = f32[] add(c0, p1)
}
ENTRY CopyStartCopyDone {
c0 = u16[3] constant({1, 2, 3})
c1 = f32[3] constant({1.5, 2.5, 3.5})
ROOT map = f32[3] map(c0, c1), to_apply=map_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected = LiteralUtil::CreateR1<float>({2.5f, 4.5f, 6.5f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
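// Dot with mixed narrow integer operands (s16 x s8) accumulates into the
// wider s32 result type.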
TEST_F(HloEvaluatorTest, DotUpcast) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY DotUpcast {
l = s16[4,3]{1,0} parameter(0)
r = s8[3,2]{1,0} parameter(1)
ROOT result = s32[4,2] dot(l, r), lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
)";
auto lhs_array = std::make_unique<Array2D<int16_t>>(4, 3);
lhs_array->FillUnique(1);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<int16_t>(*lhs_array);
auto rhs_array = std::make_unique<Array2D<int8_t>>(3, 2);
rhs_array->FillUnique(1);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<int8_t>(*rhs_array);
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&lhs_literal, &rhs_literal}));
auto expected_array =
Array2D<int32_t>({{22, 28}, {58, 76}, {94, 124}, {130, 172}});
auto expected = LiteralUtil::CreateR2FromArray2D<int32_t>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, SortC64) {
const absl::string_view hlo_text = R"(
HloModule m
sort_lt_comparator {
parameter.0 = c64[] parameter(0)
real.0 = f32[] real(parameter.0)
parameter.1 = c64[] parameter(1)
real.1 = f32[] real(parameter.1)
ROOT compare = pred[] compare(real.0, real.1), direction=LT
}
ENTRY main {
c = c64[3] constant({(2, 0), (4, 0), (6, 0)})
ROOT sort = c64[3]{0} sort(c), dimensions={0}, to_apply=sort_lt_comparator
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected =
LiteralUtil::CreateR1<std::complex<float>>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, ConvertC128ToC64) {
const absl::string_view hlo_text = R"(
HloModule m
ENTRY main {
c = c128[3] constant({(2, 0), (4, 0), (6, 0)})
ROOT sort = c64[3]{0} convert(c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
Literal expected =
LiteralUtil::CreateR1<std::complex<float>>({2.f, 4.f, 6.f});
TF_ASSERT_OK_AND_ASSIGN(
Literal result, HloEvaluator().Evaluate(*m_->entry_computation(), {}));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
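// The remaining tests cover partial (recursive) evaluation: an instruction
// can be evaluated when everything it transitively depends on is constant or
// a known tuple element, and fails otherwise (e.g. unknown params, infeed).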
TEST_F(HloEvaluatorTest, RecursivelyEvaluateNonConstantOperands) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Literal c1_literal = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
Literal c2_literal = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* c1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c1_literal)));
HloInstruction* c2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c2_literal)));
HloInstruction* add0 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, c0, c1));
HloInstruction* add1 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, c1, c2));
HloInstruction* add2 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, add0, add1));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{2, 16}, {6, 16}});
TestRecursivelyEvaluateInstruction(add2, expected);
}
TEST_F(HloEvaluatorTest, GetTupleElementOnPartiallyKnownTupleSucceeds) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* tuple =
b.AddInstruction(HloInstruction::CreateTuple({p0, p1, c0}));
HloInstruction* gte =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple, 2));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
TestRecursivelyEvaluateInstruction(gte, expected);
}
TEST_F(HloEvaluatorTest, InfeedFailure) {
HloComputation::Builder b(TestName());
HloInstruction* token = b.AddInstruction(HloInstruction::CreateToken());
HloInstruction* infeed = b.AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {4, 4}), token, ""));
m_->AddEntryComputation(b.Build());
TestRecursiveEvaluationFailure(infeed);
}
TEST_F(HloEvaluatorTest, GetUnknownTupleElementFails) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* tuple =
b.AddInstruction(HloInstruction::CreateTuple({p0, p1, c0}));
HloInstruction* gte =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple, 0));
m_->AddEntryComputation(b.Build());
TestRecursiveEvaluationFailure(gte);
}
TEST_F(HloEvaluatorTest, GetTupleElementFromNestedTupleSucceeds) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* tuple0 =
b.AddInstruction(HloInstruction::CreateTuple({p0, c0}));
HloInstruction* tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({tuple0, p1}));
HloInstruction* gte0 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple1, 0));
HloInstruction* gte1 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(gte0, 1));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
TestRecursivelyEvaluateInstruction(gte1, expected);
}
TEST_F(HloEvaluatorTest, GetTupleElementInterleavedWithTupleSucceeds) {
Literal c0_literal = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
Shape shape = c0_literal.shape();
HloComputation::Builder b(TestName());
HloInstruction* c0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(c0_literal)));
HloInstruction* p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "param.0"));
HloInstruction* p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "param.1"));
HloInstruction* p2 =
b.AddInstruction(HloInstruction::CreateParameter(2, shape, "param.2"));
HloInstruction* tuple0 =
b.AddInstruction(HloInstruction::CreateTuple({p0, c0}));
HloInstruction* tuple1 =
b.AddInstruction(HloInstruction::CreateTuple({tuple0, p1}));
HloInstruction* gte0 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple1, 0));
HloInstruction* tuple2 =
b.AddInstruction(HloInstruction::CreateTuple({gte0, p2}));
HloInstruction* gte1 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(tuple2, 0));
HloInstruction* gte2 =
b.AddInstruction(HloInstruction::CreateGetTupleElement(gte1, 1));
m_->AddEntryComputation(b.Build());
Literal expected = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
TestRecursivelyEvaluateInstruction(gte2, expected);
}
TEST_F(HloEvaluatorTest, ParameterThroughCallSucceeds) {
constexpr absl::string_view kHloModule = R"(
HloModule parameter_through_call
%identity {
ROOT %param = s32[] parameter(0)
}
ENTRY parameter_through_call {
%constant = s32[] constant(42)
ROOT %call = s32[] call(s32[] %constant), to_apply=%identity
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
const HloInstruction* parameter_instruction = nullptr;
for (const auto* computation : hlo_module->computations()) {
for (const auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_instruction = instruction;
}
}
}
ASSERT_NE(parameter_instruction, nullptr);
Literal expected = LiteralUtil::CreateR0<int32_t>(42);
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator_.Evaluate(parameter_instruction, /*precomputed_analyses=*/{},
                    /*recursively_evaluate_nonconstant_operands=*/true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
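// Same as the previous test, but passes precomputed TuplePointsToAnalysis and
// CallGraph results so the evaluator can reuse them instead of recomputing.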
TEST_F(HloEvaluatorTest, ParameterThroughCallSucceedsWithPrecomputation) {
constexpr absl::string_view kHloModule = R"(
HloModule parameter_through_call
%identity {
ROOT %param = s32[] parameter(0)
}
ENTRY parameter_through_call {
%constant = s32[] constant(42)
ROOT %call = s32[] call(s32[] %constant), to_apply=%identity
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
const HloInstruction* parameter_instruction = nullptr;
for (const auto* computation : hlo_module->computations()) {
for (const auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_instruction = instruction;
}
}
}
ASSERT_NE(parameter_instruction, nullptr);
Literal expected = LiteralUtil::CreateR0<int32_t>(42);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TuplePointsToAnalysis> tuple_points_to,
TuplePointsToAnalysis::Run(hlo_module.get()));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module.get());
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator_.Evaluate(parameter_instruction,
{tuple_points_to.get(), call_graph.get()},
/*recursively_evaluate_nonconstant_operands=*/true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
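// The PatternMatchParseWhileLoop tests verify recognition of canonical
// (init, bound, step) while loops: a statically parsed loop must report its
// trip count, induction variable index, init value, step size, and loop
// bound, while loops that depend on runtime values must be marked dynamic.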
class PatternMatchParseWhileLoopTest : public HloTestBase {};
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundDefinedInsideOfCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%loop_bound = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %loop_bound), direction=LT
}
%while_body {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 5);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 5);
}
TEST_F(PatternMatchParseWhileLoopTest,
LoopBoundDefinedInsideOfCondWithPrecomputation) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%loop_bound = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %loop_bound), direction=LT
}
%while_body {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TuplePointsToAnalysis> tuple_points_to,
TuplePointsToAnalysis::Run(hlo_module.get()));
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module.get());
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop = PatternMatchParseWhileLoop(
while_op, {tuple_points_to.get(), call_graph.get()});
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 5);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 5);
}
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundDefinedOutsideOfCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %constant.1, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundComputedOutsideOfCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(10)
%constant.2 = s32[] constant(4)
%loop_bound = s32[] multiply(s32[] %constant.1, s32[] %constant.2)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 40);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 40);
}
TEST_F(PatternMatchParseWhileLoopTest, StepSizeNotOne) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(4)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%constant.1 = s32[] constant(10)
%constant.2 = s32[] constant(4)
%loop_bound = s32[] multiply(s32[] %constant.1, s32[] %constant.2)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 4);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 40);
}
TEST_F(PatternMatchParseWhileLoopTest, RecursiveCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%compute_pred {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(%gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
TEST_F(PatternMatchParseWhileLoopTest, RecursiveCondGetTupleElement) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%compute_pred {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(%gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%get_tuple_element {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred
%gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
ROOT %tuple.1 = (pred[]) tuple(pred[] %gte.4)
}
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%get_tuple_element
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
TEST_F(PatternMatchParseWhileLoopTest, LoopBoundDependsOnAnotherLoop) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%compute_pred.0 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(%gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%while_condition.0 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred.0
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body.0 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
%compute_pred.1 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%compare = pred[] compare(%gte.0, %gte.1), direction=LT
ROOT %tuple = (pred[]) tuple(pred[] %compare)
}
%while_condition.1 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%call = (pred[]) call((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %param), to_apply=%compute_pred.1
ROOT %gte.4 = pred[] get-tuple-element((pred[]) %call), index=0
}
%while_body.1 {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%param.2 = f32[1024, 1024] parameter(1)
%constant.0 = s32[] constant(0)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init.0 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while.0 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init.0), condition=%while_condition.0, body=%while_body.0
%result.0 = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while.0), index=3
%new_loop_bound = s32[] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while.0), index=0
%while_init.1 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %new_loop_bound, f32[1024, 1024] %param.2, f32[1024, 1024] %result.0)
%while.1 = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init.1), condition=%while_condition.1, body=%while_body.1
ROOT %result.1 = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while.1), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 10);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 10);
}
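// The induction variable is initialized from runtime parameter %param.2, so
// the trip count is not statically known and the loop must parse as dynamic.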
TEST_F(PatternMatchParseWhileLoopTest, DynamicLoop) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%param.2 = s32[] parameter(1)
%loop_bound = s32[] constant(10)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %param.2, s32[] %loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_TRUE(parsed_while_loop->is_dynamic());
}
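// A pred[]-typed condition that the body sets to false after one iteration
// is equivalent to a static loop with trip count 1.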
TEST_F(PatternMatchParseWhileLoopTest, BooleanCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (pred[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
ROOT %gte.0 = pred[] get-tuple-element(%param), index=0
}
%while_body {
%param = (pred[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = pred[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%new_loop_cond = pred[] constant(false)
ROOT %loop_result = (pred[], f32[1024, 1024], f32[1024, 1024]) tuple(%new_loop_cond, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = pred[] constant(true)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (pred[], f32[1024, 1024], f32[1024, 1024]) tuple(pred[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (pred[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((pred[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 1);
}
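// Only the nested loop is statically analyzable here: its bound is the
// constant product 5 * 4 = 20, while the outer loop's bound depends on a
// runtime parameter.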
TEST_F(PatternMatchParseWhileLoopTest, NestedLoop) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%nested_while_condition {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%nested_while_body {
%param = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.2, f32[1024, 1024] %gte.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %accumulation)
}
%while_condition {
%param = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
ROOT result = pred[] compare(%gte.0, %gte.1), direction=LT
}
%while_body {
%param = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = s32[] get-tuple-element(%param), index=1
%gte.2 = s32[] get-tuple-element(%param), index=2
%gte.3 = f32[1024, 1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024, 1024] get-tuple-element(%param), index=4
%constant.4 = s32[] constant(0)
%nested_while_init = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.4, s32[] %gte.2, f32[1024, 1024] %gte.3, f32[1024, 1024] %gte.4)
%nested_while = (s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%nested_while_init), condition=%nested_while_condition, body=%nested_while_body
%nested_while_result = f32[1024, 1024] get-tuple-element((s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %nested_while), index=3
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %gte.2, %gte.3, %nested_while_result)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%param.2 = s32[] parameter(1)
%constant.0 = s32[] constant(0)
%constant.2 = s32[] constant(4)
%loop_bound = s32[] multiply(s32[] %param.2, s32[] %constant.2)
%constant.3 = s32[] constant(5)
%nested_loop_bound = s32[] multiply(s32[] %constant.3, s32[] %constant.2)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, s32[] %loop_bound, s32[] %nested_loop_bound, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], s32[], s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=4
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
HloComputation* while_body = while_op->while_body();
HloInstruction* nested_while =
while_body->root_instruction()->mutable_operand(4)->mutable_operand(0);
CHECK_EQ(nested_while->opcode(), HloOpcode::kWhile);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(nested_while);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 20);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 20);
}
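// copy instructions interposed on the induction variable and on the
// condition's root must be looked through when matching the loop pattern.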
TEST_F(PatternMatchParseWhileLoopTest, CopiedLoopCond) {
constexpr absl::string_view kHloModule = R"(
HloModule accumulated_all_reduce
%while_condition {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%copy.0 = s32[] copy(s32[] %gte.0)
%loop_bound = s32[] constant(5)
%result = pred[] compare(%gte.0, %loop_bound), direction=LT
ROOT %copy.1 = pred[] copy(pred[] %result)
}
%while_body {
%param = (s32[], f32[1024, 1024], f32[1024, 1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024, 1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024, 1024] get-tuple-element(%param), index=2
%accumulation = f32[1024, 1024] add(f32[1024, 1024] %gte.1, f32[1024, 1024] %gte.2)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(%increment_iteration, %gte.1, %accumulation)
}
ENTRY accumulated_all_reduce {
%param.1 = f32[1024, 1024] parameter(0)
%constant.0 = s32[] constant(0)
%accumulation_buffer_init = f32[] constant(0)
%accumulation_buffer = f32[1024, 1024] broadcast(f32[] %accumulation_buffer_init), dimensions={}
%while_init = (s32[], f32[1024, 1024], f32[1024, 1024]) tuple(s32[] %constant.0, f32[1024, 1024] %param.1, f32[1024, 1024] %accumulation_buffer)
%while = (s32[], f32[1024, 1024], f32[1024, 1024]) while(%while_init), condition=%while_condition, body=%while_body
ROOT %result = f32[1024, 1024] get-tuple-element((s32[], f32[1024, 1024], f32[1024, 1024]) %while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op =
hlo_module->entry_computation()->root_instruction()->mutable_operand(0);
std::optional<ParsedWhileLoop> parsed_while_loop =
PatternMatchParseWhileLoop(while_op);
ASSERT_TRUE(parsed_while_loop.has_value());
EXPECT_FALSE(parsed_while_loop->is_dynamic());
EXPECT_EQ(parsed_while_loop->static_while_loop->trip_count, 5);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_index, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->induction_var_init_value, 0);
EXPECT_EQ(parsed_while_loop->static_while_loop->step_size, 1);
EXPECT_EQ(parsed_while_loop->static_while_loop->loop_bound, 5);
}
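// The two tracing tests below install a multiply-accumulate (MAC) handler on
// the evaluator and check the traced (result_index, lhs_index, rhs_index)
// triples, expressed as flattened element indices, against the expected set
// for a dot and a convolution.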
TEST_F(HloEvaluatorTest, DotTraced) {
const absl::string_view hlo_text = R"(
HloModule test
ENTRY DotUpcast {
l = s16[4,3]{1,0} parameter(0)
r = s8[3,2]{1,0} parameter(1)
ROOT result = s32[4,2] dot(l, r), lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
)";
auto lhs_array = std::make_unique<Array2D<int16_t>>(4, 3);
lhs_array->FillUnique(1);
auto lhs_literal = LiteralUtil::CreateR2FromArray2D<int16_t>(*lhs_array);
auto rhs_array = std::make_unique<Array2D<int8_t>>(3, 2);
rhs_array->FillUnique(1);
auto rhs_literal = LiteralUtil::CreateR2FromArray2D<int8_t>(*rhs_array);
TF_ASSERT_OK_AND_ASSIGN(m_, ParseAndReturnVerifiedModule(hlo_text));
absl::flat_hash_set<std::array<int64_t, 3>> macs_traced;
auto mac_handler = [&macs_traced](int64_t result_index, int64_t lhs_index,
int64_t rhs_index) -> void {
macs_traced.insert(
std::array<int64_t, 3>{result_index, lhs_index, rhs_index});
};
evaluator_.set_trace_mac_handler(mac_handler);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
Evaluate({&lhs_literal, &rhs_literal}));
auto expected_array =
Array2D<int32_t>({{22, 28}, {58, 76}, {94, 124}, {130, 172}});
auto expected = LiteralUtil::CreateR2FromArray2D<int32_t>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
const absl::flat_hash_set<std::array<int64_t, 3>> macs_expected = {
{1, 0, 1}, {0, 0, 0}, {2, 4, 2}, {5, 6, 1}, {2, 5, 4}, {4, 7, 2},
{2, 3, 0}, {5, 7, 3}, {5, 8, 5}, {4, 6, 0}, {6, 9, 0}, {7, 10, 3},
{7, 11, 5}, {1, 1, 3}, {0, 2, 4}, {3, 4, 3}, {1, 2, 5}, {7, 9, 1},
{6, 10, 2}, {6, 11, 4}, {3, 5, 5}, {4, 8, 4}, {0, 1, 2}, {3, 3, 1}};
EXPECT_EQ(macs_traced, macs_expected);
}
TEST_F(HloEvaluatorTest, SimpleConvTraced) {
HloComputation::Builder b(TestName());
Array4D<float> lhs_array(1, 1, 4, 4);
lhs_array.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
auto lhs_literal = LiteralUtil::CreateR4FromArray4D<float>(lhs_array);
HloInstruction* lhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs_literal)));
Array4D<float> rhs_array(1, 1, 2, 2);
rhs_array.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
auto rhs_literal = LiteralUtil::CreateR4FromArray4D<float>(rhs_array);
HloInstruction* rhs_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs_literal)));
Window window;
WindowDimension dim;
dim.set_size(2);
dim.set_stride(1);
dim.set_padding_low(0);
dim.set_padding_high(1);
dim.set_window_dilation(1);
dim.set_base_dilation(1);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
ConvolutionDimensionNumbers dnums =
XlaBuilder::CreateDefaultConvDimensionNumbers(2);
Shape shape = ShapeUtil::MakeShape(F32, {1, 1, 4, 4});
b.AddInstruction(HloInstruction::CreateConvolve(
shape, lhs_instruction, rhs_instruction, /*feature_group_count=*/1,
/*batch_group_count=*/1, window, dnums, DefaultPrecisionConfig(2)));
m_->AddEntryComputation(b.Build());
absl::flat_hash_set<std::array<int64_t, 3>> macs_traced;
auto mac_handler = [&macs_traced](int64_t result_index, int64_t lhs_index,
int64_t rhs_index) -> void {
macs_traced.insert(
std::array<int64_t, 3>{result_index, lhs_index, rhs_index});
};
evaluator_.set_trace_mac_handler(mac_handler);
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
Array4D<float> expected_array(1, 1, 4, 4);
expected_array.FillWithYX(Array2D<float>({
{100, 126, 152, 76},
{204, 230, 256, 124},
{308, 334, 360, 172},
{149, 160, 171, 80},
}));
auto expected = LiteralUtil::CreateR4FromArray4D<float>(expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
const absl::flat_hash_set<std::array<int64_t, 3>> macs_expected = {
{10, 14, 2}, {7, 7, 0}, {11, 15, 2}, {4, 4, 0}, {3, 7, 2},
{5, 9, 2}, {8, 9, 1}, {12, 12, 0}, {6, 10, 2}, {5, 6, 1},
{13, 14, 1}, {15, 15, 0}, {11, 11, 0}, {0, 5, 3}, {10, 10, 0},
{2, 7, 3}, {13, 13, 0}, {1, 6, 3}, {0, 0, 0}, {4, 9, 3},
{8, 12, 2}, {8, 13, 3}, {9, 9, 0}, {6, 7, 1}, {9, 13, 2},
{2, 6, 2}, {0, 1, 1}, {6, 6, 0}, {5, 10, 3}, {10, 15, 3},
{14, 14, 0}, {7, 11, 2}, {0, 4, 2}, {10, 11, 1}, {6, 11, 3},
{2, 2, 0}, {3, 3, 0}, {9, 14, 3}, {12, 13, 1}, {1, 5, 2},
{5, 5, 0}, {14, 15, 1}, {1, 1, 0}, {2, 3, 1}, {4, 5, 1},
{4, 8, 2}, {9, 10, 1}, {8, 8, 0}, {1, 2, 1},
};
EXPECT_EQ(macs_traced, macs_expected);
}
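// ParseEvalErrorDetail must return nullopt for an OK status or a status
// without a payload, and decode the little-endian EvalErrorDetail otherwise.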
TEST(EvalErrorTest, OK) {
EXPECT_EQ(std::nullopt, internal::ParseEvalErrorDetail(absl::OkStatus()));
}
TEST(EvalErrorTest, NoPayload) {
EXPECT_EQ(std::nullopt,
internal::ParseEvalErrorDetail(absl::InternalError("hmm")));
}
TEST(EvalErrorTest, Payload) {
absl::Status s = absl::InternalError("hmm");
std::string payload;
payload.resize(sizeof(internal::EvalErrorDetail));
absl::little_endian::Store32(
const_cast<char*>(payload.data()),
static_cast<uint32_t>(
internal::EvalErrorDetail::kDynamicValueDependence));
s.SetPayload(internal::kEvalErrorDetailUrl, absl::Cord(payload));
EXPECT_EQ(internal::ParseEvalErrorDetail(s),
internal::EvalErrorDetail::kDynamicValueDependence);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/evaluator/hlo_evaluator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/evaluator/hlo_evaluator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d971230-2c49-4826-81f8-9511039c41a5 | cpp | tensorflow/tensorflow | auto_sharding | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding.cc | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_test.cc | #include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_cost_graph.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_device_mesh.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_util.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_wrapper.h"
#include "xla/hlo/experimental/auto_sharding/cluster_environment.h"
#include "xla/hlo/experimental/auto_sharding/matrix.h"
#include "xla/hlo/experimental/auto_sharding/metrics.h"
#include "xla/hlo/experimental/auto_sharding/profiling_result.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/computation_layout.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_value.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/sharding_propagation.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
constexpr double kSaltiplier = 0.0;  // A multiplier of 0.0 disables salting.
}
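// Returns one entry per sharding strategy of `strategy_group`: the
// communication cost of resharding an operand of `operand_shape` from that
// strategy's output sharding to `required_sharding`. A tile-maximal
// requirement is costed as if it were replicated.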
std::vector<double> CommunicationReshardingCostVector(
const StrategyGroup& strategy_group, const Shape& operand_shape,
const HloSharding& required_sharding,
const ClusterEnvironment& cluster_env) {
CHECK(!strategy_group.is_tuple) << "Only works with strategy vector.";
std::vector<double> ret;
ret.reserve(strategy_group.GetStrategies().size());
auto required_sharding_for_resharding = required_sharding.IsTileMaximal()
? HloSharding::Replicate()
: required_sharding;
for (const ShardingStrategy& x : strategy_group.GetStrategies()) {
ret.push_back(cluster_env.ReshardingCost(operand_shape, x.output_sharding,
required_sharding_for_resharding));
}
return ret;
}
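// Estimates the temporary memory (bytes beyond the source-sharded size)
// needed to reshard `shape` from `src_sharding` to `dst_sharding`. When the
// shardings tile different numbers of dimensions, the sharded size of a
// possible intermediate shape is folded into the peak estimate.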
double ComputeMemoryReshardingCost(const Shape& shape,
const HloSharding& src_sharding,
const HloSharding& dst_sharding,
const DeviceMesh& device_mesh) {
int64_t src_n_dim = NumTileDimensions(src_sharding);
int64_t dst_n_dim = NumTileDimensions(dst_sharding);
int64_t src_sharded_bytes = ByteSizeOfShapeWithSharding(shape, src_sharding);
double result = std::max(src_sharded_bytes,
ByteSizeOfShapeWithSharding(shape, dst_sharding));
if (src_n_dim != dst_n_dim && src_n_dim != -1 && dst_n_dim != -1) {
absl::StatusOr<Shape> inter_shape = ComputeIntermediateShape(
src_sharding, dst_sharding, shape, device_mesh);
if (inter_shape.ok()) {
std::optional<HloSharding> src_inter_sharding =
hlo_sharding_util::ReshapeSharding(shape, *inter_shape, src_sharding);
std::optional<HloSharding> dst_inter_sharding =
hlo_sharding_util::ReshapeSharding(shape, *inter_shape, dst_sharding);
if (!src_inter_sharding.has_value() || !dst_inter_sharding.has_value()) {
src_inter_sharding = HloSharding::Replicate();
dst_inter_sharding = HloSharding::Replicate();
}
result = std::max(
result,
static_cast<double>(std::max(
ByteSizeOfShapeWithSharding(*inter_shape, src_inter_sharding),
ByteSizeOfShapeWithSharding(*inter_shape, dst_inter_sharding))));
}
}
return result - src_sharded_bytes;
}
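// Per-strategy counterpart of ComputeMemoryReshardingCost: one entry per
// sharding strategy of `strategy_group`, resharding to `required_sharding`.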
std::vector<double> MemoryReshardingCostVector(
const StrategyGroup& strategy_group, const Shape& operand_shape,
const HloSharding& required_sharding,
const ClusterEnvironment& cluster_env) {
CHECK(!strategy_group.is_tuple) << "Only works with strategy vector.";
std::vector<double> ret;
ret.reserve(strategy_group.GetStrategies().size());
auto required_sharding_for_resharding = required_sharding.IsTileMaximal()
? HloSharding::Replicate()
: required_sharding;
CHECK_OK(required_sharding.Validate(operand_shape))
<< strategy_group.ToString();
for (const ShardingStrategy& x : strategy_group.GetStrategies()) {
ret.push_back(ComputeMemoryReshardingCost(operand_shape, x.output_sharding,
required_sharding_for_resharding,
cluster_env.device_mesh_));
}
return ret;
}
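// Strategy-group factories: a leaf group holds the strategies of one
// array/token-shaped value and is registered in `strategy_groups`; a tuple
// group (node_idx == -1) only aggregates children. CreateLeafStrategyGroup
// additionally wires `in_nodes` to the operands' strategy groups.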
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroupWithoutInNodes(
const size_t instruction_id, StrategyGroups& strategy_groups) {
auto strategy_group = std::make_unique<StrategyGroup>();
strategy_group->is_tuple = false;
strategy_group->node_idx = strategy_groups.size();
strategy_groups.push_back(strategy_group.get());
strategy_group->instruction_id = instruction_id;
return strategy_group;
}
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroup(
const size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, StrategyGroups& strategy_groups) {
auto strategy_group =
CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
for (int64_t i = 0; i < ins->operand_count(); ++i) {
strategy_group->in_nodes.push_back(strategy_map.at(ins->operand(i)).get());
}
return strategy_group;
}
std::unique_ptr<StrategyGroup> CreateTupleStrategyGroup(
const size_t instruction_id) {
auto strategy_group = std::make_unique<StrategyGroup>();
strategy_group->is_tuple = true;
strategy_group->node_idx = -1;
strategy_group->instruction_id = instruction_id;
return strategy_group;
}
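// For each operand of `ins`, computes the communication and memory resharding
// cost vectors implied by `output_sharding`, filling in missing entries of
// `input_shardings`. Token and rank-0 operands get zero costs; operands whose
// sharding cannot be inferred default to replicated (gather operand 0,
// non-zero scatter operands, custom calls, and rng-bit-generator).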
std::pair<ReshardingCosts, ReshardingCosts>
GenerateReshardingCostsAndMissingShardingsForAllOperands(
const HloInstruction* ins, const HloSharding& output_sharding,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const CallGraph& call_graph, InputShardings& input_shardings) {
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
if (input_shardings.shardings.empty() && ins->operand_count() > 0) {
input_shardings.shardings.resize(ins->operand_count());
}
for (int64_t k = 0; k < ins->operand_count(); ++k) {
const HloInstruction* operand = ins->operand(k);
const Shape& operand_shape = operand->shape();
const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
const auto& operand_strategies = operand_strategy_group.GetStrategies();
const std::vector<double> zeros(operand_strategies.size(), 0.0);
if (operand_shape.IsToken() || operand_shape.rank() == 0) {
communication_resharding_costs.push_back(zeros);
memory_resharding_costs.push_back(zeros);
if (!input_shardings.shardings[k].has_value()) {
input_shardings.shardings[k] = HloSharding::Replicate();
}
} else {
std::optional<HloSharding> cur_input_sharding;
CHECK_EQ(input_shardings.shardings.size(), ins->operand_count());
if (input_shardings.shardings[k].has_value()) {
cur_input_sharding = input_shardings.shardings[k];
} else {
cur_input_sharding = GetInputSharding(
ins, k, output_sharding, call_graph, cluster_env.NumDevices());
}
bool is_sharding_default_replicated = false;
if (!cur_input_sharding.has_value()) {
if ((ins->opcode() == HloOpcode::kGather && k == 0) ||
(ins->opcode() == HloOpcode::kScatter && k != 0)) {
is_sharding_default_replicated = true;
cur_input_sharding = HloSharding::Replicate();
} else if (ins->opcode() == HloOpcode::kCustomCall) {
is_sharding_default_replicated = true;
cur_input_sharding = HloSharding::Replicate();
} else if (ins->opcode() == HloOpcode::kRngBitGenerator) {
cur_input_sharding = HloSharding::Replicate();
}
}
CHECK(cur_input_sharding.has_value());
if (!input_shardings.shardings[k].has_value()) {
input_shardings.shardings[k] = cur_input_sharding;
}
if (ins->opcode() == HloOpcode::kGather && k == 0 &&
is_sharding_default_replicated) {
VLOG(2) << "Zeroing out operand 0 resharding costs for gather sharding "
<< output_sharding.ToString();
communication_resharding_costs.push_back(zeros);
memory_resharding_costs.push_back(zeros);
input_shardings.shardings[k] = std::nullopt;
} else {
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(
operand_strategy_group, operand_shape, *cur_input_sharding,
cluster_env));
memory_resharding_costs.push_back(
MemoryReshardingCostVector(operand_strategy_group, operand_shape,
*cur_input_sharding, cluster_env));
}
}
}
return std::make_pair(communication_resharding_costs,
memory_resharding_costs);
}
std::tuple<ReshardingCosts, ReshardingCosts, InputShardings>
GenerateReshardingCostsAndShardingsForAllOperands(
const HloInstruction* ins, const HloSharding& output_sharding,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const CallGraph& call_graph) {
InputShardings input_shardings_optional;
std::pair<ReshardingCosts, ReshardingCosts> resharding_costs =
GenerateReshardingCostsAndMissingShardingsForAllOperands(
ins, output_sharding, strategy_map, cluster_env, call_graph,
input_shardings_optional);
for (const auto& sharding_optional : input_shardings_optional.shardings) {
CHECK(sharding_optional.has_value());
}
return {resharding_costs.first, resharding_costs.second,
input_shardings_optional};
}
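// Copies every strategy of `src_strategy_group` (plus any strategies that
// were trimmed from that node earlier) into `strategy_group`, charging
// resharding costs from each in-node to the copied output sharding; the new
// group follows the source unless trimmed strategies had to be restored.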
void FollowArrayOrTokenStrategyGroup(
const StrategyGroup& src_strategy_group, const Shape& shape,
const size_t instruction_id, const ClusterEnvironment& cluster_env,
const StableMap<NodeIdx, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map,
StrategyGroup& strategy_group) {
CHECK(shape.IsArray() || shape.IsToken());
std::vector<ShardingStrategy> pretrimmed_strategies;
auto pretrimmed_strategy_map_it =
pretrimmed_strategy_map.find(src_strategy_group.node_idx);
if (pretrimmed_strategy_map_it != pretrimmed_strategy_map.end()) {
pretrimmed_strategies = pretrimmed_strategy_map_it->second;
} else {
strategy_group.following = &src_strategy_group;
}
const auto& src_strategies = src_strategy_group.GetStrategies();
for (int64_t sid = 0;
sid < src_strategies.size() + pretrimmed_strategies.size(); ++sid) {
const HloSharding* output_spec;
if (sid < src_strategies.size()) {
output_spec = &src_strategies[sid].output_sharding;
} else {
output_spec =
&pretrimmed_strategies[sid - src_strategies.size()].output_sharding;
VLOG(1) << "Adding outspec from the trimmed strategy map: "
<< output_spec->ToString();
}
const std::string name = ToStringSimple(*output_spec);
double compute_cost = 0, communication_cost = 0;
double memory_cost = ByteSizeOfShapeWithSharding(shape, *output_spec);
size_t num_in_nodes = strategy_group.in_nodes.size();
InputShardings input_shardings{name, {num_in_nodes, *output_spec}};
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
for (size_t i = 0; i < strategy_group.in_nodes.size(); ++i) {
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(*strategy_group.in_nodes[i], shape,
*output_spec, cluster_env));
memory_resharding_costs.push_back(MemoryReshardingCostVector(
*strategy_group.in_nodes[i], shape, *output_spec, cluster_env));
}
strategy_group.AddStrategy(
ShardingStrategy({*output_spec, compute_cost, communication_cost,
memory_cost, communication_resharding_costs,
memory_resharding_costs}),
input_shardings);
}
}
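// Builds a tuple strategy group for a partial reduce: every output element
// follows operand 0, the sharded reduction dimension is partially replicated
// in the output sharding, and the trailing init-value operands are assumed
// replicated.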
std::unique_ptr<StrategyGroup> HandlePartialReduce(
const HloInstruction* ins, const size_t instruction_id,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
StrategyMap& strategy_map, const CallGraph& call_graph) {
absl::StatusOr<int64_t> reduction_dim = GetPartialReduceReductionDim(ins);
CHECK_OK(reduction_dim);
const Shape& shape = ins->shape();
const HloInstruction* operand = ins->operand(0);
const StrategyGroup* src_strategy_group = strategy_map.at(operand).get();
std::unique_ptr<StrategyGroup> strategy_group =
CreateTupleStrategyGroup(instruction_id);
int64_t output_size = shape.tuple_shapes_size();
for (size_t i = 0; i < output_size; ++i) {
std::unique_ptr<StrategyGroup> child_strategy_group =
CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
child_strategy_group->in_nodes.push_back(src_strategy_group);
child_strategy_group->following = src_strategy_group;
for (const auto& src_strategy : src_strategy_group->GetStrategies()) {
const HloSharding& input_spec = src_strategy.output_sharding;
if (input_spec.IsManual() || input_spec.IsManualSubgroup()) {
continue;
}
HloSharding output_spec = input_spec;
if (!(input_spec.IsReplicated() || input_spec.IsTileMaximal())) {
output_spec = hlo_sharding_util::PartiallyReplicateTiledShardingOnDims(
input_spec, {*reduction_dim});
}
std::string name = ToStringSimple(output_spec);
InputShardings input_shardings = {std::move(name)};
for (int64_t k = 0; k < output_size * 2; ++k) {
if (k < output_size) {
input_shardings.shardings.push_back(input_spec);
} else {
input_shardings.shardings.push_back(HloSharding::Replicate());
}
}
double compute_cost = 0, communication_cost = 0;
double memory_cost = ByteSizeOfShapeWithSharding(
ins->shape().tuple_shapes(i), output_spec);
std::pair<ReshardingCosts, ReshardingCosts> resharding_costs =
GenerateReshardingCostsAndMissingShardingsForAllOperands(
ins, output_spec, strategy_map, cluster_env, call_graph,
input_shardings);
child_strategy_group->AddStrategy(
ShardingStrategy({std::move(output_spec), compute_cost,
communication_cost, memory_cost,
std::move(resharding_costs.first),
std::move(resharding_costs.second)}),
std::move(input_shardings));
}
strategy_group->AddChild(std::move(child_strategy_group));
}
return strategy_group;
}
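// Recursively builds a strategy group for `shape` that follows
// `src_strategy_group`: tuples are handled element by element, and leaves are
// delegated to FollowArrayOrTokenStrategyGroup.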
std::unique_ptr<StrategyGroup> MaybeFollowInsStrategyGroup(
const StrategyGroup& src_strategy_group, const Shape& shape,
const size_t instruction_id, StrategyGroups& strategy_groups,
const ClusterEnvironment& cluster_env,
const StableMap<NodeIdx, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map) {
const auto& children = src_strategy_group.GetChildren();
std::unique_ptr<StrategyGroup> strategy_group;
if (src_strategy_group.is_tuple) {
CHECK(shape.IsTuple());
CHECK_EQ(shape.tuple_shapes_size(), children.size());
strategy_group = CreateTupleStrategyGroup(instruction_id);
for (size_t i = 0; i < children.size(); ++i) {
auto child_strategies = MaybeFollowInsStrategyGroup(
*children[i], shape.tuple_shapes(i), instruction_id, strategy_groups,
cluster_env, pretrimmed_strategy_map);
child_strategies->tuple_element_idx = i;
strategy_group->AddChild(std::move(child_strategies));
}
} else {
strategy_group =
CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
strategy_group->in_nodes.push_back(&src_strategy_group);
FollowArrayOrTokenStrategyGroup(src_strategy_group, shape, instruction_id,
cluster_env, pretrimmed_strategy_map,
*strategy_group);
}
return strategy_group;
}
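// Generates strategies for a kReduce by pushing each operand strategy through
// a cloned reduce instruction to infer the output sharding, charging an
// all-reduce for every mesh dimension that shards a reduced tensor dimension.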
absl::StatusOr<std::unique_ptr<StrategyGroup>> FollowReduceStrategy(
const HloInstruction* ins, const Shape& output_shape,
const HloInstruction* operand, const HloInstruction* unit,
const size_t instruction_id, StrategyMap& strategy_map,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
const bool allow_mixed_mesh_shape, const bool crash_at_error) {
std::unique_ptr<StrategyGroup> strategy_group;
if (output_shape.IsTuple()) {
strategy_group = CreateTupleStrategyGroup(instruction_id);
for (size_t i = 0; i < ins->shape().tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<StrategyGroup> child_strategy,
FollowReduceStrategy(
ins, ins->shape().tuple_shapes().at(i), ins->operand(i),
ins->operand(i + ins->shape().tuple_shapes_size()),
instruction_id, strategy_map, strategy_groups, cluster_env,
allow_mixed_mesh_shape, crash_at_error));
child_strategy->tuple_element_idx = i;
strategy_group->AddChild(std::move(child_strategy));
}
} else if (output_shape.IsArray()) {
strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
strategy_groups);
const StrategyGroup* src_strategy_group = strategy_map.at(operand).get();
strategy_group->following = src_strategy_group;
std::vector<int64_t> op_dim_to_output_dim =
GetDimensionMapping(ins->dimensions(),
operand->shape().rank());
CHECK_EQ(ins->dimensions().size() + output_shape.rank(),
operand->shape().rank())
<< "Invalid kReduce: output size + reduced dimensions size != op count";
for (const auto& src_strategy : src_strategy_group->GetStrategies()) {
const HloSharding& input_sharding = src_strategy.output_sharding;
const auto& tensor_dim_to_mesh = cluster_env.GetTensorDimToMeshDimWrapper(
operand->shape(), input_sharding,
true,
crash_at_error);
if (tensor_dim_to_mesh.size() != operand->shape().rank()) {
return absl::InvalidArgumentError(
"Cannot generate tensor dim to mesh dim mapping");
}
std::vector<int64_t> all_reduce_dims;
for (int64_t op_dim = 0; op_dim < operand->shape().rank(); ++op_dim) {
int64_t mesh_dim = tensor_dim_to_mesh[op_dim];
if (mesh_dim == -1) {
continue;
}
if (op_dim_to_output_dim[op_dim] == -1) {
all_reduce_dims.push_back(mesh_dim);
}
}
std::unique_ptr<HloInstruction> operand_clone = operand->Clone();
std::unique_ptr<HloInstruction> unit_clone = unit->Clone();
std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
output_shape, operand_clone.get(), unit_clone.get(),
ins->dimensions(), ins->to_apply());
operand_clone->set_sharding(src_strategy.output_sharding);
if (!new_reduce->ReplaceOperandWith(0, operand_clone.get()).ok()) {
continue;
}
CHECK(InferReduceShardingFromOperand(new_reduce.get(), false, true));
HloSharding output_spec = new_reduce->sharding();
new_reduce.reset();
operand_clone.reset();
unit_clone.reset();
const std::string name = ToStringSimple(output_spec);
double compute_cost = 0, communication_cost = 0;
double memory_cost =
ByteSizeOfShapeWithSharding(output_shape, output_spec);
for (int64_t mesh_dim : all_reduce_dims) {
communication_cost += cluster_env.AllReduceCost(memory_cost, mesh_dim);
}
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
for (int64_t k = 0; k < ins->operand_count(); ++k) {
const HloInstruction* cur_operand = ins->operand(k);
const auto& operand_strategy_group = *strategy_map.at(cur_operand);
const auto& operand_strategies = operand_strategy_group.GetStrategies();
if (ToString(cur_operand->shape().dimensions()) ==
ToString(operand->shape().dimensions())) {
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(operand_strategy_group,
cur_operand->shape(),
input_sharding, cluster_env));
memory_resharding_costs.push_back(MemoryReshardingCostVector(
operand_strategy_group, cur_operand->shape(), input_sharding,
cluster_env));
} else {
const std::vector<double> zeros(operand_strategies.size(), 0);
communication_resharding_costs.push_back(zeros);
memory_resharding_costs.push_back(zeros);
}
}
const ShardingStrategy strategy = ShardingStrategy(
{output_spec, compute_cost, communication_cost, memory_cost,
communication_resharding_costs, memory_resharding_costs});
strategy_group->AddStrategy(strategy, {name, {input_sharding}});
}
} else {
LOG(FATAL) << "Unhandled kReduce shape: " << ins->shape().ToString();
}
return strategy_group;
}
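// Returns the indices of all fully replicated strategies in `strategies`.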
std::vector<size_t> FindReplicateStrategyIndices(
const std::vector<ShardingStrategy>& strategies) {
std::vector<size_t> indices;
for (size_t i = 0; i < strategies.size(); i++) {
if (strategies.at(i).output_sharding.IsReplicated()) {
indices.push_back(i);
}
}
return indices;
}
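// For a tuple-shaped operand, builds resharding costs that force every tuple
// element onto a replicated strategy (all other choices get infinite
// communication cost) and returns the matching replicated tuple sharding.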
std::tuple<ReshardingCosts, ReshardingCosts, InputShardings>
ReshardingCostsForTupleOperand(const HloInstruction* operand,
const StrategyGroup& operand_strategy_vector) {
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
std::vector<HloSharding> tuple_element_shardings;
for (size_t tuple_element_idx = 0;
tuple_element_idx < operand->shape().tuple_shapes_size();
tuple_element_idx++) {
const StrategyGroup& tuple_element_strategy_group =
*operand_strategy_vector.GetChildren()[tuple_element_idx];
const auto& tuple_element_strategies =
tuple_element_strategy_group.GetStrategies();
std::vector<size_t> indices =
FindReplicateStrategyIndices(tuple_element_strategies);
CHECK_GT(indices.size(), 0)
<< "There is no replicated strategy in instruction "
<< operand->ToString() << ".\nStrategies:\n"
<< tuple_element_strategy_group.ToString();
memory_resharding_costs.push_back(
std::vector<double>(tuple_element_strategies.size(), 0));
communication_resharding_costs.push_back(
std::vector<double>(tuple_element_strategies.size(), kInfinityCost));
tuple_element_shardings.push_back(HloSharding::Replicate());
for (const size_t i : indices) {
communication_resharding_costs.back().at(i) = 0.0;
}
}
return {
communication_resharding_costs,
memory_resharding_costs,
{{}, {HloSharding::Tuple(operand->shape(), tuple_element_shardings)}}};
}
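// Builds all-zero resharding cost vectors for each operand of `ins`, sized to
// the operand's strategy count, with special handling for the tuple operands
// of conditionals and outfeeds.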
ReshardingCosts CreateZeroReshardingCostsForAllOperands(
const HloInstruction* ins, const StrategyMap& strategy_map) {
ReshardingCosts resharding_costs;
for (size_t i = 0; i < ins->operand_count(); ++i) {
const HloInstruction* operand = ins->operand(i);
const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
if (operand->shape().IsTuple()) {
if (ins->opcode() == HloOpcode::kConditional ||
ins->opcode() == HloOpcode::kOutfeed) {
resharding_costs.push_back(std::vector<double>(1, 0));
} else {
CHECK_EQ(ins->operand_count(), 0)
<< "Do not support instructions with more than one tuple "
"operand.";
for (size_t tuple_element_idx = 0;
tuple_element_idx < operand->shape().tuple_shapes_size();
tuple_element_idx++) {
const StrategyGroup& tuple_element_strategy_group =
*operand_strategy_group.GetChildren().at(tuple_element_idx);
resharding_costs.push_back(std::vector<double>(
tuple_element_strategy_group.GetStrategies().size(), 0));
}
}
} else {
const auto& strategies = operand_strategy_group.GetStrategies();
resharding_costs.push_back(std::vector<double>(strategies.size(), 0));
}
}
return resharding_costs;
}
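// Adds the single replicated strategy used for outfeed instructions. When the
// outfeed carries a user sharding, per-tuple-element input shardings (and the
// token operand's sharding) are derived from it; otherwise all resharding
// costs are zero.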
void GenerateOutfeedStrategy(const HloInstruction* ins, const Shape& shape,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
const double replicated_penalty,
StrategyGroup& strategy_group) {
HloSharding output_spec = HloSharding::Replicate();
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
InputShardings input_shardings = {"R"};
const int tuple_size = ins->operand(0)->shape().tuple_shapes_size();
const auto& operand_strategy_group = strategy_map.at(ins->operand(0));
const auto& operand_children = operand_strategy_group->GetChildren();
if (ins->has_sharding()) {
std::vector<Shape> operand_shapes(ins->operand_count());
for (int i = 0; i < ins->operand_count(); ++i) {
operand_shapes[i] = ins->operand(i)->shape();
}
auto all_operands_tuple_shape = ShapeUtil::MakeTupleShape(operand_shapes);
auto get_input_sharding = [&](int index) {
auto sharding = ins->sharding();
if (sharding.IsTuple()) {
return (index >= 0)
? sharding.GetSubSharding(all_operands_tuple_shape,
{0, static_cast<int64_t>(index)})
: sharding.GetSubSharding(all_operands_tuple_shape, {1});
} else {
return sharding;
}
};
for (size_t i = 0; i < tuple_size; ++i) {
const StrategyGroup& child = *operand_children[i];
const Shape& tuple_shape = ins->operand(0)->shape().tuple_shapes(i);
const HloSharding& input_sharding = get_input_sharding(i);
input_shardings.shardings.push_back(input_sharding);
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(child, tuple_shape, input_sharding,
cluster_env));
memory_resharding_costs.push_back(MemoryReshardingCostVector(
child, tuple_shape, input_sharding, cluster_env));
}
const HloSharding& input_sharding = get_input_sharding(-1);
input_shardings.shardings.push_back(input_sharding);
} else {
for (size_t i = 0; i < tuple_size; ++i) {
const StrategyGroup& child = *operand_children[i];
const std::vector<double> zeros(child.GetStrategies().size(), 0);
communication_resharding_costs.push_back(zeros);
memory_resharding_costs.push_back(zeros);
}
}
communication_resharding_costs.push_back({});
memory_resharding_costs.push_back({});
double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
strategy_group.AddStrategy(
ShardingStrategy({HloSharding::Replicate(), replicated_penalty, 0,
memory_cost, std::move(communication_resharding_costs),
std::move(memory_resharding_costs)}),
input_shardings);
}
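// Models the extra communication cost of `ins` under the given operand
// shardings. Only kGather is handled: a sharded first operand is charged an
// all-reduce over the largest mesh dimension.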
double ComputeCommunicationCost(const HloInstruction* ins,
const InputShardings& operand_shardings,
const ClusterEnvironment& cluster_env) {
switch (ins->opcode()) {
case HloOpcode::kGather: {
if (operand_shardings.shardings[0].has_value() &&
!operand_shardings.shardings[0]->IsReplicated()) {
auto mesh_shape = cluster_env.device_mesh_.dimensions();
auto mesh_dim = std::distance(
mesh_shape.begin(),
std::max_element(mesh_shape.begin(), mesh_shape.end()));
return cluster_env.AllReduceCost(ByteSizeOfShape(ins->shape()),
mesh_dim);
}
return 0;
}
default:
LOG(FATAL) << "Unhandled instruction " << ins->ToString();
}
}
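// Adds replicated-output strategies for `ins`. If a single operand index is
// supplied in `operands_to_consider_all_strategies_for`, one strategy is
// created per strategy of that operand (all other operands stay replicated);
// otherwise a single strategy with fully replicated inputs is added.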
void AddReplicatedStrategy(
const HloInstruction* ins, const Shape& shape,
const ClusterEnvironment& cluster_env, const StrategyMap& strategy_map,
const double replicated_penalty,
absl::flat_hash_set<int64_t> operands_to_consider_all_strategies_for,
StrategyGroup& strategy_group) {
HloSharding replicated_strategy = HloSharding::Replicate();
HloSharding output_spec = replicated_strategy;
double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
CHECK_LE(operands_to_consider_all_strategies_for.size(), 1);
if (!operands_to_consider_all_strategies_for.empty()) {
int64_t operand_to_consider_all_strategies_for =
*operands_to_consider_all_strategies_for.begin();
auto operand = ins->operand(operand_to_consider_all_strategies_for);
CHECK(!operand->shape().IsTuple());
const auto& operand_strategy_group = strategy_map.at(operand).get();
const auto& operand_strategies = operand_strategy_group->GetStrategies();
InputShardings input_shardings = {"R"};
input_shardings.shardings.resize(ins->operand_count());
std::vector<InputShardings> possible_input_shardings(
operand_strategies.size(), input_shardings);
std::vector<ReshardingCosts> possible_communication_resharding_costs(
operand_strategies.size(), ReshardingCosts(ins->operand_count()));
std::vector<ReshardingCosts> possible_memory_resharding_costs(
operand_strategies.size(), ReshardingCosts(ins->operand_count()));
for (int64_t k = 0; k < ins->operand_count(); ++k) {
const HloInstruction* operand = ins->operand(k);
const Shape& operand_shape = operand->shape();
CHECK(!operand_shape.IsTuple());
const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
if (k == operand_to_consider_all_strategies_for) {
CHECK_EQ(possible_input_shardings.size(), operand_strategies.size());
for (size_t j = 0; j < possible_input_shardings.size(); ++j) {
const auto& operand_sharding = operand_strategies[j].output_sharding;
possible_input_shardings[j].shardings[k] = operand_sharding;
possible_communication_resharding_costs[j][k] =
CommunicationReshardingCostVector(operand_strategy_group,
operand_shape, operand_sharding,
cluster_env);
possible_memory_resharding_costs[j][k] =
MemoryReshardingCostVector(operand_strategy_group, operand_shape,
operand_sharding, cluster_env);
}
} else {
for (size_t j = 0; j < possible_input_shardings.size(); ++j) {
possible_input_shardings[j].shardings[k] = replicated_strategy;
possible_communication_resharding_costs[j][k] =
CommunicationReshardingCostVector(
operand_strategy_group, operand_shape, replicated_strategy,
cluster_env);
possible_memory_resharding_costs[j][k] =
MemoryReshardingCostVector(operand_strategy_group, operand_shape,
replicated_strategy, cluster_env);
}
}
}
for (size_t j = 0; j < possible_input_shardings.size(); ++j) {
double communication_cost = ComputeCommunicationCost(
ins, possible_input_shardings[j], cluster_env);
strategy_group.AddStrategy(
ShardingStrategy(
{replicated_strategy, replicated_penalty, communication_cost,
memory_cost,
std::move(possible_communication_resharding_costs[j]),
std::move(possible_memory_resharding_costs[j])}),
std::move(possible_input_shardings[j]));
}
} else {
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
InputShardings input_shardings = {"R"};
if (ins->operand_count() > 0 && ins->operand(0)->shape().IsTuple()) {
CHECK_EQ(ins->operand_count(), 1)
<< "Do not support instructions with more than one tuple "
"operand. If this CHECK fails, we will need to fix "
"b/233412625.";
std::tie(communication_resharding_costs, memory_resharding_costs,
input_shardings) =
ReshardingCostsForTupleOperand(ins->operand(0),
*strategy_map.at(ins->operand(0)));
} else {
for (int64_t k = 0; k < ins->operand_count(); ++k) {
const HloInstruction* operand = ins->operand(k);
const Shape& operand_shape = operand->shape();
        const StrategyGroup& operand_strategy_group =
            *strategy_map.at(operand);
        const auto& operand_strategies = operand_strategy_group.GetStrategies();
if (ins->opcode() == HloOpcode::kConditional) {
std::vector<double> zeros(operand_strategies.size(), 0);
communication_resharding_costs.push_back(zeros);
memory_resharding_costs.push_back(zeros);
} else {
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(operand_strategy_group,
operand_shape, output_spec,
cluster_env));
memory_resharding_costs.push_back(MemoryReshardingCostVector(
operand_strategy_group, operand_shape, output_spec, cluster_env));
input_shardings.shardings.push_back(output_spec);
}
}
}
strategy_group.AddStrategy(
ShardingStrategy({HloSharding::Replicate(), replicated_penalty, 0,
memory_cost,
std::move(communication_resharding_costs),
std::move(memory_resharding_costs)}),
input_shardings);
}
}
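// Returns the all-to-all cost incurred when the sort dimension coincides with
// the sharded operand dimension, and zero otherwise.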
double ComputeSortCommunicationCost(const int64_t sort_dim,
const int64_t operand_sharded_dim,
const int64_t mesh_sharding_dim,
const Shape& shape,
const ClusterEnvironment& cluster_env) {
if (sort_dim == operand_sharded_dim) {
return cluster_env.AllToAllCost(ByteSizeOfShape(shape), mesh_sharding_dim);
}
return 0;
}
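// Enumerates "S{i} @ {j}" strategies that shard a single tensor dimension i
// along a single mesh dimension j, skipping trivial mesh dimensions and,
// depending on the flags, non-divisible or undersized tensor dimensions.
// Sort and top-k instructions get an extra communication cost term.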
void EnumerateAll1DPartition(
const HloInstruction* ins, const Shape& shape,
const DeviceMesh& device_mesh, const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map, const bool only_allow_divisible,
bool allow_shardings_small_dims_across_many_devices,
const std::string& suffix, const CallGraph& call_graph,
StrategyGroup& strategy_group) {
for (int64_t i = 0; i < shape.rank(); ++i) {
for (int64_t j = 0; j < device_mesh.num_dimensions(); ++j) {
bool small_dims_sharding_check =
!allow_shardings_small_dims_across_many_devices &&
shape.dimensions(i) < device_mesh.dim(j);
bool divisibility_check =
(only_allow_divisible &&
!IsDivisible(shape.dimensions(i), device_mesh.dim(j)));
if (device_mesh.dim(j) == 1 || small_dims_sharding_check ||
divisibility_check) {
continue;
}
const std::string name = absl::StrFormat("S%d @ %d", i, j) + suffix;
HloSharding output_spec = Tile(shape, {i}, {j}, device_mesh);
double compute_cost = 0, communication_cost = 0;
double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
InputShardings input_shardings = {name};
if (ins->opcode() == HloOpcode::kConditional) {
communication_resharding_costs =
CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
memory_resharding_costs =
CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
} else if (ins->operand_count() > 0 &&
ins->operand(0)->shape().IsTuple()) {
CHECK_EQ(ins->operand_count(), 1)
<< "Do not support instructions with more than one tuple "
"operand.";
std::tie(communication_resharding_costs, memory_resharding_costs,
input_shardings) =
ReshardingCostsForTupleOperand(ins->operand(0),
*strategy_map.at(ins->operand(0)));
} else if (ins->opcode() == HloOpcode::kRngBitGenerator &&
ins->operand(0)->shape().IsArray()) {
input_shardings.shardings.push_back(HloSharding::Replicate());
std::tie(communication_resharding_costs, memory_resharding_costs) =
GenerateReshardingCostsAndMissingShardingsForAllOperands(
ins, output_spec, strategy_map, cluster_env, call_graph,
input_shardings);
} else {
std::tie(communication_resharding_costs, memory_resharding_costs,
input_shardings) =
GenerateReshardingCostsAndShardingsForAllOperands(
ins, output_spec, strategy_map, cluster_env, call_graph);
}
if (ins->opcode() == HloOpcode::kSort) {
auto sort_ins = xla::DynCast<HloSortInstruction>(ins);
CHECK(sort_ins);
communication_cost = ComputeSortCommunicationCost(
sort_ins->sort_dimension(), i, j, shape, cluster_env);
} else if (IsTopKCustomCall(ins)) {
communication_cost = ComputeSortCommunicationCost(
ins->operand(0)->shape().rank() - 1, i, j, shape, cluster_env);
}
strategy_group.AddStrategy(
ShardingStrategy({output_spec, compute_cost, communication_cost,
memory_cost,
std::move(communication_resharding_costs),
std::move(memory_resharding_costs)}),
input_shardings);
}
}
}
void BuildStrategyAndCostForOp(const HloInstruction* ins, const Shape& shape,
const DeviceMesh& device_mesh,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
const CallGraph& call_graph,
absl::Span<const int64_t> tensor_dims,
StrategyGroup& strategy_group);
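// Recursively enumerates assignments of `partition_dimensions` distinct
// tensor dimensions to the leading mesh dimensions; each complete assignment
// is turned into a strategy by BuildStrategyAndCostForOp.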
void EnumerateAllPartition(
const HloInstruction* ins, const Shape& shape,
const DeviceMesh& device_mesh, const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map, bool only_allow_divisible,
bool allow_shardings_small_dims_across_many_devices,
const CallGraph& call_graph, const int64_t partition_dimensions,
const std::vector<int64_t>& tensor_dims, StrategyGroup& strategy_group) {
const auto tensor_dims_size = tensor_dims.size();
if (tensor_dims_size == partition_dimensions) {
BuildStrategyAndCostForOp(ins, shape, device_mesh, cluster_env,
strategy_map, call_graph, tensor_dims,
strategy_group);
return;
}
for (int64_t i = 0; i < shape.rank(); ++i) {
auto tensor_it = std::find(tensor_dims.begin(), tensor_dims.end(), i);
if (tensor_it != tensor_dims.end()) {
continue;
}
if (!allow_shardings_small_dims_across_many_devices &&
shape.dimensions(i) < device_mesh.dim(tensor_dims_size)) {
continue;
}
if (only_allow_divisible &&
!IsDivisible(shape.dimensions(i), device_mesh.dim(tensor_dims_size))) {
continue;
}
std::vector<int64_t> next_tensor_dims = tensor_dims;
next_tensor_dims.push_back(i);
EnumerateAllPartition(
ins, shape, device_mesh, cluster_env, strategy_map,
only_allow_divisible, allow_shardings_small_dims_across_many_devices,
call_graph, partition_dimensions, next_tensor_dims, strategy_group);
}
}
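// Builds one "S{tensor_dims} @ {mesh_dims}" strategy that tiles the given
// tensor dimensions across the leading mesh dimensions, including resharding
// costs and a sort/top-k all-to-all cost when the sorted dimension is tiled.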
void BuildStrategyAndCostForOp(const HloInstruction* ins, const Shape& shape,
const DeviceMesh& device_mesh,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
const CallGraph& call_graph,
absl::Span<const int64_t> tensor_dims,
StrategyGroup& strategy_group) {
std::vector<int64_t> mesh_dims(tensor_dims.size());
std::iota(mesh_dims.begin(), mesh_dims.end(), 0);
const std::string name =
absl::StrFormat("S{%s} @ {%s}", absl::StrJoin(tensor_dims, ","),
absl::StrJoin(mesh_dims, ","));
HloSharding output_spec = Tile(shape, tensor_dims, mesh_dims, device_mesh);
double compute_cost = 0, communication_cost = 0;
double memory_cost = ByteSizeOfShapeWithSharding(shape, output_spec);
InputShardings input_shardings = {name};
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
if (ins->opcode() == HloOpcode::kConditional) {
communication_resharding_costs =
CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
memory_resharding_costs =
CreateZeroReshardingCostsForAllOperands(ins, strategy_map);
} else if (ins->operand_count() > 0 && ins->operand(0)->shape().IsTuple()) {
CHECK_EQ(ins->operand_count(), 1)
<< "Do not support instructions with more than one tuple "
"operand. If this CHECK fails, we will need to fix "
"b/233412625.";
std::tie(communication_resharding_costs, memory_resharding_costs,
input_shardings) =
ReshardingCostsForTupleOperand(ins->operand(0),
*strategy_map.at(ins->operand(0)));
} else {
std::tie(communication_resharding_costs, memory_resharding_costs,
input_shardings) =
GenerateReshardingCostsAndShardingsForAllOperands(
ins, output_spec, strategy_map, cluster_env, call_graph);
}
int64_t sort_or_topk_dim = -1;
if (ins->opcode() == HloOpcode::kSort) {
auto sort_ins = xla::DynCast<HloSortInstruction>(ins);
CHECK(sort_ins);
sort_or_topk_dim = sort_ins->sort_dimension();
} else if (IsTopKCustomCall(ins)) {
sort_or_topk_dim = ins->operand(0)->shape().rank() - 1;
}
if (sort_or_topk_dim != -1) {
if (auto index = GetIndex(tensor_dims, sort_or_topk_dim); index != -1) {
communication_cost = ComputeSortCommunicationCost(
sort_or_topk_dim, sort_or_topk_dim, index, shape, cluster_env);
}
}
strategy_group.AddStrategy(
ShardingStrategy({output_spec, compute_cost, communication_cost,
memory_cost, std::move(communication_resharding_costs),
std::move(memory_resharding_costs)}),
input_shardings);
}
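// Enumerates 1D sharded strategies for a reshape, keeping only output
// shardings that ReshapeSharding can propagate back to a valid operand
// sharding (and, on 1D meshes, only single-dimension input tilings).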
void EnumerateAll1DPartitionReshape(const HloInstruction* ins,
const DeviceMesh& device_mesh,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
bool only_allow_divisible,
const std::string& suffix,
StrategyGroup& strategy_group) {
const HloInstruction* operand = ins->operand(0);
const Shape& operand_shape = operand->shape();
const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
for (int64_t i = 0; i < ins->shape().rank(); ++i) {
for (int64_t j = 0; j < device_mesh.num_dimensions(); ++j) {
if (device_mesh.dim(j) == 1 ||
(only_allow_divisible &&
!IsDivisible(ins->shape().dimensions(i), device_mesh.dim(j)))) {
continue;
}
HloSharding output_spec = Tile(ins->shape(), {i}, {j}, device_mesh);
std::optional<HloSharding> input_spec =
hlo_sharding_util::ReshapeSharding(ins->shape(), operand_shape,
output_spec);
if (!input_spec.has_value()) {
continue;
}
if (cluster_env.IsDeviceMesh1D() &&
VectorGreaterThanOneElementCount(
input_spec->tile_assignment().dimensions()) > 1) {
continue;
}
const std::string name = absl::StrFormat("S%d @ %d", i, j) + suffix;
double compute_cost = 0, communication_cost = 0;
double memory_cost =
ByteSizeOfShapeWithSharding(ins->shape(), output_spec);
ReshardingCosts communication_resharding_costs{
CommunicationReshardingCostVector(
operand_strategy_group, operand_shape, *input_spec, cluster_env)};
ReshardingCosts memory_resharding_costs{MemoryReshardingCostVector(
operand_strategy_group, operand_shape, *input_spec, cluster_env)};
strategy_group.AddStrategy(
ShardingStrategy({output_spec, compute_cost, communication_cost,
memory_cost,
std::move(communication_resharding_costs),
std::move(memory_resharding_costs)}),
{name, {*input_spec}});
}
}
}
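// Returns the maximum tile count across the strategies of `ins`, first
// walking the `following` chain to the strategy group that actually holds
// them.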
int64_t MaxNumTiles(const StrategyMap& strategy_map,
const HloInstruction* ins) {
const StrategyGroup* strategy_group = strategy_map.at(ins).get();
while (strategy_group->following != nullptr) {
strategy_group = strategy_group->following;
}
int64_t max_num_tiles = -1;
for (const ShardingStrategy& strategy : strategy_group->GetStrategies()) {
max_num_tiles =
std::max(max_num_tiles, strategy.output_sharding.NumTiles());
}
return max_num_tiles;
}
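// Picks the operand that `ins` should follow: an aliased operand wins
// outright; otherwise operands are scored by maximum tile count plus a small
// depth bonus. The returned bool reports whether the top scores were tied.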
std::pair<int64_t, bool> ChooseOperandToFollow(
const StrategyMap& strategy_map, const InstructionDepthMap& depth_map,
const AliasMap& alias_map, const int64_t max_depth,
const HloInstruction* ins) {
auto it = alias_map.find(ins);
if (it != alias_map.end()) {
for (int64_t i = 0; i < ins->operand_count(); ++i) {
const HloInstruction* operand = ins->operand(i);
if (operand == it->second) {
return {i, false};
}
}
}
std::optional<int64_t> follow_idx;
bool tie = false;
double max_priority = -1e20;
double depth_normalizer = 0.1 / max_depth;
double range_delta = 4 * depth_normalizer;
for (int64_t i = 0; i < ins->operand_count(); ++i) {
const HloInstruction* operand = ins->operand(i);
double priority = MaxNumTiles(strategy_map, operand) +
depth_map.at(operand) * depth_normalizer;
if (priority > max_priority + range_delta) {
follow_idx = i;
tie = false;
max_priority = priority;
} else if (priority >= max_priority - range_delta) {
tie = true;
}
}
CHECK(follow_idx.has_value());
return {*follow_idx, tie};
}
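// Returns false for instructions where following a tied operand is disabled:
// compare/and ops and instructions with exactly three operands.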
bool AllowTieFollowing(const HloInstruction* ins) {
if (ins->opcode() == HloOpcode::kCompare ||
ins->opcode() == HloOpcode::kAnd) {
return false;
}
if (ins->operand_count() == 3) {
return false;
}
return true;
}
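// Fills `strategy_group` with the candidate strategies for an array value:
// 1D partitions, 2D/3D partitions when the mesh allows, penalized mixed-mesh
// 1D partitions, and a replicated fallback when requested or when nothing
// else was generated.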
void FillAllStrategiesForArray(
const HloInstruction* ins, const Shape& shape,
const ClusterEnvironment& cluster_env, const StrategyMap& strategy_map,
const AutoShardingOption& option, const double replicated_penalty,
const CallGraph& call_graph, const bool only_allow_divisible,
const bool create_replicated_strategies,
const bool create_partially_replicated_strategies,
StrategyGroup& strategy_group) {
if (create_partially_replicated_strategies || cluster_env.IsDeviceMesh1D()) {
EnumerateAll1DPartition(
ins, shape, cluster_env.device_mesh_, cluster_env, strategy_map,
only_allow_divisible,
option.allow_shardings_small_dims_across_many_devices, "", call_graph,
strategy_group);
}
if (cluster_env.IsDeviceMesh2D()) {
EnumerateAllPartition(ins, shape, cluster_env.device_mesh_, cluster_env,
strategy_map, only_allow_divisible,
option.allow_shardings_small_dims_across_many_devices,
                          call_graph, /*partition_dimensions=*/2,
                          /*tensor_dims=*/{}, strategy_group);
}
if (cluster_env.IsDeviceMesh3D()) {
EnumerateAllPartition(ins, shape, cluster_env.device_mesh_, cluster_env,
strategy_map, only_allow_divisible,
option.allow_shardings_small_dims_across_many_devices,
                          call_graph, /*partition_dimensions=*/3,
                          /*tensor_dims=*/{}, strategy_group);
}
if (option.allow_mixed_mesh_shape && cluster_env.IsDeviceMesh2D()) {
for (size_t i = 0; i < strategy_group.GetStrategies().size(); ++i) {
strategy_group.GetStrategy(i).compute_cost += replicated_penalty * 0.8;
}
EnumerateAll1DPartition(
ins, shape, cluster_env.device_mesh_1d_, cluster_env, strategy_map,
only_allow_divisible,
option.allow_shardings_small_dims_across_many_devices, " 1d",
call_graph, strategy_group);
}
if (create_replicated_strategies || strategy_group.GetStrategies().empty()) {
AddReplicatedStrategy(ins, shape, cluster_env, strategy_map,
replicated_penalty, {}, strategy_group);
}
}
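// Creates the strategy group for `shape`: tuples recurse per element, arrays
// are filled via FillAllStrategiesForArray, and tokens get a single
// replicated strategy.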
absl::StatusOr<std::unique_ptr<StrategyGroup>> CreateAllStrategiesGroup(
const HloInstruction* ins, const Shape& shape, const size_t instruction_id,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map, const AutoShardingOption& option,
const double replicated_penalty, const CallGraph& call_graph,
const bool only_allow_divisible, const bool create_replicated_strategies,
const bool create_partially_replicated_strategies) {
std::unique_ptr<StrategyGroup> strategy_group;
if (shape.IsTuple()) {
strategy_group = CreateTupleStrategyGroup(instruction_id);
for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
auto child_strategies =
CreateAllStrategiesGroup(
ins, shape.tuple_shapes(i), instruction_id, strategy_groups,
cluster_env, strategy_map, option, replicated_penalty, call_graph,
only_allow_divisible, create_replicated_strategies,
create_partially_replicated_strategies)
.value();
child_strategies->tuple_element_idx = i;
strategy_group->AddChild(std::move(child_strategies));
}
} else if (shape.IsArray()) {
strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
strategy_groups);
FillAllStrategiesForArray(
ins, shape, cluster_env, strategy_map, option, replicated_penalty,
call_graph, only_allow_divisible, create_replicated_strategies,
create_partially_replicated_strategies, *strategy_group);
} else if (shape.IsToken()) {
strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
strategy_groups);
AddReplicatedStrategy(ins, shape, cluster_env, strategy_map,
replicated_penalty, {}, *strategy_group);
} else {
LOG(FATAL) << "Unsupported instruction shape: " << shape.DebugString();
}
return strategy_group;
}
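// Checks whether `partial_sharding` is consistent with `complete_sharding`:
// in strict mode some dimension must be sharded by exactly the same factor;
// otherwise it suffices that both shard a common dimension.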
bool ShardingIsConsistent(const HloSharding& partial_sharding,
const HloSharding& complete_sharding, bool strict) {
if (partial_sharding.tile_assignment().num_dimensions() >
complete_sharding.tile_assignment().num_dimensions()) {
return false;
}
for (size_t i = 0; i < partial_sharding.tile_assignment().num_dimensions();
++i) {
if (strict && partial_sharding.tile_assignment().dim(i) > 1 &&
partial_sharding.tile_assignment().dim(i) ==
complete_sharding.tile_assignment().dim(i)) {
return true;
}
if (!strict && partial_sharding.tile_assignment().dim(i) > 1 &&
complete_sharding.tile_assignment().dim(i) > 1) {
return true;
}
}
return false;
}
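// Reconciles a strategy set with a user-provided `existing_sharding`: when
// the sharding is complete, keep only matching strategies (stashing the
// originals in `pretrimmed_strategy_map`) or synthesize a new strategy that
// realizes it; otherwise, for non-following groups, trim to strategies
// consistent with the partial sharding.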
void TrimOrGenerateStrategiesBasedOnExistingSharding(
const Shape& output_shape, const StrategyMap& strategy_map,
const std::vector<HloInstruction*>& instructions,
const HloSharding& existing_sharding, const ClusterEnvironment& cluster_env,
StableMap<int64_t, std::vector<ShardingStrategy>>& pretrimmed_strategy_map,
const CallGraph& call_graph, const bool strict,
StrategyGroup& strategy_group) {
if (strategy_group.is_tuple) {
for (size_t i = 0; i < strategy_group.GetChildren().size(); ++i) {
TrimOrGenerateStrategiesBasedOnExistingSharding(
output_shape.tuple_shapes(i), strategy_map, instructions,
existing_sharding.tuple_elements().at(i), cluster_env,
pretrimmed_strategy_map, call_graph, strict,
strategy_group.GetChild(i));
}
} else {
if (existing_sharding.IsUnknown()) {
return;
}
if (spmd::ShardingIsComplete(existing_sharding,
cluster_env.device_mesh_.num_elements())) {
strategy_group.following = nullptr;
std::vector<std::pair<ShardingStrategy, InputShardings>> new_strategies;
const auto& strategy_input_shardings =
strategy_group.GetStrategyInputShardings();
for (size_t iid = 0; iid < strategy_input_shardings.size(); ++iid) {
const InputShardings& input_shardings = strategy_input_shardings[iid];
const ShardingStrategy& strategy =
strategy_group.GetStrategyForInputShardings(iid);
if (strategy.output_sharding == existing_sharding) {
VLOG(1) << "Keeping strategy: " << strategy.ToString();
new_strategies.push_back({strategy, input_shardings});
}
}
if (!new_strategies.empty()) {
pretrimmed_strategy_map[strategy_group.node_idx] =
strategy_group.GetStrategies();
strategy_group.ClearStrategies();
for (const auto& [strategy, input_shardings] : new_strategies) {
strategy_group.AddStrategy(strategy, input_shardings);
}
} else {
VLOG(1) << "Generate a new strategy based on user sharding.";
std::string name = ToStringSimple(existing_sharding);
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
InputShardings input_shardings = {name};
if (!strategy_group.in_nodes.empty()) {
HloInstruction* ins = instructions.at(strategy_group.instruction_id);
for (size_t i = 0; i < strategy_group.in_nodes.size(); i++) {
HloInstruction* operand =
instructions.at(strategy_group.in_nodes.at(i)->instruction_id);
std::optional<HloSharding> input_sharding =
ShardingPropagation::GetShardingFromUser(
                    *operand, *ins, /*aggressiveness=*/10, /*is_spmd=*/true,
                    call_graph, /*sharding_helper=*/nullptr);
StrategyGroup* operand_strategy_group =
strategy_map.at(operand).get();
Shape operand_shape = operand->shape();
if (ins->opcode() == HloOpcode::kGetTupleElement) {
if (input_sharding && input_sharding->IsTuple()) {
input_sharding = input_sharding->GetSubSharding(
operand->shape(), {ins->tuple_index()});
}
operand_strategy_group =
&operand_strategy_group->GetChild(ins->tuple_index());
operand_shape = operand->shape().tuple_shapes(ins->tuple_index());
}
if (!input_sharding) {
if (existing_sharding.Validate(operand_shape).ok()) {
input_sharding = existing_sharding;
} else {
input_sharding = HloSharding::Replicate();
}
}
CHECK(input_sharding.has_value());
input_shardings.shardings.push_back(*input_sharding);
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(
*operand_strategy_group, operand_shape, *input_sharding,
cluster_env));
memory_resharding_costs.push_back(MemoryReshardingCostVector(
*operand_strategy_group, operand_shape, *input_sharding,
cluster_env));
}
}
double memory_cost =
ByteSizeOfShapeWithSharding(output_shape, existing_sharding);
if (!strategy_group.GetStrategies().empty()) {
pretrimmed_strategy_map[strategy_group.node_idx] =
strategy_group.GetStrategies();
}
strategy_group.ClearStrategies();
strategy_group.AddStrategy(
ShardingStrategy({existing_sharding, 0, 0, memory_cost,
communication_resharding_costs,
memory_resharding_costs}),
input_shardings);
}
if (strategy_group.GetStrategies().size() == 1) {
for (auto& operand_communication_resharding_costs :
strategy_group.GetStrategy(0).communication_resharding_costs) {
if (operand_communication_resharding_costs.size() == 1 &&
operand_communication_resharding_costs[0] >= kInfinityCost) {
operand_communication_resharding_costs[0] = 0;
}
}
}
} else if (!strategy_group.following) {
std::vector<std::pair<ShardingStrategy, InputShardings>> new_vector;
const auto& strategy_input_shardings =
strategy_group.GetStrategyInputShardings();
for (size_t iid = 0; iid < strategy_input_shardings.size(); ++iid) {
const InputShardings& input_shardings = strategy_input_shardings[iid];
const ShardingStrategy& strategy =
strategy_group.GetStrategyForInputShardings(iid);
if (strategy.output_sharding.IsReplicated() ||
ShardingIsConsistent(existing_sharding, strategy.output_sharding,
strict) ||
(VectorGreaterThanOneElementCount(
strategy.output_sharding.tile_assignment().dimensions()) ==
1 &&
spmd::ShardingIsComplete(
strategy.output_sharding,
cluster_env.original_device_mesh_.num_elements()))) {
new_vector.push_back({strategy, input_shardings});
}
}
if (!new_vector.empty() &&
new_vector.size() != strategy_group.GetStrategies().size()) {
strategy_group.following = nullptr;
strategy_group.ClearStrategies();
for (const auto& [strategy, input_shardings] : new_vector) {
strategy_group.AddStrategy(strategy, input_shardings);
}
}
}
}
}
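// Sanity-checks memory costs: a replicated strategy must cost exactly the
// full shape size, and every sharded strategy times its tile count must be at
// least that size.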
void CheckMemoryCosts(const StrategyGroup& strategy_group, const Shape& shape) {
if (strategy_group.is_tuple) {
for (size_t i = 0; i < strategy_group.GetChildren().size(); i++) {
CheckMemoryCosts(*strategy_group.GetChildren()[i],
shape.tuple_shapes().at(i));
}
} else {
double full_mem = 0.0;
for (const ShardingStrategy& strategy : strategy_group.GetStrategies()) {
if (strategy.output_sharding.IsReplicated()) {
full_mem = strategy.memory_cost;
size_t size = ByteSizeOfShape(shape);
CHECK_EQ(strategy.memory_cost, size);
}
}
for (const ShardingStrategy& strategy : strategy_group.GetStrategies()) {
if (!strategy.output_sharding.IsReplicated() && full_mem > 0.0) {
CHECK_GE(strategy.memory_cost * strategy.output_sharding.NumTiles(),
full_mem);
}
}
}
}
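// Marks as invalid (infinite compute cost) strategies that shard a dimension
// across more devices than the dimension has elements, unless doing so would
// invalidate every strategy of the group.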
void RemoveShardingsWhereSmallDimsShardedAcrossManyDevices(
const Shape& shape, const bool instruction_has_user_sharding,
StrategyGroup& strategy_group) {
if (strategy_group.is_tuple) {
const auto& children = strategy_group.GetChildren();
for (size_t i = 0; i < children.size(); i++) {
RemoveShardingsWhereSmallDimsShardedAcrossManyDevices(
shape.tuple_shapes().at(i), instruction_has_user_sharding,
*children[i]);
}
return;
}
if (instruction_has_user_sharding &&
strategy_group.GetStrategies().size() == 1) {
return;
}
std::vector<int> invalid_strategy_indices;
for (size_t sid = 0; sid < strategy_group.GetStrategies().size(); ++sid) {
const ShardingStrategy& strategy = strategy_group.GetStrategy(sid);
if (strategy.output_sharding.IsReplicated()) {
continue;
}
const auto& tile_assignment = strategy.output_sharding.tile_assignment();
for (int64_t i = 0; i < shape.rank(); ++i) {
if (tile_assignment.dim(i) > 1 &&
tile_assignment.dim(i) > shape.dimensions(i)) {
invalid_strategy_indices.push_back(sid);
break;
}
}
}
if (invalid_strategy_indices.size() < strategy_group.GetStrategies().size()) {
for (size_t sid : invalid_strategy_indices) {
ShardingStrategy& strategy = strategy_group.GetStrategy(sid);
VLOG(1) << "Removing invalid strategy: " << strategy.ToString();
strategy.compute_cost = kInfinityCost;
}
}
}
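// Scales compute, communication, and communication-resharding costs by the
// instruction's execution count.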
void ScaleCostsWithExecutionCounts(const int64_t execution_count,
StrategyGroup& strategy_group) {
if (strategy_group.is_tuple) {
for (const auto& child : strategy_group.GetChildren()) {
ScaleCostsWithExecutionCounts(execution_count, *child);
}
} else {
for (size_t sid = 0; sid < strategy_group.GetStrategies().size(); ++sid) {
ShardingStrategy& strategy = strategy_group.GetStrategy(sid);
strategy.compute_cost *= execution_count;
strategy.communication_cost *= execution_count;
for (auto i = 0; i < strategy.communication_resharding_costs.size();
++i) {
for (auto j = 0; j < strategy.communication_resharding_costs[i].size();
++j) {
strategy.communication_resharding_costs[i][j] *= execution_count;
}
}
}
}
}
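// Builds strategies for elementwise ops by following one chosen operand (or
// every operand, when the choice is tied and tie-following is disallowed).
// Additions of two dots are recorded as associative dot pairs.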
std::unique_ptr<StrategyGroup> CreateElementwiseOperatorStrategies(
const size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const InstructionDepthMap& depth_map, const AliasMap& alias_map,
const StableMap<int64_t, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map,
const int64_t max_depth, StrategyGroups& strategy_groups,
AssociativeDotPairs& associative_dot_pairs) {
std::unique_ptr<StrategyGroup> strategy_group = CreateLeafStrategyGroup(
instruction_id, ins, strategy_map, strategy_groups);
int64_t follow_idx;
bool tie;
std::tie(follow_idx, tie) =
ChooseOperandToFollow(strategy_map, depth_map, alias_map, max_depth, ins);
if (!tie || AllowTieFollowing(ins)) {
strategy_group->following = strategy_map.at(ins->operand(follow_idx)).get();
} else {
strategy_group->following = nullptr;
}
for (int64_t i = 0; i < ins->operand_count(); ++i) {
if (strategy_group->following != nullptr && i != follow_idx) {
continue;
}
StrategyGroup* src_strategy_group = strategy_map.at(ins->operand(i)).get();
CHECK(!src_strategy_group->is_tuple);
FollowArrayOrTokenStrategyGroup(*src_strategy_group, ins->shape(),
instruction_id, cluster_env,
pretrimmed_strategy_map, *strategy_group);
}
if (ins->opcode() == HloOpcode::kAdd) {
if (ins->operand(0)->opcode() == HloOpcode::kDot &&
ins->operand(1)->opcode() == HloOpcode::kDot) {
associative_dot_pairs.push_back({strategy_map.at(ins->operand(0)).get(),
strategy_map.at(ins->operand(1)).get()});
}
}
return strategy_group;
}
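// Creates a single placeholder strategy (replicated, zero resharding costs)
// for manually sharded instructions, recursing through tuple shapes.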
std::unique_ptr<StrategyGroup> HandleManuallyShardedInstruction(
const HloInstruction* ins, const Shape& shape, const size_t instruction_id,
StrategyGroups& strategy_groups, StrategyMap& strategy_map) {
std::unique_ptr<StrategyGroup> strategy_group;
if (shape.IsTuple()) {
strategy_group = CreateTupleStrategyGroup(instruction_id);
for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
std::unique_ptr<StrategyGroup> child_strategies =
HandleManuallyShardedInstruction(ins, shape.tuple_shapes(i),
instruction_id, strategy_groups,
strategy_map);
child_strategies->tuple_element_idx = i;
strategy_group->AddChild(std::move(child_strategies));
}
} else if (shape.IsToken() || shape.IsArray()) {
strategy_group = CreateLeafStrategyGroup(instruction_id, ins, strategy_map,
strategy_groups);
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
InputShardings input_shardings = {"MANUAL"};
if (ins->operand_count() > 0 && ins->operand(0)->shape().IsTuple()) {
CHECK_EQ(ins->operand_count(), 1)
<< "Do not support instructions with more than one tuple "
"operand. If this CHECK fails, we will need to fix "
"b/233412625.";
std::tie(communication_resharding_costs, memory_resharding_costs,
input_shardings) =
ReshardingCostsForTupleOperand(ins->operand(0),
*strategy_map.at(ins->operand(0)));
} else {
for (int64_t k = 0; k < ins->operand_count(); ++k) {
const HloInstruction* operand = ins->operand(k);
const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
const auto& strategies = operand_strategy_group.GetStrategies();
const std::vector<double> zeros(strategies.size(), 0);
communication_resharding_costs.push_back(zeros);
memory_resharding_costs.push_back(zeros);
}
}
strategy_group->AddStrategy(
ShardingStrategy({HloSharding::Replicate(), 0, 0,
static_cast<double>(ShapeUtil::ByteSizeOf(shape)),
std::move(communication_resharding_costs),
std::move(memory_resharding_costs)}),
std::move(input_shardings));
} else {
LOG(FATAL) << "Unsupported instruction shape: " << shape.DebugString();
}
return strategy_group;
}
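// Derives reshape strategies by propagating each operand strategy through
// hlo_sharding_util::ReshapeSharding; if no operand sharding survives, falls
// back to enumerating the full strategy set for the output shape.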
std::unique_ptr<StrategyGroup> CreateReshapeStrategies(
const size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const bool only_allow_divisible, const double replicated_penalty,
const AutoShardingOption& option, StrategyGroups& strategy_groups,
const CallGraph& call_graph) {
std::unique_ptr<StrategyGroup> strategy_group = CreateLeafStrategyGroup(
instruction_id, ins, strategy_map, strategy_groups);
const HloInstruction* operand = ins->operand(0);
const StrategyGroup& operand_strategy_group = *strategy_map.at(operand);
CHECK(!operand_strategy_group.is_tuple);
for (const ShardingStrategy& operand_strategy :
operand_strategy_group.GetStrategies()) {
std::optional<HloSharding> output_sharding =
hlo_sharding_util::ReshapeSharding(operand->shape(), ins->shape(),
operand_strategy.output_sharding);
if (!output_sharding.has_value() ||
!IsValidTileAssignment(*output_sharding) ||
!TileAssignmentMatchesMesh(*output_sharding,
cluster_env.device_mesh_)) {
continue;
}
const std::string name = ToStringSimple(*output_sharding);
double compute_cost = 0, communication_cost = 0;
double memory_cost =
ByteSizeOfShapeWithSharding(ins->shape(), output_sharding);
std::vector<double> communication_resharding_costs =
CommunicationReshardingCostVector(
operand_strategy_group, operand->shape(),
operand_strategy.output_sharding, cluster_env);
std::vector<double> memory_resharding_costs = MemoryReshardingCostVector(
operand_strategy_group, operand->shape(),
operand_strategy.output_sharding, cluster_env);
strategy_group->AddStrategy(
ShardingStrategy({*output_sharding,
compute_cost,
communication_cost,
memory_cost,
{communication_resharding_costs},
{memory_resharding_costs}}),
{name, {operand_strategy.output_sharding}});
}
if (strategy_group->GetStrategies().empty()) {
VLOG(2) << "Enumerating all strategies for reshape";
FillAllStrategiesForArray(
ins, ins->shape(), cluster_env, strategy_map, option,
replicated_penalty, call_graph, only_allow_divisible,
        /*create_replicated_strategies=*/true,
        /*create_partially_replicated_strategies=*/true, *strategy_group);
}
return strategy_group;
}
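// Assembles the AutoShardingSolverRequest (node/edge costs, aliases, liveness
// intervals, groups, and default-strategy hints from sharding propagation)
// and dispatches it to the MIP solver. Aliases whose cost matrix has a zero
// diagonal may be converted into follower relations when allowed.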
absl::StatusOr<AutoShardingSolverOutput>
CreateAutoShardingSolverRequestAndCallSolver(
const HloModule& hlo_module, const HloLiveRange& hlo_live_range,
const StrategyMap& strategy_map, const StrategyGroups& strategy_groups,
const CostGraph& cost_graph, const AliasSet& alias_set,
const std::vector<std::pair<LivenessIdx, LivenessIdx>>& node_intervals,
const std::vector<std::pair<LivenessIdx, LivenessIdx>>& edge_intervals,
const std::vector<absl::btree_set<int64_t>>& node_groups,
const std::vector<absl::btree_set<int64_t>>& edge_groups,
const std::vector<NodeStrategyIdx>& s_hint, const bool compute_iis,
const int64_t solver_timeout_in_seconds, const AutoShardingOption& option,
std::optional<double> max_cost, absl::string_view request_name,
const absl::flat_hash_map<std::string, HloSharding>&
sharding_propagation_solution,
bool deterministic_mode) {
AutoShardingSolverRequest request;
request.set_module_name(hlo_module.name());
request.set_num_nodes(strategy_groups.size());
request.set_memory_budget(option.memory_budget_per_device);
request.mutable_s_len()->Add(cost_graph.node_lens_.begin(),
cost_graph.node_lens_.end());
request.mutable_s_follow()->Add(cost_graph.follow_idx_.begin(),
cost_graph.follow_idx_.end());
request.mutable_s_hint()->Add(s_hint.begin(), s_hint.end());
request.mutable_solver_timeout()->set_solver_timeout_in_seconds(
solver_timeout_in_seconds);
if (option.memory_overbudget_coeff >= 0.0) {
request.mutable_overbudget_coeff()->set_coeff(
option.memory_overbudget_coeff);
}
request.set_crash_at_infinity_costs_check(!option.try_multiple_mesh_shapes);
request.set_compute_iis(compute_iis);
request.set_saltiplier(kSaltiplier);
request.set_deterministic_mode(deterministic_mode);
request.set_request_name(std::string(request_name));
request.set_enable_memory_edge_costs(option.model_resharding_memory_costs);
request.set_enable_output(
option.preserve_shardings ==
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings);
if (max_cost) {
request.mutable_max_cost()->set_coeff(*max_cost);
}
for (const auto& [edge, edge_cost] : cost_graph.edge_costs_) {
const auto normalized_edge_cost = Normalize(edge_cost);
AutoShardingSolverRequest_Pair raw_edge;
raw_edge.set_first(edge.first);
raw_edge.set_second(edge.second);
*request.add_edges() = raw_edge;
AutoShardingSolverRequest_Costs rij;
AutoShardingSolverRequest_Costs mij;
for (NodeStrategyIdx i = 0; i < edge_cost.n_; i++) {
for (NodeStrategyIdx j = 0; j < edge_cost.m_; j++) {
rij.add_costs(normalized_edge_cost(i, j).communication_cost);
mij.add_costs(normalized_edge_cost(i, j).memory_cost);
}
}
request.mutable_resharding_costs()->Add(std::move(rij));
request.mutable_memory_edge_costs()->Add(std::move(mij));
}
const HloInstructionSequence& sequence =
hlo_live_range.flattened_instruction_sequence();
const std::vector<HloInstruction*>& instructions = sequence.instructions();
int num_nodes_without_default = 0;
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
const StrategyGroup* strategy_group = strategy_groups[node_idx];
const auto instruction = instructions.at(strategy_group->instruction_id);
const auto instruction_name = instruction->name();
const auto opcode = HloOpcodeString(instruction->opcode());
request.add_instruction_names(
absl::StrCat(instruction_name, " (id: ", node_idx, ")"));
request.add_opcodes(std::string(opcode));
request.add_metadata_source_files(instruction->metadata().source_file());
AutoShardingSolverRequest_Costs ci, di, mi, pi;
AutoShardingSolverRequest_Names strategy_names;
std::optional<HloSharding> default_strategy;
auto iter = sharding_propagation_solution.find(instruction_name);
if (iter != sharding_propagation_solution.end()) {
default_strategy = iter->second;
if (strategy_group->tuple_element_idx) {
const auto& tuple_elements = iter->second.tuple_elements();
CHECK_LT(*strategy_group->tuple_element_idx, tuple_elements.size());
default_strategy =
tuple_elements.at(*strategy_group->tuple_element_idx);
}
}
for (auto j = 0; j < strategy_group->GetStrategies().size(); ++j) {
const ShardingStrategy& strategy = strategy_group->GetStrategies()[j];
const HloSharding& sharding = strategy.output_sharding;
ci.add_costs(strategy.compute_cost);
di.add_costs(strategy.communication_cost +
cost_graph.extra_node_costs_[node_idx][j]);
mi.add_costs(strategy.memory_cost);
pi.add_costs(default_strategy && sharding == *default_strategy ? 0 : 1);
strategy_names.add_names(sharding.ToString());
}
if (option.use_sharding_propagation_for_default_shardings &&
*std::min_element(pi.costs().begin(), pi.costs().end()) > 0) {
LOG(WARNING) << "No default strategy for {node_idx " << node_idx
<< ", instruction ID " << strategy_group->instruction_id
<< ", instruction name " << instruction_name << "}";
++num_nodes_without_default;
}
request.mutable_computation_costs()->Add(std::move(ci));
request.mutable_communication_costs()->Add(std::move(di));
request.mutable_memory_costs()->Add(std::move(mi));
request.mutable_departure_costs()->Add(std::move(pi));
request.mutable_strategy_names()->Add(std::move(strategy_names));
}
LOG(INFO) << "Total nodes without default: " << num_nodes_without_default;
std::vector<std::pair<NodeIdx, NodeIdx>> new_followers;
for (const auto& pair : alias_set) {
const StrategyGroup* src_strategy_group = strategy_groups[pair.first];
const StrategyGroup* dst_strategy_group = strategy_groups[pair.second];
const auto& src_strategies = src_strategy_group->GetStrategies();
const auto& dst_strategies = dst_strategy_group->GetStrategies();
Matrix<double> raw_cost(src_strategies.size(), dst_strategies.size());
for (NodeStrategyIdx i = 0; i < src_strategies.size(); ++i) {
for (NodeStrategyIdx j = 0; j < dst_strategies.size(); ++j) {
if (src_strategies[i].output_sharding ==
dst_strategies[j].output_sharding) {
raw_cost(i, j) = 0.0;
} else {
raw_cost(i, j) = 1.0;
}
}
}
NodeIdx idx_a = pair.first;
NodeIdx idx_b = pair.second;
std::vector<NodeStrategyIdx> row_indices;
std::vector<NodeStrategyIdx> col_indices;
if (request.s_follow(idx_a) >= 0) {
row_indices = cost_graph.reindexing_vector_.at(idx_a);
idx_a = request.s_follow(idx_a);
} else {
row_indices.assign(request.s_len(idx_a), 0);
std::iota(row_indices.begin(), row_indices.end(), 0);
}
if (request.s_follow(idx_b) >= 0) {
col_indices = cost_graph.reindexing_vector_.at(idx_b);
idx_b = request.s_follow(idx_b);
} else {
col_indices.assign(request.s_len(idx_b), 0);
std::iota(col_indices.begin(), col_indices.end(), 0);
}
CHECK_EQ(request.s_len(idx_a), row_indices.size());
CHECK_EQ(request.s_len(idx_b), col_indices.size());
AutoShardingSolverRequest_Costs vij;
for (NodeStrategyIdx i : row_indices) {
for (NodeStrategyIdx j : col_indices) {
vij.add_costs(raw_cost(i, j));
}
}
bool convertible = (row_indices.size() == col_indices.size());
for (NodeStrategyIdx i = 0; i < row_indices.size() && convertible; ++i) {
if (vij.costs(i * col_indices.size() + i) != 0.0) convertible = false;
}
if (convertible && option.allow_alias_to_follower_conversion) {
new_followers.push_back({idx_a, idx_b});
} else {
AutoShardingSolverRequest_Pair alias;
alias.set_first(idx_a);
alias.set_second(idx_b);
*request.add_aliases() = alias;
request.mutable_value_costs()->Add(std::move(vij));
}
}
auto s_follow = request.mutable_s_follow();
for (auto [follower, followee] : new_followers) {
while (s_follow->at(follower) >= 0) follower = s_follow->at(follower);
while (s_follow->at(followee) >= 0) followee = s_follow->at(followee);
if (follower != followee) s_follow->Set(follower, followee);
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (s_follow->at(node_idx) < 0) continue;
while (s_follow->at(s_follow->at(node_idx)) >= 0) {
s_follow->Set(node_idx, s_follow->at(s_follow->at(node_idx)));
}
}
for (const auto& interval : node_intervals) {
AutoShardingSolverRequest_Pair pair;
pair.set_first(interval.first);
pair.set_second(interval.second);
*request.add_node_intervals() = std::move(pair);
}
for (const auto& interval : edge_intervals) {
AutoShardingSolverRequest_Pair pair;
pair.set_first(interval.first);
pair.set_second(interval.second);
*request.add_edge_intervals() = std::move(pair);
}
for (const auto& reduced_group : node_groups) {
AutoShardingSolverRequest_Group group;
group.mutable_prims()->Add(reduced_group.begin(), reduced_group.end());
*request.add_node_groups() = std::move(group);
}
for (const auto& reduced_group : edge_groups) {
AutoShardingSolverRequest_Group group;
group.mutable_prims()->Add(reduced_group.begin(), reduced_group.end());
*request.add_edge_groups() = std::move(group);
}
PopulateTemporalValues(cost_graph, request);
return FormulateAndSolveMIPFromSolverRequest(request);
}
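// Logs post-solve diagnostics: large tensors left incompletely sharded or
// replicated, and instruction/operand pairs whose sharded dimension sizes
// disagree, reported largest-first (top 10).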
void CheckHloSharding(
const HloInstructionSequence& sequence,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const size_t total_num_devices) {
const std::vector<HloInstruction*>& instructions = sequence.instructions();
std::vector<std::pair<size_t, std::string>> size_string;
for (const HloInstruction* ins : instructions) {
if (!instructions_to_shard.contains(ins) || !ins->has_sharding()) {
continue;
}
if (!ins->shape().IsTuple() &&
ins->opcode() != HloOpcode::kGetTupleElement) {
double size = ByteSizeOfShape(ins->shape()) / 1024 / 1024 / 1024;
if ((!spmd::ShardingIsComplete(ins->sharding(), total_num_devices) ||
ins->sharding().IsReplicated()) &&
size > 1) {
LOG(INFO) << "Instruction is not fully sharded: (" << size << " GB) "
<< ins->ToString();
} else if (!ins->has_sharding()) {
LOG(INFO) << "Instruction does not have sharding: " << ins->name();
}
for (const auto& op : ins->operands()) {
if (op->has_sharding()) {
if (op->sharding().IsReplicated() || ins->sharding().IsReplicated()) {
continue;
}
const std::vector<int64_t> ins_sharded_dims =
VectorGreaterThanOneElementIndices(
ins->sharding().tile_assignment().dimensions(),
ins->sharding().ReplicateOnLastTileDim());
const std::vector<int64_t> op_sharded_dims =
VectorGreaterThanOneElementIndices(
op->sharding().tile_assignment().dimensions(),
op->sharding().ReplicateOnLastTileDim());
bool not_consistent = false;
if (ins_sharded_dims.size() != op_sharded_dims.size()) {
not_consistent = true;
} else {
for (size_t i = 0; i < ins_sharded_dims.size(); i++) {
if (op->shape().dimensions().at(op_sharded_dims.at(i)) !=
ins->shape().dimensions().at(ins_sharded_dims.at(i))) {
not_consistent = true;
}
}
}
if (not_consistent) {
size_t op_size =
ByteSizeOfShape(op->shape()) / (1024.0 * 1024 * 1024);
std::string str = absl::StrCat("Shardings not consistent (op size ",
op_size, " GB):", ins->ToString(),
"\n Operand: ", op->ToString());
size_string.push_back({op_size, std::move(str)});
}
} else {
LOG(INFO) << "Instruction " << op->name()
<< " does not have sharding.";
}
}
}
}
struct {
bool operator()(const std::pair<size_t, std::string>& a,
const std::pair<size_t, std::string>& b) const {
return a.first > b.first;
}
} MemLarger;
std::sort(size_string.begin(), size_string.end(), MemLarger);
size_t k = 10;
k = std::min(k, size_string.size());
for (size_t t = 0; t < k; ++t) {
LOG(INFO) << size_string.at(t).second;
}
}
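// Applies the solver-chosen strategies (s_val) as shardings on the
// instructions in the sequence. Tuple-shaped instructions get a tuple
// sharding assembled from their leaf strategies. Replicated shardings are
// skipped on non-final iterations so later partial-mesh passes can refine
// them.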
void SetHloSharding(
const HloInstructionSequence& sequence,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const StrategyMap& strategy_map, const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val, bool last_iteration) {
if (!last_iteration) {
LOG(INFO) << "Skip setting shardings (since not the last iteration)";
}
const std::vector<HloInstruction*>& instructions = sequence.instructions();
for (HloInstruction* inst : instructions) {
if (!instructions_to_shard.contains(inst)) {
continue;
}
if (inst->opcode() == HloOpcode::kOutfeed ||
inst->opcode() == HloOpcode::kRecv ||
inst->opcode() == HloOpcode::kRecvDone ||
inst->opcode() == HloOpcode::kSend ||
inst->opcode() == HloOpcode::kSendDone) {
continue;
}
auto iter = strategy_map.find(inst);
if (iter == strategy_map.end()) {
continue;
}
const StrategyGroup* strategy_group = iter->second.get();
if (strategy_group->is_tuple) {
const Shape& out_shape = inst->shape();
ShapeTree<HloSharding> output_tuple_sharding(out_shape, Undefined());
std::vector<HloSharding> output_flattened_shardings;
std::function<void(const StrategyGroup*)> extract_tuple_shardings;
bool set_tuple_sharding = true;
extract_tuple_shardings = [&](const StrategyGroup* strategy_group) {
if (strategy_group->is_tuple) {
for (const auto& child_strategies : strategy_group->GetChildren()) {
extract_tuple_shardings(child_strategies.get());
}
} else {
NodeIdx node_idx = strategy_group->node_idx;
NodeStrategyIdx stra_idx = s_val[node_idx];
const auto& strategy = strategy_group->GetStrategies()[stra_idx];
if (strategy.output_sharding.IsReplicated() && !last_iteration) {
set_tuple_sharding = false;
}
output_flattened_shardings.push_back(strategy.output_sharding);
}
};
extract_tuple_shardings(strategy_group);
int i = 0;
for (auto& leaf : output_tuple_sharding.leaves()) {
leaf.second = output_flattened_shardings[i++];
}
if (set_tuple_sharding) {
inst->set_sharding(HloSharding::Tuple(output_tuple_sharding));
}
} else {
const HloSharding& sharding_spec =
GetShardingStrategy(inst, strategy_map, cost_graph, s_val)
.output_sharding;
if (IsUndefined(sharding_spec)) {
continue;
}
if (sharding_spec.IsReplicated() && !last_iteration) {
VLOG(5) << "skip setting shardings for inst " << inst->name();
} else {
inst->set_sharding(sharding_spec);
}
}
}
}
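// Inserts resharding operations so that each operand matches the input
// sharding required by the strategy chosen for its user. Dot and convolution
// ops are handled first (with special-casing for allreduce strategies whose
// contracting dimensions are unsharded); other ops are handled per-opcode,
// including tuple-shaped outputs.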
absl::Status InsertReshardReshapes(
const HloInstructionSequence& sequence,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const StrategyMap& strategy_map, const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val,
const ClusterEnvironment& cluster_env, bool crash_at_error,
bool insert_resharding_reshapes_for_non_dot_ops,
absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserve_shardings) {
const std::vector<HloInstruction*>& instructions = sequence.instructions();
const DeviceMesh& device_mesh = cluster_env.device_mesh_;
ReshardingCache resharding_cache_entity;
ReshardingCache* resharding_cache = &resharding_cache_entity;
for (HloInstruction* inst : instructions) {
if (!instructions_to_shard.contains(inst) ||
spmd::IsSPMDShardToFullShapeCustomCall(inst)) {
continue;
}
if (inst->opcode() == HloOpcode::kDot ||
inst->opcode() == HloOpcode::kConvolution) {
const HloInstruction* lhs = inst->operand(0);
const HloInstruction* rhs = inst->operand(1);
const HloSharding& lhs_sharding = lhs->sharding();
const HloSharding& rhs_sharding = rhs->sharding();
std::vector<int64_t> lhs_con_dims;
std::vector<int64_t> rhs_con_dims;
if (inst->opcode() == HloOpcode::kDot) {
const DotDimensionNumbers& dot_dnums = inst->dot_dimension_numbers();
lhs_con_dims.push_back(dot_dnums.lhs_contracting_dimensions()[0]);
rhs_con_dims.push_back(dot_dnums.rhs_contracting_dimensions()[0]);
} else {
const ConvolutionDimensionNumbers& conv_dnums =
inst->convolution_dimension_numbers();
lhs_con_dims.push_back(conv_dnums.input_feature_dimension());
rhs_con_dims.push_back(conv_dnums.kernel_input_feature_dimension());
}
      const std::vector<int64_t>& lhs_tensor_dim_to_mesh_dim =
          cluster_env.GetTensorDimToMeshDimWrapper(
              lhs->shape(), lhs_sharding,
              /*consider_reverse_device_meshes=*/true, crash_at_error);
      const std::vector<int64_t>& rhs_tensor_dim_to_mesh_dim =
          cluster_env.GetTensorDimToMeshDimWrapper(
              rhs->shape(), rhs_sharding,
              /*consider_reverse_device_meshes=*/true, crash_at_error);
if (lhs_tensor_dim_to_mesh_dim.size() != lhs->shape().rank() ||
rhs_tensor_dim_to_mesh_dim.size() != rhs->shape().rank()) {
return absl::InvalidArgumentError(
"Cannot generate tensor dim to mesh dim mapping");
}
const InputShardings& input_shardings =
GetInputShardings(inst, strategy_map, cost_graph, s_val);
if (absl::StrContains(input_shardings.name, "allreduce") &&
std::any_of(lhs_con_dims.begin(), lhs_con_dims.end(),
[&lhs_tensor_dim_to_mesh_dim](int64_t dim) {
return lhs_tensor_dim_to_mesh_dim[dim] == -1;
}) &&
std::any_of(rhs_con_dims.begin(), rhs_con_dims.end(),
[&rhs_tensor_dim_to_mesh_dim](int64_t dim) {
return rhs_tensor_dim_to_mesh_dim[dim] == -1;
})) {
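        // The chosen allreduce strategy has unsharded contracting dimensions
        // on both operands, so no operand resharding is needed here.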
} else {
        CHECK(input_shardings.shardings.size() == 2)
            << "Dot op requires both operands to have input shardings, "
               "but got instruction: "
            << inst->ToString()
            << ", input shardings: " << input_shardings.ToString();
if (input_shardings.shardings[0].has_value()) {
TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
inst, 0, *input_shardings.shardings[0], device_mesh,
resharding_cache));
}
if (input_shardings.shardings[1].has_value()) {
TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
inst, 1, *input_shardings.shardings[1], device_mesh,
resharding_cache));
}
}
}
if (!insert_resharding_reshapes_for_non_dot_ops) {
continue;
}
if (inst->opcode() == HloOpcode::kOutfeed ||
inst->opcode() == HloOpcode::kSendDone ||
inst->opcode() == HloOpcode::kSend ||
inst->opcode() == HloOpcode::kRecv ||
inst->opcode() == HloOpcode::kRecvDone) {
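      // Outfeed/send/recv-style ops keep their preserved shardings; they are
      // finalized in SetHloShardingPostProcessing instead.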
} else {
if (inst->shape().IsTuple()) {
if (absl::c_any_of(
inst->shape().tuple_shapes(),
[](const Shape& shape) { return shape.IsTuple(); })) {
continue;
}
switch (inst->opcode()) {
case HloOpcode::kReduce:
case HloOpcode::kCustomCall:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kSort: {
for (size_t i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
const InputShardings& input_shardings =
GetInputShardingsForTuple(inst, {static_cast<int64_t>(i)},
strategy_map, cost_graph, s_val);
if (input_shardings.shardings.size() > i &&
input_shardings.shardings[i].has_value()) {
TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
inst, i, *input_shardings.shardings[i], device_mesh,
resharding_cache));
}
}
break;
}
case HloOpcode::kTuple: {
for (size_t i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
const InputShardings& input_shardings =
GetInputShardingsForTuple(inst, {static_cast<int64_t>(i)},
strategy_map, cost_graph, s_val);
CHECK_EQ(input_shardings.shardings.size(), 1);
CHECK(input_shardings.shardings[0].has_value());
TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
inst, i, *input_shardings.shardings[0], device_mesh,
resharding_cache));
}
break;
}
case HloOpcode::kGetTupleElement: {
std::vector<std::optional<HloSharding>> dst_shardings(
inst->shape().tuple_shapes_size(), std::nullopt);
for (size_t i = 0; i < inst->shape().tuple_shapes_size(); ++i) {
CHECK(!inst->shape().tuple_shapes(i).IsTuple())
<< "We currently do not support ops with nested tuples as "
"output. See b/332951306.";
const InputShardings& input_shardings =
GetInputShardingsForTuple(inst, {static_cast<int64_t>(i)},
strategy_map, cost_graph, s_val);
if (!input_shardings.shardings.empty() &&
input_shardings.shardings[0].has_value()) {
dst_shardings[i] = *input_shardings.shardings[0];
}
}
TF_RETURN_IF_ERROR(
FixMixedMeshShapeReshardingGetTupleElementWithTupleOutput(
inst, dst_shardings, device_mesh));
break;
}
case HloOpcode::kWhile:
case HloOpcode::kInfeed:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kConditional:
case HloOpcode::kParameter: {
break;
}
default:
LOG(FATAL) << "Unhandled instruction: " + inst->ToString();
}
} else {
const InputShardings& input_shardings =
GetInputShardings(inst, strategy_map, cost_graph, s_val);
if (input_shardings.shardings.empty()) {
continue;
}
if (inst->opcode() == HloOpcode::kGetTupleElement) {
TF_RETURN_IF_ERROR(FixMixedMeshShapeReshardingGetTupleElement(
inst, inst->sharding(), device_mesh, preserve_shardings));
continue;
}
for (size_t i = 0; i < inst->operand_count(); ++i) {
if (input_shardings.shardings.size() > i &&
input_shardings.shardings[i].has_value()) {
TF_RETURN_IF_ERROR(FixMixedMeshShapeResharding(
inst, i, *input_shardings.shardings[i], device_mesh,
resharding_cache));
}
}
}
}
}
return absl::OkStatus();
}
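// Restores user-specified shardings for outfeed/send/recv-style instructions
// from the preserved-shardings map, reconstructing tuple shardings where the
// instruction's sharding covers both its operands and its token.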
absl::Status SetHloShardingPostProcessing(
const HloInstructionSequence& sequence,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserve_shardings) {
for (HloInstruction* inst : sequence.instructions()) {
if (!instructions_to_shard.contains(inst) ||
spmd::IsSPMDShardToFullShapeCustomCall(inst)) {
continue;
}
auto preserved_sharding_iter = preserve_shardings.find(inst->name());
if (preserved_sharding_iter == preserve_shardings.end()) {
continue;
}
const std::vector<HloSharding>& preserved_sharding =
preserved_sharding_iter->second;
if (inst->opcode() == HloOpcode::kOutfeed ||
inst->opcode() == HloOpcode::kSendDone) {
if (preserved_sharding.size() <= 1) {
CHECK_EQ(preserved_sharding.size(), 1);
inst->set_sharding(preserved_sharding[0]);
continue;
}
std::vector<Shape> tuple_elements_shape(
inst->operand(0)->shape().tuple_shapes().begin(),
inst->operand(0)->shape().tuple_shapes().end());
tuple_elements_shape.push_back(inst->operand(1)->shape());
Shape output_tuple_sharding_shape =
ShapeUtil::MakeTupleShape(tuple_elements_shape);
ShapeTree<HloSharding> output_tuple_sharding(output_tuple_sharding_shape,
Undefined());
size_t i = 0;
for (std::pair<ShapeIndex, HloSharding>& leaf :
output_tuple_sharding.leaves()) {
leaf.second = preserved_sharding.at(i++);
}
inst->set_sharding(HloSharding::Tuple(output_tuple_sharding));
} else if (inst->opcode() == HloOpcode::kSend ||
inst->opcode() == HloOpcode::kRecv ||
inst->opcode() == HloOpcode::kRecvDone) {
if (preserved_sharding.size() > 1) {
inst->set_sharding(
HloSharding::Tuple(inst->shape(), preserved_sharding));
continue;
}
if (preserved_sharding.size() != 1) {
return absl::InternalError(
absl::StrCat("An empty sharding was preserved for ", inst->name(),
". This should be reported as a bug."));
}
inst->set_sharding(preserved_sharding[0]);
}
}
return absl::OkStatus();
}
std::string PrintLivenessSet(const LivenessSet& liveness_set) {
std::string str("Liveness Set\n");
for (LivenessIdx time_idx = 0; time_idx < liveness_set.size(); ++time_idx) {
std::vector<std::string> names;
names.reserve(liveness_set[time_idx].size());
for (const HloValue* value : liveness_set[time_idx]) {
names.push_back(absl::StrCat(value->instruction()->name(),
value->index().ToString()));
}
std::sort(names.begin(), names.end());
absl::StrAppend(&str, "Time ", time_idx, ": ", absl::StrJoin(names, ", "),
"\n");
}
return str;
}
std::string PrintInstructions(const HloInstructionSequence& sequence) {
std::string str;
const std::vector<HloInstruction*>& instructions = sequence.instructions();
for (size_t i = 0; i < instructions.size(); ++i) {
absl::StrAppend(&str, "Instruction ", i, ": ", instructions[i]->ToString(),
"\n");
}
return str;
}
std::string PrintStrategyMap(const StrategyMap& strategy_map,
const HloInstructionSequence& sequence) {
std::string str("Strategy Map\n");
const std::vector<HloInstruction*>& instructions = sequence.instructions();
for (size_t i = 0; i < instructions.size(); ++i) {
absl::StrAppend(&str, "Instruction ", i, ": ", instructions[i]->ToString(),
"\n", strategy_map.at(instructions[i])->ToString());
}
return str;
}
std::string PrintAutoShardingSolution(const HloInstructionSequence& sequence,
const LivenessSet& liveness_set,
const StrategyMap& strategy_map,
const StrategyGroups& strategy_groups,
const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val,
const double objective) {
std::string str("=== Auto sharding strategy ===\n");
const std::vector<HloInstruction*>& instructions = sequence.instructions();
size_t N = strategy_groups.size();
for (NodeIdx node_idx = 0; node_idx < N; ++node_idx) {
const StrategyGroup& strategy_group = *strategy_groups[node_idx];
absl::StrAppend(
&str, node_idx, " ",
ToAdaptiveString(instructions[strategy_group.instruction_id]), " ");
NodeStrategyIdx stra_idx = cost_graph.RemapIndex(node_idx, s_val[node_idx]);
const ShardingStrategy& strategy = strategy_group.GetStrategies()[stra_idx];
absl::StrAppend(&str, strategy.ToString());
if (cost_graph.follow_idx_[node_idx] >= 0) {
absl::StrAppend(&str, " follow ", cost_graph.follow_idx_[node_idx]);
}
absl::StrAppend(&str, "\n");
}
return str;
}
std::string PrintSolutionMemoryUsage(const LivenessSet& liveness_set,
const StrategyMap& strategy_map,
const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val) {
std::string str("=== Memory ===\n");
std::vector<std::pair<LivenessIdx, double>> time_memory_usage;
std::function<double(const StrategyGroup&)> calculate_memory_usage;
calculate_memory_usage = [&](const StrategyGroup& strategy_group) {
if (strategy_group.is_tuple) {
double m = 0.0;
for (const auto& child : strategy_group.GetChildren()) {
m += calculate_memory_usage(*child);
}
return m;
}
NodeIdx ins_idx = strategy_group.node_idx;
NodeStrategyIdx stra_idx = cost_graph.RemapIndex(ins_idx, s_val[ins_idx]);
const auto& strategies = strategy_group.GetStrategies();
const ShardingStrategy& strategy = strategies[stra_idx];
return strategy.memory_cost;
};
for (LivenessIdx time_idx = 0; time_idx < liveness_set.size(); ++time_idx) {
double mem = 0.0;
for (const auto& val : liveness_set.at(time_idx)) {
const HloInstruction* ins = val->instruction();
auto tmp = calculate_memory_usage(*strategy_map.at(ins));
mem += tmp;
if (VLOG_IS_ON(6) && tmp / (1024 * 1024) > 1) {
absl::StrAppend(&str, " ", ins->name(),
": mem += ", tmp / (1024 * 1024),
" MB; mem=", mem / (1024 * 1024), " MB\n");
}
}
time_memory_usage.push_back({time_idx, mem});
if (VLOG_IS_ON(6)) {
absl::StrAppend(&str, "Time ", time_idx, ": ", mem / (1024 * 1024),
" MB\n");
}
}
struct {
bool operator()(std::pair<LivenessIdx, double> a,
std::pair<LivenessIdx, double> b) const {
return a.second > b.second;
}
} TimeMemLarger;
std::sort(time_memory_usage.begin(), time_memory_usage.end(), TimeMemLarger);
absl::StrAppend(&str,
"Using memory costs from ShardingStrategy, the max memory "
"consumption is ",
time_memory_usage.front().second / (1024 * 1024 * 1024),
" GB at time ", time_memory_usage.front().first, "\n");
size_t k = 3;
k = std::min(k, time_memory_usage.size());
std::vector<std::pair<std::string, double>> instruction_mem;
for (LivenessIdx time_idx = 0; time_idx < k; time_idx++) {
for (const auto& val : liveness_set[time_memory_usage.at(time_idx).first]) {
const HloInstruction* ins = val->instruction();
auto mem = calculate_memory_usage(*strategy_map.at(ins));
if (mem > 100 * 1024 * 1024) {
instruction_mem.push_back(
{absl::StrCat(ins->name(), val->index().ToString()), mem});
}
}
}
struct {
bool operator()(std::pair<std::string, double> a,
std::pair<std::string, double> b) const {
return a.second > b.second;
}
} NameMemLarger;
std::sort(instruction_mem.begin(), instruction_mem.end(), NameMemLarger);
size_t top_tensors = 10;
top_tensors = std::min(top_tensors, instruction_mem.size());
absl::StrAppend(&str, "Top ", top_tensors, " largest tensors:\n");
for (size_t i = 0; i < top_tensors; i++) {
absl::StrAppend(
&str, "instruction name: ", instruction_mem.at(i).first,
" memory usage: ", instruction_mem.at(i).second / (1024 * 1024 * 1024),
"GB\n");
}
return str;
}
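// Records an instruction's existing sharding annotation (and optionally those
// of its kCopy users) into preserve_shardings, keyed by instruction name.
// Returns an Unimplemented error for shard_as/shard_like annotations, which
// auto-sharding does not support.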
absl::Status SaveShardingForInstruction(
const HloInstruction* inst, bool save_for_copy_users,
absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserve_shardings) {
auto save_sharding =
[&preserve_shardings](const HloInstruction* inst) -> absl::Status {
if (!inst->has_sharding()) {
return absl::OkStatus();
}
if (inst->sharding().IsUnknown() &&
(inst->sharding().IsShardLike() || inst->sharding().IsShardAs())) {
return absl::UnimplementedError(
"Auto-sharding currently does not support shard_as/shard_like "
"sharding annotations");
}
if (!inst->sharding().IsTuple()) {
preserve_shardings[inst->name()] = {inst->sharding()};
} else {
preserve_shardings[inst->name()] = inst->sharding().tuple_elements();
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(save_sharding(inst));
if (save_for_copy_users) {
for (const auto user : inst->users()) {
if (user->opcode() == HloOpcode::kCopy) {
TF_RETURN_IF_ERROR(save_sharding(user));
}
}
}
return absl::OkStatus();
}
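// Verifies (fatally) that every sharding recorded in preserve_shardings is
// still present and unchanged on the corresponding instruction, including
// each element of tuple shardings.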
void CheckUserShardingPreservation(
HloModule* module,
const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserve_shardings) {
for (const auto computation : module->computations()) {
for (const auto inst : computation->instructions()) {
if (preserve_shardings.find(inst->name()) == preserve_shardings.end()) {
continue;
}
if (!inst->has_sharding()) {
LOG(FATAL) << "User sharding is not preserved! Instruction with name "
<< inst->name() << " should be: "
<< preserve_shardings.at(inst->name())[0].ToString()
<< "\nbut it's empty.";
} else if (!inst->sharding().IsTuple() &&
!preserve_shardings.at(inst->name())[0].IsUnknown() &&
preserve_shardings.at(inst->name())[0] != inst->sharding()) {
LOG(FATAL) << "User sharding is not preserved! Instruction with name "
<< inst->name() << " should be: "
<< preserve_shardings.at(inst->name())[0].ToString()
<< "\nbut it's: " << inst->sharding().ToString();
} else if (inst->sharding().IsTuple()) {
const std::vector<HloSharding>* preserve_shardings_tuple =
&preserve_shardings.at(inst->name());
for (size_t i = 0; i < inst->shape().tuple_shapes_size(); i++) {
if (!preserve_shardings_tuple->at(i).IsUnknown() &&
preserve_shardings_tuple->at(i) !=
inst->sharding().tuple_elements().at(i)) {
LOG(FATAL) << "Tuple sharding is not preserved! Instruction "
"with name "
<< inst->name() << " " << i << "th tuple element "
<< " should be: "
<< preserve_shardings_tuple->at(i).ToString()
<< "\nbut it's: "
<< inst->sharding().tuple_elements().at(i).ToString();
}
}
}
}
}
}
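// Estimates a lower bound on per-device memory usage by walking the liveness
// set and sizing each live HloValue under the sharding of an aliased buffer
// value when one is known, falling back to the unsharded size for
// instructions excluded from sharding.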
int64_t MemoryBudgetLowerBound(
const HloModule& module,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const LivenessSet& liveness_set, const HloAliasAnalysis& alias_analysis,
const int64_t num_devices,
const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserved_shardings) {
auto get_value_sharding = [](const HloValue* value) -> HloSharding {
return !value->index().empty()
? value->instruction()->sharding().GetSubSharding(
value->instruction()->shape(), value->index())
: value->instruction()->sharding();
};
absl::flat_hash_map<HloBuffer::Id, const HloValue*>
buffer_to_sharded_value_mapping;
bool vlog_is_on_5 = VLOG_IS_ON(5);
for (const HloBuffer& buffer : alias_analysis.buffers()) {
for (const HloValue* value : buffer.values()) {
if (value->instruction()->has_sharding()) {
if (vlog_is_on_5) {
const HloSharding& this_value_sharding = get_value_sharding(value);
auto iter = buffer_to_sharded_value_mapping.find(buffer.id());
if (iter != buffer_to_sharded_value_mapping.end()) {
const HloSharding& buffer_value_sharding =
get_value_sharding(iter->second);
if (this_value_sharding != buffer_value_sharding) {
VLOG(1)
<< "We have a situation where two HloValues alias, but "
"they have different shardings. This can happen in the "
"presence of user-specified shardings, and is expected. "
"This, however, means that the memory budget estimate "
"is not very accurate. The aliasing HLOs are "
<< value->ToShortString() << " and "
<< iter->second->ToShortString();
}
}
}
buffer_to_sharded_value_mapping[buffer.id()] = value;
}
}
}
int64_t max_memory_usage = 0;
absl::flat_hash_map<const HloValue*, int64_t> value_to_memory_size_mapping;
for (LivenessIdx time_idx = 0; time_idx < liveness_set.size(); ++time_idx) {
int64_t memory_usage = 0;
for (const HloValue* value : liveness_set[time_idx]) {
if (value->instruction()->shape().IsTuple() && value->index().empty()) {
continue;
}
if (!instructions_to_shard.contains(value->instruction())) {
memory_usage += ShapeUtil::ByteSizeOf(value->shape());
continue;
}
auto iter1 = value_to_memory_size_mapping.find(value);
if (iter1 != value_to_memory_size_mapping.end()) {
memory_usage += iter1->second;
continue;
}
std::optional<HloSharding> optional_sharding = std::nullopt;
const HloBuffer& buffer = alias_analysis.GetBufferContainingValue(*value);
auto iter2 = buffer_to_sharded_value_mapping.find(buffer.id());
if (iter2 != buffer_to_sharded_value_mapping.end()) {
if (preserved_shardings.find(value->instruction()->name()) !=
preserved_shardings.end()) {
optional_sharding = get_value_sharding(iter2->second);
} else {
const HloSharding& value_sharding = get_value_sharding(iter2->second);
if (!value_sharding.IsTiled() ||
value_sharding.TotalNumTiles() == num_devices) {
optional_sharding = value_sharding;
}
}
}
const Shape& shape =
ShapeUtil::GetSubshape(value->instruction()->shape(), value->index());
int64_t value_memory_usage = ByteSizeOfShapeIfShardedAcrossDevices(
shape, num_devices, optional_sharding);
value_to_memory_size_mapping[value] = value_memory_usage;
memory_usage += value_memory_usage;
}
max_memory_usage = std::max(max_memory_usage, memory_usage);
}
return max_memory_usage;
}
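// After solving on a partial mesh, re-applies the user-preserved shardings so
// the next iteration starts from the original annotations. Tuple-shaped
// instructions (and multi-element outfeed shardings) are reassembled leaf by
// leaf.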
void RecoverShardingsFromPartialMesh(
const HloInstructionSequence& sequence,
const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserve_shardings) {
const std::vector<HloInstruction*>& instructions = sequence.instructions();
for (HloInstruction* ins : instructions) {
auto preserved_sharding_iter = preserve_shardings.find(ins->name());
if (preserved_sharding_iter != preserve_shardings.end()) {
const auto& preserved_sharding = preserved_sharding_iter->second;
if (ins->shape().IsTuple() || (ins->opcode() == HloOpcode::kOutfeed &&
preserved_sharding.size() > 1)) {
Shape output_tuple_sharding_shape = ins->shape();
if (ins->opcode() == HloOpcode::kOutfeed) {
std::vector<Shape> tuple_elements_shape(
ins->operand(0)->shape().tuple_shapes().begin(),
ins->operand(0)->shape().tuple_shapes().end());
tuple_elements_shape.push_back(ins->operand(1)->shape());
output_tuple_sharding_shape =
ShapeUtil::MakeTupleShape(tuple_elements_shape);
}
ShapeTree<HloSharding> output_tuple_sharding(
output_tuple_sharding_shape, Undefined());
size_t i = 0;
for (auto& leaf : output_tuple_sharding.leaves()) {
leaf.second = preserved_sharding.at(i++);
}
ins->set_sharding(HloSharding::Tuple(output_tuple_sharding));
} else {
ins->set_sharding(preserved_sharding.at(0));
}
}
}
}
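// DFS from `cur` that grows the set of instructions sharing the same sharding
// strategy and shape (the "replicated set"), recording boundary instructions
// where propagation stops and the consumers just outside the set. Used by
// GenerateReduceScatter below.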
void FindReplicateSet(
HloInstruction* cur, const AliasMap& alias_map, const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val, const StrategyMap& strategy_map,
const ShardingStrategy& strategy, const HloInstruction* output,
const bool do_all_gather_after_backward, HloInstruction*& transpose_inst,
InstructionSet& replicated_set, InstructionSet& boundary_set,
InstructionSet& consumer_set, ConstInstructionSet& visited) {
visited.insert(cur);
InstructionSet users = UsersWithAlias(cur, alias_map, output);
for (HloInstruction* consumer : users) {
const HloInstruction* shape_inst = cur;
if (consumer->opcode() == HloOpcode::kTranspose &&
(transpose_inst == nullptr ||
DimensionsEqual(transpose_inst->shape(), consumer->shape()))) {
shape_inst = consumer;
transpose_inst = consumer;
}
if (consumer->opcode() == HloOpcode::kTuple ||
(do_all_gather_after_backward && IsParameterConvert(consumer)) ||
GetShardingStrategy(consumer, strategy_map, cost_graph, s_val)
.output_sharding != strategy.output_sharding ||
!DimensionsEqual(consumer->shape(), shape_inst->shape())) {
boundary_set.insert(cur);
return;
}
}
replicated_set.insert(cur);
for (HloInstruction* consumer : users) {
if (!visited.contains(consumer)) {
consumer_set.insert(consumer);
FindReplicateSet(consumer, alias_map, cost_graph, s_val, strategy_map,
strategy, output, do_all_gather_after_backward,
transpose_inst, replicated_set, boundary_set,
consumer_set, visited);
}
}
for (size_t i = 0; i < cur->operand_count(); ++i) {
HloInstruction* operand = cur->mutable_operand(i);
if (!visited.contains(operand) && !IsAlwaysReplicated(operand) &&
GetShardingStrategy(operand, strategy_map, cost_graph, s_val)
.output_sharding == strategy.output_sharding &&
DimensionsEqual(operand->shape(), cur->shape())) {
FindReplicateSet(operand, alias_map, cost_graph, s_val, strategy_map,
strategy, output, do_all_gather_after_backward,
transpose_inst, replicated_set, boundary_set,
consumer_set, visited);
}
}
}
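// Rewrites suitable allreduce-based sharding strategies into reduce-scatter
// form: for each candidate instruction it grows a replicated set, reshards
// the set to the reduce-scatter output sharding, and inserts reshape-based
// all-gathers where the set's boundary requires the original layout.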
absl::Status GenerateReduceScatter(
const HloInstructionSequence& sequence, const AliasMap& alias_map,
const InstructionDepthMap& depth_map, const StrategyMap& strategy_map,
const CostGraph& cost_graph, absl::Span<const NodeStrategyIdx> s_val,
const ClusterEnvironment& cluster_env, const AutoShardingOption& option) {
const std::vector<HloInstruction*>& instructions = sequence.instructions();
const HloInstruction* output = instructions.back();
bool do_all_gather_after_backward = true;
bool use_all_reduce_for_grad_acc = option.reduce_scatter_grad_acc_friendly;
std::vector<HloInstruction*> insert_all_gather;
ConstInstructionSet modified;
for (HloInstruction* inst : instructions) {
if (!HasReduceScatterOpportunity(inst, strategy_map, cost_graph, s_val,
modified)) {
continue;
}
const ShardingStrategy& strategy =
GetShardingStrategy(inst, strategy_map, cost_graph, s_val);
const InputShardings& input_shardings =
GetInputShardings(inst, strategy_map, cost_graph, s_val);
if (!absl::StrContains(input_shardings.name, "allreduce")) {
continue;
}
InstructionSet replicated_set;
InstructionSet boundary_set;
InstructionSet consumer_set;
ConstInstructionSet visited;
HloInstruction* transpose_inst = nullptr;
visited.insert(output);
FindReplicateSet(inst, alias_map, cost_graph, s_val, strategy_map, strategy,
output, do_all_gather_after_backward, transpose_inst,
replicated_set, boundary_set, consumer_set, visited);
TryReduceWithCommonAncestor(replicated_set, boundary_set, consumer_set,
alias_map);
std::vector<HloInstruction*> need_all_gather;
for (HloInstruction* node : boundary_set) {
if (consumer_set.contains(node)) {
if (AllUsersAreReduce(node)) {
replicated_set.insert(node);
} else {
need_all_gather.push_back(node);
}
}
}
if (do_all_gather_after_backward && need_all_gather.size() == 1) {
HloInstruction* point = need_all_gather.front();
std::vector<HloInstruction*> path;
HloInstruction* root = point;
while (true) {
path.push_back(root);
if (root->opcode() == HloOpcode::kGetTupleElement) {
root = root->mutable_operand(0);
} else {
break;
}
}
if (root->opcode() == HloOpcode::kParameter) {
for (auto x : path) {
replicated_set.erase(x);
boundary_set.erase(x);
}
need_all_gather.clear();
for (auto x : replicated_set) {
auto iter = alias_map.find(x);
if (iter != alias_map.end() && iter->second == root) {
boundary_set.insert(x);
need_all_gather.push_back(x);
break;
}
}
}
}
int num_replicated_parameters = 0;
for (const HloInstruction* node : replicated_set) {
if (node->opcode() == HloOpcode::kParameter) {
num_replicated_parameters++;
}
}
for (const HloInstruction* to_split : need_all_gather) {
if (to_split->users().size() == 1 &&
to_split->users().front() == output && alias_map.contains(to_split)) {
num_replicated_parameters++;
}
}
VLOG(10) << inst->ToString(HloPrintOptions::ShortParsable()) << "\n";
VLOG(10) << "replicated set (#parameter: " << num_replicated_parameters
<< "):\n";
for (auto x : replicated_set) {
VLOG(10) << " " << x->ToString(HloPrintOptions::ShortParsable()) << "\n";
}
VLOG(10) << "boundary set (#incompatible: " << need_all_gather.size()
<< "):\n";
for (auto x : boundary_set) {
VLOG(10) << " " << x->ToString(HloPrintOptions::ShortParsable()) << " "
<< absl::c_linear_search(need_all_gather, x) << "\n";
}
if (num_replicated_parameters >= 1 && need_all_gather.size() <= 1 &&
replicated_set.size() >= 5) {
HloSharding output_spec =
GetReduceScatterOutput(inst, input_shardings, strategy, cluster_env);
if (IsUndefined(output_spec)) {
continue;
}
VLOG(10) << "SET: " << output_spec.ToString();
if (absl::StartsWith(input_shardings.name, "RR = RS x SR")) {
replicated_set.erase(inst);
}
if (use_all_reduce_for_grad_acc) {
UseAllReduceForGradAcc(replicated_set, inst);
}
for (HloInstruction* to_split : replicated_set) {
SetSharding(to_split, output_spec, inst, transpose_inst, modified);
}
if (!option.reduce_scatter_aggressive_partition) {
for (HloInstruction* to_split : need_all_gather) {
SetSharding(to_split, output_spec, inst, transpose_inst, modified);
if (!do_all_gather_after_backward && to_split->users().size() == 1 &&
to_split->users().front() == output &&
alias_map.contains(to_split)) {
SetSharding(alias_map.at(to_split), output_spec, inst,
transpose_inst, modified);
insert_all_gather.push_back(alias_map.at(to_split));
} else {
insert_all_gather.push_back(to_split);
}
}
} else {
for (HloInstruction* to_split : need_all_gather) {
SetSharding(to_split, output_spec, inst, transpose_inst, modified);
if (to_split->users().size() == 1 &&
to_split->users().front() == output &&
alias_map.contains(to_split)) {
HloInstruction* param = alias_map.at(to_split);
HloInstruction* cur = param;
while (cur->users().size() == 1) {
CHECK(cur->shape().IsArray());
SetSharding(cur, output_spec, inst, transpose_inst, modified);
cur = cur->users().front();
}
SetSharding(cur, output_spec, inst, transpose_inst, modified);
CHECK(!cur->users().empty());
HloInstruction* first_user = nullptr;
int64_t min_depth = ((int64_t)1) << 50;
for (const auto& x : cur->users()) {
auto iter = depth_map.find(x);
if (iter == depth_map.end()) {
LOG(FATAL) << "ERROR: " << x->ToString();
}
if (x->opcode() != HloOpcode::kConvolution &&
x->opcode() != HloOpcode::kDot) {
continue;
}
if (iter->second < min_depth) {
first_user = x;
min_depth = iter->second;
}
}
if (first_user != nullptr) {
HloInstruction* identity = inst->parent()->AddInstruction(
HloInstruction::CreateCustomCall(cur->shape(), {cur},
kIdentityMarker));
SetSharding(identity, output_spec, inst, transpose_inst,
modified);
ReplaceOperand(first_user, cur, identity);
}
}
}
}
}
VLOG(10) << "-----------------------done\n";
}
for (HloInstruction* inst : insert_all_gather) {
HloInstruction* replace_with = inst->parent()->AddInstruction(
HloInstruction::CreateReshape(inst->shape(), inst));
replace_with->set_sharding(
GetShardingStrategy(inst, strategy_map, cost_graph, s_val)
.output_sharding);
TF_RETURN_IF_ERROR(inst->ReplaceAllUsesWith(replace_with));
}
return absl::OkStatus();
}
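// Computes the output sharding an instruction would have under a
// reduce-scatter rewrite of its chosen strategy, keyed off the strategy name
// (e.g. "SR = SS x SR") for dot/convolution ops or the allreduce mesh
// dimension for rank-1 reductions. Returns Undefined() when the relevant
// tensor dimensions are not divisible by the mesh dimensions.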
HloSharding GetReduceScatterOutput(const HloInstruction* ins,
const InputShardings& input_shardings,
const ShardingStrategy& strategy,
const ClusterEnvironment& cluster_env) {
const DeviceMesh& device_mesh = cluster_env.device_mesh_;
const DeviceMesh& device_mesh_1d = cluster_env.device_mesh_1d_;
if (ins->opcode() == HloOpcode::kDot) {
const DotDimensionNumbers& dot_dnums = ins->dot_dimension_numbers();
int64_t space_base_dim = dot_dnums.lhs_batch_dimensions_size();
if (absl::StartsWith(input_shardings.name, "SR = SS x SR") ||
absl::StartsWith(input_shardings.name, "RS = RS x SS")) {
int mesh_dim0, mesh_dim1;
std::tie(mesh_dim0, mesh_dim1) = ParseMeshDims(input_shardings.name);
if (!IsDivisible(ins, device_mesh, {space_base_dim, space_base_dim + 1},
{mesh_dim0, mesh_dim1})) {
return Undefined();
}
return Tile(ins->shape(), {space_base_dim, space_base_dim + 1},
{mesh_dim0, mesh_dim1}, device_mesh);
}
if (absl::StartsWith(input_shardings.name, "SbR = SbSk x SbSk")) {
int mesh_dim0, mesh_dim1;
std::tie(mesh_dim0, mesh_dim1) = ParseMeshDims(input_shardings.name);
if (!IsDivisible(ins, device_mesh, {0, space_base_dim},
{mesh_dim0, mesh_dim1})) {
return Undefined();
}
return Tile(ins->shape(), {0, space_base_dim}, {mesh_dim0, mesh_dim1},
device_mesh);
}
if (absl::StartsWith(input_shardings.name, "RR = RS x SR")) {
int mesh_dim = absl::StrContains(input_shardings.name, "{0}") ? 0 : 1;
if (!IsDivisible(ins, device_mesh, {space_base_dim}, {mesh_dim})) {
return Undefined();
}
return Tile(ins->shape(), {space_base_dim}, {mesh_dim}, device_mesh);
}
if (absl::StartsWith(input_shardings.name, "R = Sk x Sk")) {
int mesh_dim = 0;
if (!IsDivisible(ins, device_mesh_1d, {space_base_dim}, {mesh_dim})) {
return Undefined();
}
return Tile(ins->shape(), {space_base_dim}, {mesh_dim}, device_mesh_1d);
}
} else if (ins->opcode() == HloOpcode::kConvolution) {
const ConvolutionDimensionNumbers& conv_dnums =
ins->convolution_dimension_numbers();
int out_batch_dim = conv_dnums.output_batch_dimension();
int out_out_channel_dim = conv_dnums.output_feature_dimension();
if (absl::StartsWith(input_shardings.name, "SR = SS x SR") ||
absl::StartsWith(input_shardings.name, "RS = RS x SS")) {
int mesh_dim0, mesh_dim1;
std::tie(mesh_dim0, mesh_dim1) = ParseMeshDims(input_shardings.name);
if (!IsDivisible(ins, device_mesh, {out_batch_dim, out_out_channel_dim},
{mesh_dim0, mesh_dim1})) {
return Undefined();
}
return Tile(ins->shape(), {out_batch_dim, out_out_channel_dim},
{mesh_dim0, mesh_dim1}, device_mesh);
}
if (absl::StartsWith(input_shardings.name, "R = Sk x Sk")) {
int mesh_dim = 0;
if (!IsDivisible(ins, device_mesh_1d, {out_batch_dim}, {mesh_dim})) {
return Undefined();
}
return Tile(ins->shape(), {out_batch_dim}, {mesh_dim}, device_mesh_1d);
}
} else if (ins->opcode() == HloOpcode::kReduce) {
CHECK_EQ(ins->shape().rank(), 1);
int mesh_dim;
if (absl::StrContains(input_shardings.name, "allreduce @ [0]")) {
mesh_dim = 0;
} else {
mesh_dim = 1;
}
if (strategy.output_sharding.IsReplicated()) {
if (absl::StrContains(input_shardings.name, "1d")) {
if (!IsDivisible(ins, device_mesh_1d, {0}, {mesh_dim})) {
return Undefined();
}
return Tile(ins->shape(), {0}, {mesh_dim}, device_mesh_1d);
}
if (!IsDivisible(ins, device_mesh, {0}, {mesh_dim})) {
return Undefined();
}
return Tile(ins->shape(), {0}, {mesh_dim}, device_mesh);
}
if (!IsDivisible(ins, device_mesh_1d, {0}, {0})) {
return Undefined();
}
auto tile_assignment = strategy.output_sharding.tile_assignment().Reshape(
{cluster_env.total_devices_});
return HloSharding::Tile(std::move(tile_assignment));
} else {
LOG(FATAL) << "Invalid instruction: " << ins->ToString();
}
return Undefined();
}
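// Returns true if `inst` is a candidate for the reduce-scatter rewrite:
// rank-1 reduces, convolutions, and dots with at least one sharded operand,
// provided neither the instruction nor its operands were already modified.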
bool HasReduceScatterOpportunity(const HloInstruction* inst,
const StrategyMap& strategy_map,
const CostGraph& cost_graph,
absl::Span<const NodeStrategyIdx> s_val,
const ConstInstructionSet& modified) {
for (const HloInstruction* operand : inst->operands()) {
if (modified.contains(operand)) {
return false;
}
}
if (modified.contains(inst)) {
return false;
}
if (inst->opcode() == HloOpcode::kReduce && inst->shape().rank() == 1) {
return true;
}
if (inst->opcode() == HloOpcode::kDot) {
if (GetShardingStrategy(inst->operand(0), strategy_map, cost_graph, s_val)
.output_sharding.IsReplicated() &&
GetShardingStrategy(inst->operand(1), strategy_map, cost_graph, s_val)
.output_sharding.IsReplicated()) {
return false;
}
return true;
}
if (inst->opcode() == HloOpcode::kConvolution) {
return true;
}
return false;
}
}
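// Saves user sharding annotations that must survive auto-sharding (e.g. on
// outfeed/send/recv ops, SPMD shard/unshard custom calls, replicated small
// tensors, and optionally entry parameters/roots), then clears the remaining
// annotations so the solver starts from a clean slate. Returns the saved
// shardings and whether the module was changed.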
absl::StatusOr<AutoShardingImplementation::SaveShardingAnnotationsResult>
AutoShardingImplementation::SaveAndRemoveShardingAnnotation(
HloModule* module,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const absl::flat_hash_set<std::string>& replicated_small_tensors,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
absl::flat_hash_map<std::string, std::vector<HloSharding>>
preserved_shardings;
absl::flat_hash_set<HloInstruction*> keep_inst;
for (const HloComputation* computation :
module->computations(execution_threads)) {
for (const auto inst : computation->instructions()) {
if (inst->opcode() == HloOpcode::kOutfeed ||
inst->opcode() == HloOpcode::kRecv ||
inst->opcode() == HloOpcode::kRecvDone ||
inst->opcode() == HloOpcode::kSend ||
inst->opcode() == HloOpcode::kSendDone) {
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            inst, /*save_for_copy_users=*/false, preserved_shardings));
continue;
}
if (spmd::IsInstructionBeforeSPMDFullToShardShapeCustomCall(inst) ||
spmd::IsSPMDShardToFullShapeCustomCall(inst)) {
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            inst, /*save_for_copy_users=*/false, preserved_shardings));
}
if (inst->has_sharding() &&
spmd::IsShardingMisaligned(inst->sharding(), inst->shape()) &&
!instructions_to_shard.contains(inst)) {
LOG(WARNING)
<< "Instruction " << inst->name()
<< " has a user sharding annotation that is misaligned. Shape: "
<< inst->shape().ToString()
<< ". Sharding:" << inst->sharding().ToString();
}
}
}
if (option_.preserve_shardings ==
AutoShardingOption::PreserveShardingsType::kKeepAllShardings) {
for (const HloComputation* computation :
module->computations(execution_threads)) {
for (const auto inst : computation->instructions()) {
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            inst, /*save_for_copy_users=*/true, preserved_shardings));
}
}
return SaveShardingAnnotationsResult{preserved_shardings, false};
}
bool module_is_changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
bool is_entry_computation = computation->IsEntryComputation();
for (HloInstruction* ins : computation->instructions()) {
if (replicated_small_tensors.count(ins->name())) {
keep_inst.insert(ins);
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            ins, /*save_for_copy_users=*/false, preserved_shardings));
continue;
}
if (option_.preserve_shardings ==
AutoShardingOption::PreserveShardingsType::
kKeepInputOutputShardings &&
is_entry_computation &&
(ins->opcode() == HloOpcode::kParameter || ins->IsRoot())) {
keep_inst.insert(ins);
        TF_RETURN_IF_ERROR(spmd::SaveShardingForInstruction(
            ins,
            /*save_for_copy_users=*/ins->opcode() == HloOpcode::kParameter,
            preserved_shardings));
continue;
}
if (ins->opcode() == HloOpcode::kCopy &&
keep_inst.find(ins->operand(0)) != keep_inst.end()) {
continue;
}
if (ins->opcode() == HloOpcode::kOutfeed ||
ins->opcode() == HloOpcode::kSend ||
ins->opcode() == HloOpcode::kSendDone ||
spmd::IsInstructionBeforeSPMDFullToShardShapeCustomCall(ins) ||
spmd::IsSPMDShardToFullShapeCustomCall(ins) ||
!instructions_to_shard.contains(ins)) {
continue;
}
if (ins->has_sharding()) {
module_is_changed |= true;
ins->clear_sharding();
}
}
}
return SaveShardingAnnotationsResult{preserved_shardings, module_is_changed};
}
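// Invokes the module's registered layout canonicalization callback, if any,
// and copies the resulting parameter and result layouts back into the entry
// computation layout.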
absl::Status AutoShardingImplementation::CanonicalizeLayouts(
HloModule* module) {
if (!module->layout_canonicalization_callback()) {
LOG(INFO) << "There is no registered layout_canonicalization_callback.";
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(auto layouts,
module->layout_canonicalization_callback()(*module));
std::vector<Shape>& argument_shapes = layouts.first;
Shape& result_shape = layouts.second;
ComputationLayout entry_computation_layout =
module->config().entry_computation_layout();
TF_RETURN_IF_ERROR(
entry_computation_layout.mutable_result_layout()->CopyLayoutFromShape(
result_shape));
CHECK_NE(entry_computation_layout.parameter_count(), 0);
CHECK_EQ(argument_shapes.size(), entry_computation_layout.parameter_count());
for (int32_t i = 0; i < entry_computation_layout.parameter_count(); i++) {
TF_RETURN_IF_ERROR(entry_computation_layout.mutable_parameter_layout(i)
->CopyLayoutFromShape(argument_shapes.at(i)));
}
*module->mutable_config().mutable_entry_computation_layout() =
entry_computation_layout;
return absl::OkStatus();
}
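// Determines which instructions auto-sharding should assign shardings to.
// Regions delimited by SPMDFullToShardShape/SPMDShardToFullShape custom calls
// are manually partitioned, so everything reachable from a full-to-shard call
// (through users, operands, and called computations) is excluded.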
absl::flat_hash_set<const HloInstruction*> ComputeInstructionsToShard(
const HloModule& module, const HloInstructionSequence& sequence) {
std::queue<const HloInstruction*> queue;
for (HloInstruction* instruction : sequence.instructions()) {
if (spmd::IsSPMDFullToShardShapeCustomCall(instruction)) {
for (const HloInstruction* user : instruction->users()) {
if (spmd::IsSPMDShardToFullShapeCustomCall(user)) {
continue;
}
queue.push(user);
}
}
}
absl::flat_hash_set<const HloInstruction*> visited;
while (!queue.empty()) {
const HloInstruction* instruction = queue.front();
queue.pop();
if (visited.contains(instruction)) {
continue;
}
visited.insert(instruction);
for (const HloComputation* computation :
instruction->called_computations()) {
for (const HloInstruction* parameter :
computation->parameter_instructions()) {
if (spmd::IsSPMDShardToFullShapeCustomCall(parameter) ||
spmd::IsSPMDFullToShardShapeCustomCall(parameter) ||
parameter == instruction || visited.contains(parameter)) {
continue;
}
queue.push(parameter);
}
}
for (const HloInstruction* user : instruction->users()) {
if (spmd::IsSPMDShardToFullShapeCustomCall(user) ||
spmd::IsSPMDFullToShardShapeCustomCall(user) ||
visited.contains(user)) {
continue;
}
queue.push(user);
}
for (const HloInstruction* operand : instruction->operands()) {
if (spmd::IsSPMDShardToFullShapeCustomCall(operand) ||
spmd::IsSPMDFullToShardShapeCustomCall(operand) ||
operand == instruction || visited.contains(operand)) {
continue;
}
queue.push(operand);
}
}
absl::flat_hash_set<const HloInstruction*> to_shard;
for (HloInstruction* instruction : sequence.instructions()) {
if (!visited.contains(instruction) &&
!spmd::IsSPMDFullToShardShapeCustomCall(instruction)) {
if (HloCollectiveInstruction::ClassOf(instruction)) {
LOG(FATAL) << "The module contains collective ops not contained within "
"a graph surrounded by SPMDFullToShardShape and "
"SPMDShardToFullShape custom calls. This case is not yet "
"supported.";
}
to_shard.insert(instruction);
}
}
return to_shard;
}
AutoShardingImplementation::AutoShardingImplementation(
const AutoShardingOption& option)
: option_(option) {}
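// Runs the MemoryTermReducer over the given liveness intervals, producing
// reduced intervals and grouped primitives for the solver. Returns the term
// counts before and after reduction.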
std::pair<int64_t, int64_t> ReduceMemoryTerms(
int64_t num_primitives,
const std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>>&
intervals,
std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>>&
reduced_intervals,
std::vector<absl::btree_set<int64_t>>& reduced_groups) {
int64_t num_lives = 0;
for (const auto& interval : intervals) {
if (interval.first > interval.second) continue;
num_lives = std::max(num_lives, interval.second + 1);
}
auto Intervals =
[intervals](int64_t prim_idx) -> std::pair<int64_t, int64_t> {
return intervals.at(prim_idx);
};
spmd::MemoryTermReducer reducer;
auto num_terms =
reducer.Reduce(num_lives, num_primitives, std::move(Intervals));
reduced_intervals = reducer.GetReducedIntervals();
reduced_groups = reducer.GetReducedGroups();
return num_terms;
}
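// The core auto-sharding pipeline: schedules the module, computes liveness,
// saves and strips user shardings, then for each (partial) mesh shape builds
// the strategy space and cost graph, reduces memory terms, solves for the
// lowest-cost strategies, and applies the resulting shardings (optionally
// rewriting allreduces into reduce-scatters). Instructions excluded from
// sharding are marked manual at the end.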
absl::StatusOr<bool> AutoShardingImplementation::RunAutoSharding(
HloModule* module,
const absl::flat_hash_set<std::string>& replicated_small_tensors,
const absl::flat_hash_set<absl::string_view>& execution_threads,
const absl::flat_hash_map<std::string, HloSharding>&
sharding_propagation_solution) {
if (!option_.enable) {
return false;
}
bool module_is_changed = false;
bool set_to_memory_lower_bound = (option_.memory_budget_per_device == 0);
absl::flat_hash_map<const HloInstruction*, std::vector<int64_t>>
unspecified_dims;
  TF_ASSIGN_OR_RETURN(
      bool changed,
      ProcessShardingInstruction(
          module, execution_threads, /*replace_sharding_with_copy=*/true,
          &unspecified_dims, nullptr, nullptr, nullptr, nullptr, nullptr,
          nullptr, true));
DumpHloModuleIfEnabled(*module, "after_spmd_calls");
if (changed) {
module_is_changed = true;
VLOG(3) << "CustomCalls with custom_call_target=Sharding are removed and "
"their shardings are moved to their input ops.";
} else {
VLOG(3) << "This workload does not have CustomCalls with "
"custom_call_target=Sharding.";
}
auto size_fn = [](const BufferValue& buffer) {
return spmd::ByteSizeOfShape(buffer.shape());
};
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
ScheduleModule(module, size_fn,
ComputationSchedulerToModuleScheduler(DFSMemoryScheduler),
execution_threads));
const HloComputation* entry_computation = module->entry_computation();
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module).value();
std::unique_ptr<HloModule> module_clone = module->Clone("");
TF_RETURN_IF_ERROR(
spmd::EnsureEntryComputationLayoutHasShapeLayouts(module_clone.get()));
  OptimizeInputOutputBufferAlias input_output_buffer_alias_optimizer(
      /*registered_buffer_donor_only=*/true);
CHECK_OK(input_output_buffer_alias_optimizer.Run(module_clone.get()));
const HloInputOutputAliasConfig& input_output_alias_config =
module_clone->input_output_alias_config();
spmd::AliasMap alias_map =
spmd::BuildAliasMap(module, input_output_alias_config);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, *alias_analysis, entry_computation));
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges = hlo_live_range->buffer_live_ranges();
spmd::LivenessSet liveness_set(hlo_live_range->schedule_end_time() + 1);
for (const auto& [hlo_value, live_range] : buffer_live_ranges) {
for (spmd::LivenessIdx i = live_range.start; i <= live_range.end; ++i) {
liveness_set[i].push_back(hlo_value);
}
}
VLOG(10) << hlo_live_range->ToString();
XLA_VLOG_LINES(10, spmd::PrintLivenessSet(liveness_set));
const HloInstructionSequence& sequence =
hlo_live_range->flattened_instruction_sequence();
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard =
ComputeInstructionsToShard(*module, sequence);
TF_ASSIGN_OR_RETURN(SaveShardingAnnotationsResult saved_sharding_result,
SaveAndRemoveShardingAnnotation(
module, instructions_to_shard,
replicated_small_tensors, execution_threads));
absl::flat_hash_map<std::string, std::vector<HloSharding>>
preserve_shardings = std::move(saved_sharding_result.preserved_shardings);
module_is_changed |= saved_sharding_result.module_is_changed;
absl::flat_hash_map<const HloInstruction*, int64_t>
instruction_execution_counts = spmd::ComputeInstructionExecutionCounts(
module, option_.loop_iteration_count_estimate);
spmd::DeviceMesh original_device_mesh(option_.device_mesh_shape);
original_device_mesh.SetValues(option_.device_mesh_ids);
const int64_t original_memory_budget = option_.memory_budget_per_device;
std::vector<std::vector<int64_t>> partial_mesh_shapes;
if (option_.solve_nd_sharding_iteratively) {
partial_mesh_shapes = spmd::DecomposeMeshShapes(option_.device_mesh_shape,
option_.device_mesh_alpha,
option_.device_mesh_beta);
} else {
partial_mesh_shapes = {option_.device_mesh_shape};
}
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
HloCostAnalysis::Options hlo_cost_analysis_options{
.shape_size = [](const Shape& shape) {
return spmd::ByteSizeOfShape(shape);
}};
HloCostAnalysis hlo_cost_analysis(hlo_cost_analysis_options);
CHECK_OK(module->entry_computation()->Accept(&hlo_cost_analysis));
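  // Solve iteratively over increasingly complete mesh shapes; only the final
  // (full) mesh uses the real device IDs and writes the final shardings.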
for (size_t mesh_idx = 0; mesh_idx < partial_mesh_shapes.size(); ++mesh_idx) {
const std::vector<int64_t>& mesh_shape = partial_mesh_shapes[mesh_idx];
LOG(INFO) << "Processing partial mesh shape: "
<< spmd::ToString(mesh_shape);
spmd::DeviceMesh device_mesh(mesh_shape);
if (mesh_idx != partial_mesh_shapes.size() - 1) {
device_mesh.FillIota(0);
      TF_ASSIGN_OR_RETURN(
          bool changed,
          spmd::AdjustShardingsWithPartialMeshShape(
              sequence.instructions(), instructions_to_shard, mesh_shape,
              original_device_mesh,
              /*crash_on_error=*/!option_.try_multiple_mesh_shapes));
LOG(INFO)
<< "Shardings are adjusted based on current partial mesh shape: "
<< changed;
} else {
device_mesh.SetValues(option_.device_mesh_ids);
}
spmd::ProfilingResult prof_result;
spmd::ClusterEnvironment cluster_env(
original_device_mesh, device_mesh, option_.device_mesh_alpha,
option_.device_mesh_beta, prof_result, option_);
XLA_VLOG_LINES(6, module->ToString());
const int64_t memory_lower_bound = spmd::MemoryBudgetLowerBound(
*module, instructions_to_shard, liveness_set, *alias_analysis,
device_mesh.num_elements(), preserve_shardings);
const float memory_lower_bound_gb =
static_cast<float>(memory_lower_bound) / (1024 * 1024 * 1024);
LOG(INFO) << "Memory consumption lower bound is " << memory_lower_bound_gb
<< " GB.";
if (set_to_memory_lower_bound) {
LOG(INFO)
<< "--xla_tpu_auto_spmd_partitioning_memory_budget_gb is 0, and "
"--xla_tpu_auto_spmd_partitioning_memory_budget_ratio is "
<< option_.memory_budget_ratio
<< ", so setting option.memory_budget_per_device to "
<< memory_lower_bound_gb << " x " << option_.memory_budget_ratio
<< " = " << memory_lower_bound_gb * option_.memory_budget_ratio
<< " GB";
option_.memory_budget_per_device =
memory_lower_bound * std::abs(option_.memory_budget_ratio);
if (option_.memory_budget_ratio < 0) {
option_.memory_overbudget_coeff = -1.0;
}
} else if (option_.memory_budget_per_device > 0) {
option_.memory_budget_per_device = original_memory_budget *
original_device_mesh.num_elements() /
device_mesh.num_elements();
LOG(INFO) << "Setting option.memory_budget_per_device to "
<< option_.memory_budget_per_device;
}
spmd::InstructionDepthMap ins_depth_map;
ins_depth_map = spmd::BuildInstructionDepthMap(sequence);
spmd::StrategyMap strategy_map;
spmd::StrategyGroups strategy_groups;
spmd::AssociativeDotPairs associative_dot_pairs;
TF_ASSIGN_OR_RETURN(
std::tie(strategy_map, strategy_groups, associative_dot_pairs),
BuildStrategyAndCost(sequence, module, instructions_to_shard,
instruction_execution_counts, ins_depth_map,
alias_map, cluster_env, option_, *call_graph,
hlo_cost_analysis,
option_.try_multiple_mesh_shapes));
spmd::AliasSet alias_set =
spmd::BuildAliasSet(module, input_output_alias_config, strategy_map);
TF_RETURN_IF_ERROR(RemoveFollowersIfMismatchedStrategies(
alias_set, strategy_groups, sequence,
!option_.try_multiple_mesh_shapes));
XLA_VLOG_LINES(8, PrintStrategyMap(strategy_map, sequence));
spmd::CostGraph cost_graph(strategy_groups, associative_dot_pairs);
cost_graph.Simplify(option_.simplify_graph);
std::vector<absl::flat_hash_set<spmd::EdgeIdx>> node_to_edges(
strategy_groups.size());
spmd::EdgeIdx edge_idx = 0;
for (const auto& [edge, _] : cost_graph.edge_costs_) {
node_to_edges[edge.second].insert(edge_idx);
++edge_idx;
}
const absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges = hlo_live_range->buffer_live_ranges();
absl::flat_hash_map<spmd::NodeIdx, HloLiveRange::TimeBound>
node_to_time_bound;
absl::flat_hash_map<spmd::EdgeIdx, HloLiveRange::TimeBound>
edge_to_time_bound;
for (const auto& [value, time_bound] : buffer_live_ranges) {
const HloInstruction* instruction = value->instruction();
const ShapeIndex& index = value->index();
if (instruction->shape().IsTuple() && index.empty()) continue;
const spmd::StrategyGroup* strategy_group =
strategy_map.at(instruction).get();
const spmd::NodeIdx node_idx =
strategy_group->GetSubStrategyGroup(index)->node_idx;
if (node_idx < 0) continue;
node_to_time_bound[node_idx] = time_bound;
for (const spmd::EdgeIdx edge_idx : node_to_edges[node_idx]) {
edge_to_time_bound[edge_idx] = time_bound;
}
}
std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>> node_intervals,
edge_intervals;
for (spmd::NodeIdx node_idx = 0; node_idx < strategy_groups.size();
++node_idx) {
std::pair<spmd::LivenessIdx, spmd::LivenessIdx> interval;
if (auto time_bound = node_to_time_bound.find(node_idx);
time_bound != node_to_time_bound.end()) {
interval.first = time_bound->second.start;
interval.second = time_bound->second.end;
} else {
interval.first = std::numeric_limits<int64_t>::max();
interval.second = 0;
}
node_intervals.push_back(std::move(interval));
}
for (spmd::EdgeIdx edge_idx = 0; edge_idx < cost_graph.edge_costs_.size();
++edge_idx) {
std::pair<spmd::LivenessIdx, spmd::LivenessIdx> interval;
if (auto time_bound = edge_to_time_bound.find(edge_idx);
time_bound != edge_to_time_bound.end()) {
interval.first = time_bound->second.start;
interval.second = time_bound->second.end;
} else {
interval.first = std::numeric_limits<int64_t>::max();
interval.second = 0;
}
edge_intervals.push_back(std::move(interval));
}
const absl::Time term_reduction_start_time = absl::Now();
std::vector<std::pair<spmd::LivenessIdx, spmd::LivenessIdx>>
reduced_node_intervals, reduced_edge_intervals;
std::vector<absl::btree_set<int64_t>> reduced_node_groups,
reduced_edge_groups;
auto num_node_terms =
ReduceMemoryTerms(strategy_groups.size(), node_intervals,
reduced_node_intervals, reduced_node_groups);
auto num_edge_terms =
ReduceMemoryTerms(cost_graph.edge_costs_.size(), edge_intervals,
reduced_edge_intervals, reduced_edge_groups);
const absl::Time term_reduction_end_time = absl::Now();
const auto term_reduction_duration =
term_reduction_end_time - term_reduction_start_time;
LOG(INFO) << "Memory Term Reducer took "
<< absl::ToInt64Milliseconds(term_reduction_duration)
<< " ms and reduced the number of terms from "
<< num_node_terms.first + num_edge_terms.first << " to "
<< num_node_terms.second + num_edge_terms.second;
std::string request_name = absl::StrCat("mesh_idx_", mesh_idx);
TF_ASSIGN_OR_RETURN(
spmd::AutoShardingSolverOutput output,
Solve(*module, *hlo_live_range, strategy_map, strategy_groups,
cost_graph, alias_set, reduced_node_intervals,
reduced_edge_intervals, reduced_node_groups, reduced_edge_groups,
option_, request_name, sharding_propagation_solution));
if (mesh_idx == partial_mesh_shapes.size() - 1) {
this->solver_optimal_objective_value_ = output.cost;
} else {
TF_RET_CHECK(output.is_optimal)
<< "The solver did not find an optimal solution for a partial mesh "
<< "shape.";
}
XLA_VLOG_LINES(5, PrintAutoShardingSolution(
sequence, liveness_set, strategy_map, strategy_groups,
cost_graph, output.s_val, output.cost));
XLA_VLOG_LINES(6, PrintSolutionMemoryUsage(liveness_set, strategy_map,
cost_graph, output.s_val));
if (option_.prefer_reduce_scatter) {
TF_RETURN_IF_ERROR(GenerateReduceScatter(
sequence, alias_map, ins_depth_map, strategy_map, cost_graph,
output.s_val, cluster_env, option_));
}
SetHloSharding(sequence, instructions_to_shard, strategy_map, cost_graph,
output.s_val, (mesh_idx == partial_mesh_shapes.size() - 1));
if (mesh_idx == partial_mesh_shapes.size() - 1) {
TF_RETURN_IF_ERROR(spmd::SetHloShardingPostProcessing(
sequence, instructions_to_shard, preserve_shardings));
      TF_RETURN_IF_ERROR(InsertReshardReshapes(
          sequence, instructions_to_shard, strategy_map, cost_graph,
          output.s_val, cluster_env,
          /*crash_at_error=*/!option_.try_multiple_mesh_shapes,
          option_.insert_resharding_reshapes_for_non_dot_ops,
          preserve_shardings));
} else {
spmd::RecoverShardingsFromPartialMesh(sequence, preserve_shardings);
}
}
if (VLOG_IS_ON(1)) {
spmd::CheckHloSharding(sequence, instructions_to_shard,
original_device_mesh.num_elements());
}
module_is_changed = true;
if (VLOG_IS_ON(1)) {
spmd::CheckUserShardingPreservation(module, preserve_shardings);
}
TF_RETURN_IF_ERROR(CanonicalizeLayouts(module));
for (HloInstruction* instruction : sequence.instructions()) {
if (!instructions_to_shard.contains(instruction)) {
instruction->set_sharding(
HloSharding::Single(instruction->shape(), HloSharding::Manual()));
}
}
for (HloInstruction* instruction : sequence.instructions()) {
if (spmd::IsSPMDFullToShardShapeCustomCall(instruction)) {
CHECK(instruction->has_sharding());
CHECK(instruction->sharding().IsManual());
CHECK(instruction->operand(0)->has_sharding());
CHECK(!instruction->operand(0)->sharding().IsManual());
} else if (spmd::IsSPMDShardToFullShapeCustomCall(instruction)) {
CHECK(instruction->has_sharding());
CHECK(!instruction->sharding().IsManual());
CHECK(instruction->operand(0)->has_sharding());
CHECK(instruction->operand(0)->sharding().IsManual())
<< instruction->ToString();
}
}
return module_is_changed;
}
bool ModuleIsManuallyPartitioned(const HloModule* module) {
for (const HloComputation* computation : module->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (spmd::IsSPMDFullToShardShapeCustomCall(instruction) ||
spmd::IsSPMDShardToFullShapeCustomCall(instruction)) {
return true;
}
}
}
return false;
}
bool IsSmallTensor(const HloInstruction* ins,
const AutoShardingOption& option) {
return spmd::ByteSizeOfShape(ins->shape()) <= option.small_tensor_byte_size;
}
bool HasUnsupportedNestedTuples(const HloModule& module) {
for (const auto* computation : module.computations()) {
for (const auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConditional) {
for (const HloInstruction* operand : instruction->operands()) {
if (ShapeUtil::IsNestedTuple(operand->shape())) {
return true;
}
}
}
}
}
return false;
}
std::unique_ptr<HloModule> CloneModule(const HloModule* module) {
auto module_clone = module->Clone("");
module_clone->set_layout_canonicalization_callback(
module->layout_canonicalization_callback());
return module_clone;
}
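// Replaces to_module's computations with those of from_module (which must be
// a clone with the same computation count), carrying over the entry
// computation layout, alias config, and buffer donor config.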
absl::Status MoveComputationsFromModuleToModule(HloModule* from_module,
HloModule* to_module) {
TF_RETURN_IF_ERROR(from_module->RemoveUnusedComputations());
const std::vector<HloComputation*>& original_module_computations =
to_module->MakeComputationSorted();
const std::vector<HloComputation*>& clone_module_computations =
from_module->MakeComputationSorted();
if (original_module_computations.size() != clone_module_computations.size()) {
return absl::InternalError(
"The cloned and the original modules do not have the same number "
"of computations. This is a bug and should be reported.");
}
absl::flat_hash_map<HloComputation*, HloComputation*>
computation_replacements;
for (size_t i = 0; i < original_module_computations.size(); ++i) {
HloComputation* original_computation = original_module_computations[i];
HloComputation* new_computation = clone_module_computations[i];
computation_replacements[original_computation] = new_computation;
}
to_module->ReplaceComputations(computation_replacements);
to_module->MoveComputationsFrom(from_module);
*to_module->mutable_config().mutable_entry_computation_layout() =
from_module->entry_computation_layout();
to_module->input_output_alias_config() =
from_module->input_output_alias_config();
to_module->buffer_donor_config() = from_module->buffer_donor_config();
return absl::OkStatus();
}
AutoSharding::AutoSharding(const AutoShardingOption& option)
: option_(option) {}
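// Dumps the module before the pass runs, bumps the invocation metric, and
// returns the pass start time.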
absl::Time DumpModuleAndRecordPassStart(const HloModule* module) {
XLA_VLOG_LINES(6,
absl::StrCat("Before auto sharding:\n", module->ToString()));
DumpHloModuleIfEnabled(*module, "before_auto_spmd_sharding");
#if !defined(__APPLE__)
metrics::RecordAutoShardingInvocations();
#endif
return absl::Now();
}
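// Logs and records how long the pass took, then dumps the resulting module.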
void RecordPassEndAndDumpModule(absl::Time start_time,
const HloModule* module) {
absl::Time end_time = absl::Now();
absl::Duration duration = end_time - start_time;
LOG(INFO) << "Auto Sharding took " << absl::ToInt64Seconds(duration)
<< " seconds";
#if !defined(__APPLE__)
metrics::RecordAutoShardingCompilationTime(
absl::ToInt64Microseconds(duration));
#endif
XLA_VLOG_LINES(6, absl::StrCat("After auto sharding:\n", module->ToString()));
DumpHloModuleIfEnabled(*module, "after_auto_spmd_sharding");
}
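// Pass entry point: replicates small tensors, enumerates candidate device
// mesh shapes, solves for shardings on a clone per mesh shape, and commits
// the solution with the lowest solver objective value back into `module`.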
absl::StatusOr<bool> AutoSharding::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!option_.enable) {
return false;
}
LOG(INFO) << "Starting the auto sharding pass";
if (HasUnsupportedNestedTuples(*module)) {
LOG(FATAL) << "The input module contains nested tuples "
"which we do not currently support well. See b/332951306 to "
"track progress on this.";
return false;
}
absl::Time start_time = DumpModuleAndRecordPassStart(module);
TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
TF_RETURN_IF_ERROR(option_.CheckAndSetup());
LOG(INFO) << "AutoShardingOptions:\n" << option_.ToString();
absl::flat_hash_set<std::string> replicated_small_tensors;
if (option_.small_tensor_byte_size > 0) {
for (auto computation : module->computations()) {
for (auto instruction : computation->instructions()) {
if (!instruction->has_sharding() &&
IsSmallTensor(instruction, option_)) {
VLOG(1) << "Replicated small tensor: " << instruction->name();
instruction->set_sharding(
instruction->shape().IsTuple()
? HloSharding::SingleTuple(instruction->shape(),
HloSharding::Replicate())
: HloSharding::Replicate());
replicated_small_tensors.insert(std::string(instruction->name()));
}
}
}
}
bool module_is_manually_partitioned = ModuleIsManuallyPartitioned(module);
if (module_is_manually_partitioned) {
HloConstantSplitter constant_splitter(
option_.enable_expression_constant_splitter,
spmd::OpEncountersShardToFull);
CHECK_OK(constant_splitter.Run(module, execution_threads));
CHECK_OK(HloDCE().Run(module, execution_threads));
}
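  // Decide which device mesh shapes to try: just the user-provided shape, or
  // an inferred/enumerated set when multiple shapes are allowed or the module
  // is manually partitioned.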
std::vector<std::vector<int64_t>> mesh_shapes;
if (option_.try_multiple_mesh_shapes || module_is_manually_partitioned) {
mesh_shapes = spmd::InferOrEnumerateMeshShapesToTry(
*module, Product(option_.device_mesh_shape),
option_.device_mesh_shape.size(),
        /*symmetrical_mesh_dims=*/false);
} else {
mesh_shapes.push_back(option_.device_mesh_shape);
}
CHECK(option_.try_multiple_mesh_shapes || mesh_shapes.size() == 1)
<< "Auto-sharding cannot infer a single appropriate mesh shape for this "
"HLO, and AutoShardingption::try_multiple_mesh_shapes is set to "
"false. Please re-run with the option set to true.";
if (module->entry_computation()->num_parameters() > 0) {
HloInstruction* parameter_instruction =
module->entry_computation()->parameter_instruction(0);
if (parameter_instruction->shape().IsTuple() &&
parameter_instruction->has_sharding()) {
CHECK_EQ(module->entry_computation()->num_parameters(), 1);
parameter_instruction->set_sharding(
spmd::ReplaceGivenShardingsWithUnknownForTuple(
parameter_instruction->sharding(), parameter_instruction->shape(),
module->config()
.allow_spmd_sharding_propagation_to_parameters()));
}
}
HloInstruction* root_instruction =
module->entry_computation()->root_instruction();
if (root_instruction->shape().IsTuple() && root_instruction->has_sharding()) {
root_instruction->set_sharding(
spmd::ReplaceGivenShardingsWithUnknownForTuple(
root_instruction->sharding(), root_instruction->shape(),
module->config().allow_spmd_sharding_propagation_to_output()));
}
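  // Optionally run sharding propagation on a clone and record its
  // per-instruction shardings; these serve as the default shardings handed to
  // the solver.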
absl::flat_hash_map<std::string, HloSharding> sharding_propagation_solution;
if (option_.use_sharding_propagation_for_default_shardings) {
std::unique_ptr<HloModule> module_with_default_solution =
CloneModule(module);
    ShardingPropagation sharding_propagation(
        /*is_spmd=*/true, /*propagate_metadata=*/false,
        module->config().allow_spmd_sharding_propagation_to_output(),
        module->config().allow_spmd_sharding_propagation_to_parameters(),
        /*cse_prevention_only=*/false,
        /*sharding_helper=*/nullptr);
CHECK_OK(sharding_propagation.Run(module_with_default_solution.get(),
execution_threads));
VLOG(6) << module_with_default_solution->ToString();
for (const auto computation :
module_with_default_solution->computations()) {
for (const auto instruction : computation->instructions()) {
if (instruction->has_sharding()) {
sharding_propagation_solution.insert(
{std::string(instruction->name()), instruction->sharding()});
}
}
}
}
bool module_is_changed = false;
VLOG(1) << "Original mesh shape "
<< spmd::ToString(option_.device_mesh_shape);
double min_objective_value = std::numeric_limits<double>::max();
int min_mesh_shape_index = -1;
std::unique_ptr<HloModule> min_mesh_shape_module;
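  // Solve for each candidate mesh shape on a clone of the module, keeping the
  // clone that achieves the minimal solver objective value.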
for (size_t i = 0; i < mesh_shapes.size(); ++i) {
VLOG(1) << "Trying mesh shape " << spmd::ToString(mesh_shapes[i]);
AutoShardingOption this_option = option_;
this_option.device_mesh_shape = mesh_shapes[i];
if (this_option.device_mesh_shape.size() !=
this_option.device_mesh_alpha.size()) {
this_option.device_mesh_alpha.clear();
this_option.device_mesh_beta.clear();
TF_RETURN_IF_ERROR(this_option.CheckAndSetup());
}
auto pass = std::make_unique<AutoShardingImplementation>(this_option);
std::unique_ptr<HloModule> module_clone = CloneModule(module);
absl::StatusOr<bool> pass_result =
pass->RunAutoSharding(module_clone.get(), replicated_small_tensors,
execution_threads, sharding_propagation_solution);
if (!pass_result.ok()) {
VLOG(1) << "Mesh shape " << spmd::ToString(mesh_shapes[i])
<< " led to the following error: "
<< pass_result.status().message();
continue;
}
double this_mesh_objective_value = pass->GetSolverOptimalObjectiveValue();
VLOG(1) << "Mesh shape " << spmd::ToString(mesh_shapes[i])
<< " has objective value " << this_mesh_objective_value;
if (this_mesh_objective_value >= 0 &&
min_objective_value > this_mesh_objective_value) {
min_mesh_shape_index = i;
min_mesh_shape_module = std::move(module_clone);
min_objective_value = this_mesh_objective_value;
CHECK_OK(pass_result);
module_is_changed = *pass_result;
}
}
std::string trying_to_find =
option_.try_multiple_mesh_shapes
? "a device mesh (and the corresponding shardings)"
: "shardings";
CHECK_GE(min_mesh_shape_index, 0)
<< "The auto-sharding pass could not find " << trying_to_find
<< " that works for this input. This could be the result of a low memory "
"budget (please refer to the "
"`--xla_tpu_auto_spmd_partitioning_memory_budget_ratio` flag to set a "
"higher budget). If you think you have set a reasonably large memory "
"budget, please report this as a bug.";
solver_optimal_objective_value_ = min_objective_value;
if (module_is_changed) {
VLOG(1) << "Choosing mesh shape "
<< spmd::ToString(mesh_shapes[min_mesh_shape_index])
<< " which had the minimal solver objective value of "
<< min_objective_value;
chosen_mesh_shape_ = mesh_shapes[min_mesh_shape_index];
TF_RETURN_IF_ERROR(MoveComputationsFromModuleToModule(
min_mesh_shape_module.get(), module));
}
RecordPassEndAndDumpModule(start_time, module);
return module_is_changed;
}
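// Minimal usage sketch (hypothetical driver code; assumes a valid
// HloModule* `module` and a 2x2 device mesh; adjust the option fields to the
// target topology):
//
//   AutoShardingOption option;
//   option.enable = true;
//   option.device_mesh_shape = {2, 2};
//   option.device_mesh_alpha = {1.0, 1.0};
//   option.device_mesh_beta = {0.01, 1.0};
//   TF_ASSIGN_OR_RETURN(bool changed, AutoSharding(option).Run(module));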
} | #include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_cost_graph.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_device_mesh.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_util.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace spmd {
namespace {
using ::testing::AnyOf;
using ::testing::Contains;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FieldsAre;
using ::testing::IsEmpty;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::Not;
using ::testing::Pair;
using ::testing::ResultOf;
using ::testing::UnorderedElementsAre;
TEST(DeviceMeshTest, IotaDeviceMesh2DStartsWith0) {
DeviceMesh device_mesh({2, 4});
device_mesh.FillIota(0);
EXPECT_TRUE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 4));
EXPECT_EQ(device_mesh.num_elements(), 8);
}
TEST(DeviceMeshTest, IotaDeviceMesh3DStartsWithNonZero) {
DeviceMesh device_mesh({2, 4, 8});
device_mesh.FillIota(55);
EXPECT_TRUE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 4, 8));
EXPECT_EQ(device_mesh.num_elements(), 64);
}
TEST(DeviceMeshTest, ExplicitSetValuesInferIotaIotaValues) {
DeviceMesh device_mesh({2, 4, 8});
std::vector<int64_t> device_mesh_values(64);
absl::c_iota(device_mesh_values, 34);
device_mesh.SetValues(device_mesh_values);
EXPECT_TRUE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 4, 8));
EXPECT_EQ(device_mesh.num_elements(), 64);
}
TEST(DeviceMeshTest, ExplicitSetValuesInferIotaNonIotaValues) {
DeviceMesh device_mesh({2, 4, 8});
std::vector<int64_t> device_mesh_values(64);
absl::c_iota(device_mesh_values, 34);
device_mesh_values[54] = 54;
device_mesh.SetValues(device_mesh_values);
EXPECT_FALSE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 4, 8));
EXPECT_EQ(device_mesh.num_elements(), 64);
}
TEST(DeviceMeshTest, ReshapeTestWithoutIota) {
DeviceMesh device_mesh({2, 4, 8});
std::vector<int64_t> device_mesh_values(64);
absl::c_iota(device_mesh_values, 34);
device_mesh_values[54] = 54;
device_mesh.SetValues(device_mesh_values);
EXPECT_FALSE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 4, 8));
EXPECT_EQ(device_mesh.num_elements(), 64);
device_mesh.Reshape({2, 32});
EXPECT_FALSE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 32));
EXPECT_EQ(device_mesh.num_elements(), 64);
}
TEST(DeviceMeshTest, ReshapeTestWithIota) {
DeviceMesh device_mesh({2, 4, 8});
std::vector<int64_t> device_mesh_values(64);
absl::c_iota(device_mesh_values, 34);
device_mesh.SetValues(device_mesh_values);
EXPECT_TRUE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 4, 8));
EXPECT_EQ(device_mesh.num_elements(), 64);
device_mesh.Reshape({2, 32});
EXPECT_TRUE(device_mesh.is_iota);
EXPECT_THAT(device_mesh.dimensions(), ElementsAre(2, 32));
EXPECT_EQ(device_mesh.num_elements(), 64);
}
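// Fixture with small dot/elementwise modules plus helpers that run the pass
// and verify the tiling of the root instruction's sharding.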
class AutoShardingTest : public HloTestBase {
protected:
const absl::string_view kDotHloString = R"(
HloModule module
ENTRY matmul {
parameter.1 = f32[32,64]{1,0} parameter(0)
parameter.2 = f32[64,128]{1,0} parameter(1)
ROOT root = f32[32,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
const absl::string_view kAddHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,32,64]{2,1,0} parameter(0)
%param1 = f32[16,32,64]{2,1,0} parameter(1)
ROOT root = f32[16,32,64]{2,1,0} add(%param0, %param1)
})";
void RunMatMulAutoShardingWithOptions(
AutoShardingOption option, size_t expected_num_tiles,
size_t expected_sharded_dimensions = 1) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kDotHloString));
RunAutoShardingWithOptions(module.get(), option, expected_num_tiles,
expected_sharded_dimensions);
}
void RunAddAutoShardingWithOptions(AutoShardingOption option,
size_t expected_num_tiles,
size_t expected_sharded_dimensions = 1) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kAddHloString));
RunAutoShardingWithOptions(module.get(), option, expected_num_tiles,
expected_sharded_dimensions);
}
void RunAutoShardingWithOptions(HloModule* module, AutoShardingOption option,
size_t expected_num_tiles,
size_t expected_sharded_dimensions = 1) {
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module));
EXPECT_TRUE(changed);
auto* root = FindInstruction(module, "root");
ASSERT_NE(root, nullptr);
EXPECT_EQ(root->sharding().NumTiles(), expected_num_tiles);
EXPECT_EQ(VectorGreaterThanOneElementCount(
root->sharding().tile_assignment().dimensions(),
root->sharding().ReplicateOnLastTileDim()),
expected_sharded_dimensions);
}
void RunMatMulAutoShardingWithOptionsExpectFail(AutoShardingOption option) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kDotHloString));
RunAutoShardingWithOptionsExpectFail(module.get(), option);
}
void RunAutoShardingWithOptionsExpectFail(HloModule* module,
AutoShardingOption option) {
EXPECT_FALSE(AutoSharding(option).Run(module).ok());
}
void RunMatMulAutoShardingWithOptionsNoDeviceIds(
AutoShardingOption option, std::vector<int64_t> expected_tile,
bool expected_last_dim_replicate = false) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kDotHloString));
RunAutoShardingWithOptionsNoDeviceIds(module.get(), option, expected_tile,
expected_last_dim_replicate);
}
void RunAutoShardingWithOptionsNoDeviceIds(HloModule* module,
AutoShardingOption option,
std::vector<int64_t> expected_tile,
bool expected_last_dim_replicate) {
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module));
EXPECT_TRUE(changed);
HloInstruction* root = FindInstruction(module, "root");
ASSERT_NE(root, nullptr);
EXPECT_EQ(root->sharding().ReplicateOnLastTileDim(),
expected_last_dim_replicate);
EXPECT_THAT(root->sharding().tile_assignment().dimensions(),
ElementsAreArray(expected_tile));
}
};
TEST_F(AutoShardingTest, MatmulMeshShape1DMeshShape) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {4};
RunMatMulAutoShardingWithOptions(option, 4);
option.device_mesh_shape = {8};
RunMatMulAutoShardingWithOptions(option, 8);
}
TEST_F(AutoShardingTest, MatmulMeshShape1DMeshShapeIds) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {4};
option.device_mesh_ids = {0, 1, 2, 3};
RunMatMulAutoShardingWithOptions(option, 4);
option.device_mesh_shape = {8};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
RunMatMulAutoShardingWithOptions(option, 8);
}
TEST_F(AutoShardingTest, MatmulMeshShape1DAllOptions) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {4};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0};
option.device_mesh_beta = {1.0};
RunMatMulAutoShardingWithOptions(option, 4);
option.device_mesh_shape = {8};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
option.device_mesh_alpha = {1.0};
option.device_mesh_beta = {1.0};
RunMatMulAutoShardingWithOptions(option, 8);
}
TEST_F(AutoShardingTest, MatmulMeshShape2DAllOptions) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
RunMatMulAutoShardingWithOptions(option, 4, 2);
option.enable = true;
option.device_mesh_shape = {1, 4};
RunMatMulAutoShardingWithOptions(option, 4);
option.enable = true;
option.device_mesh_shape = {4, 1};
RunMatMulAutoShardingWithOptions(option, 4);
}
TEST_F(AutoShardingTest, MatmulMeshShape2DNoAlphaBeta) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.allow_mixed_mesh_shape = false;
RunMatMulAutoShardingWithOptions(option, 4, 2);
option.enable = true;
option.device_mesh_shape = {1, 4};
RunMatMulAutoShardingWithOptions(option, 4);
option.enable = true;
option.device_mesh_shape = {4, 1};
RunMatMulAutoShardingWithOptions(option, 4);
}
TEST_F(AutoShardingTest, MatmulMeshShape2DNoAlphaBetaMeshIds) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.allow_mixed_mesh_shape = false;
RunMatMulAutoShardingWithOptions(option, 4, 2);
option.enable = true;
option.device_mesh_shape = {1, 4};
RunMatMulAutoShardingWithOptions(option, 4);
option.enable = true;
option.device_mesh_shape = {4, 1};
RunMatMulAutoShardingWithOptions(option, 4);
}
TEST_F(AutoShardingTest, MatmulMeshShape2DNoMeshIds) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
RunMatMulAutoShardingWithOptions(option, 4, 2);
option.enable = true;
option.device_mesh_shape = {1, 4};
RunMatMulAutoShardingWithOptions(option, 4);
option.enable = true;
option.device_mesh_shape = {4, 1};
RunMatMulAutoShardingWithOptions(option, 4);
}
TEST_F(AutoShardingTest, MatmulMeshShape3DAllOptions) {
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.allow_recompute_heavy_op = false;
option.device_mesh_shape = {2, 2, 2};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 0.5, 1.0};
RunMatMulAutoShardingWithOptionsNoDeviceIds(option, {2, 2, 2}, true);
}
TEST_F(AutoShardingTest, Matmul3DMeshShape2DSharding) {
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {1, 2, 2};
RunMatMulAutoShardingWithOptions(option, 4, 2);
option.device_mesh_shape = {2, 1, 2};
RunMatMulAutoShardingWithOptions(option, 4, 2);
option.device_mesh_shape = {2, 2, 1};
RunMatMulAutoShardingWithOptions(option, 4, 2);
}
TEST_F(AutoShardingTest, AddMeshShape3DAllOptions) {
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {1, 2, 4};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 0.5, 1.0};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {4, 1, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {1, 4, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
}
TEST_F(AutoShardingTest, AddMeshShape3DNoAlphaBeta) {
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {1, 2, 4};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {4, 1, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {1, 4, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
}
TEST_F(AutoShardingTest, AddMeshShape3DNoAlphaBetaMeshIds) {
AutoShardingOption option;
option.allow_mixed_mesh_shape = false;
option.enable = true;
option.device_mesh_shape = {1, 2, 4};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {4, 1, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {1, 4, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
}
TEST_F(AutoShardingTest, AddMeshShape3DNoMeshIds) {
AutoShardingOption option;
option.allow_mixed_mesh_shape = false;
option.enable = true;
option.device_mesh_shape = {1, 2, 4};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 0.5, 1.0};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {4, 1, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
option.device_mesh_shape = {1, 4, 2};
RunAddAutoShardingWithOptions(option, 8, 2);
}
TEST_F(AutoShardingTest, MatMulMeshShape2D) {
AutoShardingOption option;
option.allow_mixed_mesh_shape = false;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
RunMatMulAutoShardingWithOptions(option, 4, 2);
}
TEST_F(AutoShardingTest, AddMeshShape2D) {
AutoShardingOption option;
option.allow_mixed_mesh_shape = false;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
RunAddAutoShardingWithOptions(option, 4, 2);
}
TEST_F(AutoShardingTest, AddMeshShape3D) {
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {2, 2, 2};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 0.5, 1.0};
RunAddAutoShardingWithOptions(option, 2);
}
TEST_F(AutoShardingTest, LargeSize) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {1, 2, 4, 7};
option.device_mesh_alpha = {1.0, 1.0, 1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0, 1.0, 1.0};
option.memory_budget_per_device = (8192 + 8192 * 2 + 8192 * 4 / 8);
RunMatMulAutoShardingWithOptions(option, 56, 1);
}
TEST_F(AutoShardingTest, InvalidOptions) {
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {1, 2, 4};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 0.5};
EXPECT_FALSE(option.CheckAndSetup().ok());
RunMatMulAutoShardingWithOptionsExpectFail(option);
AutoShardingOption empty_option;
empty_option.enable = true;
EXPECT_FALSE(empty_option.CheckAndSetup().ok());
RunMatMulAutoShardingWithOptionsExpectFail(empty_option);
AutoShardingOption option_with_non_positive_mesh;
option_with_non_positive_mesh.enable = true;
option_with_non_positive_mesh.device_mesh_shape = {0, 4};
EXPECT_FALSE(option_with_non_positive_mesh.CheckAndSetup().ok());
RunMatMulAutoShardingWithOptionsExpectFail(option_with_non_positive_mesh);
option_with_non_positive_mesh.device_mesh_shape = {-1, 4};
EXPECT_FALSE(option_with_non_positive_mesh.CheckAndSetup().ok());
RunMatMulAutoShardingWithOptionsExpectFail(option_with_non_positive_mesh);
AutoShardingOption option_not_compatible;
option_not_compatible.enable = true;
option_not_compatible.device_mesh_shape = {4, 8};
option_not_compatible.device_mesh_ids = {1, 2, 3, 4};
EXPECT_FALSE(option_not_compatible.CheckAndSetup().ok());
RunMatMulAutoShardingWithOptionsExpectFail(option_not_compatible);
}
TEST_F(AutoShardingTest, MemoryBudgetTest) {
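  // Schedules the module, builds buffer live ranges, and returns the memory
  // budget lower bound for the given device count and preserved shardings.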
auto compute_memory_budget_lower_bound =
[](const HloModule& module, int64_t num_devices,
const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserved_shardings = {}) -> absl::StatusOr<int64_t> {
auto size_fn = [](const BufferValue& buffer) {
return spmd::ByteSizeOfShape(buffer.shape());
};
TF_ASSIGN_OR_RETURN(HloSchedule schedule,
ScheduleModule(&module, size_fn,
ComputationSchedulerToModuleScheduler(
DFSMemoryScheduler),
{}));
const HloComputation* entry_computation = module.entry_computation();
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(&module).value();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, *alias_analysis, entry_computation));
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges = hlo_live_range->buffer_live_ranges();
spmd::LivenessSet liveness_set(hlo_live_range->schedule_end_time() + 1);
for (const auto& [hlo_value, live_range] : buffer_live_ranges) {
for (spmd::LivenessIdx i = live_range.start; i <= live_range.end; ++i) {
liveness_set[i].push_back(hlo_value);
}
}
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module.entry_computation()->instructions().begin(),
module.entry_computation()->instructions().end());
return spmd::MemoryBudgetLowerBound(module, instructions_to_shard,
liveness_set, *alias_analysis,
num_devices, preserved_shardings);
};
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16384,16384]{0,1} parameter(0)
%param1 = f32[16384,16384]{0,1} parameter(1)
%add = f32[16384,16384]{0,1} add(%param0, %param1)
ROOT %copy = f32[16384,16384]{0,1} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(HloSharding partial_sharding,
ParseSharding("{devices=[64,1]<=[64]}"));
TF_ASSERT_OK_AND_ASSIGN(
int64_t partial_mesh_64x1_budget_lower_bound,
compute_memory_budget_lower_bound(*module, 64));
for (HloInstruction* ins : module->entry_computation()->instructions()) {
ins->set_sharding(partial_sharding);
}
TF_ASSERT_OK_AND_ASSIGN(
int64_t full_mesh_64x8_budget_lower_bound,
compute_memory_budget_lower_bound(*module, 512));
CHECK_LT(full_mesh_64x8_budget_lower_bound,
partial_mesh_64x1_budget_lower_bound)
<< "The memory budget lower bound per device should be lower with a "
"larger number of devices. Instead, the bound was "
<< partial_mesh_64x1_budget_lower_bound << " bytes for 64 devices and "
<< full_mesh_64x8_budget_lower_bound << " bytes for 512 devices.";
}
TEST_F(AutoShardingTest, DISABLED_ElementWiseOperator) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[128,128]{0,1} parameter(0)
%param1 = f32[128,128]{0,1} parameter(1)
%add = f32[128,128]{0,1} add(%param0, %param1)
ROOT %copy = f32[128,128]{0,1} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}"));
}
TEST_F(AutoShardingTest, NDIterativeSolveTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[256,1]<=[16,16]T(1,0)}
sharding_call = s32[512,3084]{1,0} custom-call(param), custom_call_target="Sharding", sharding={devices=[256,1]<=[256]}
ROOT slice = s32[512,2048]{1,0} slice(sharding_call), slice={[0:512], [0:2048]}
})";
AutoShardingOption option;
option.enable = true;
option.solve_nd_sharding_iteratively = true;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.device_mesh_shape = {16, 16};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
HloInstruction* slice = FindInstruction(module.get(), "slice");
EXPECT_NE(slice, nullptr);
EXPECT_THAT(slice, op::Sharding("{devices=[256,1]<=[256]}"));
}
TEST_F(AutoShardingTest, SliceDeviceMeshTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0)
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
      bool changed, AutoSharding(/*option=*/{.enable = true,
.device_mesh_shape = {2, 2},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* slice = FindInstruction(module.get(), "slice");
ASSERT_NE(slice, nullptr);
EXPECT_THAT(
slice,
AnyOf(op::Sharding("{devices=[4,1]0,1,2,3}"),
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}")));
}
TEST_F(AutoShardingTest, SliceInvalidStrategyFollowingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,2084]{1,0} parameter(0)
slice = s32[32,2048]{1,0} slice(param), slice={[0:32], [0:2048]}
ROOT copy = s32[32,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
      bool changed, AutoSharding(/*option=*/{.enable = true,
.device_mesh_shape = {64, 1},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* slice = FindInstruction(module.get(), "slice");
ASSERT_NE(slice, nullptr);
EXPECT_THAT(slice, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, SliceForcedInvalidStrategyFollowingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,2084]{1,0} parameter(0), sharding={devices=[64,1]<=[64]}
slice = s32[32,2048]{1,0} slice(param), slice={[0:32], [0:2048]}
ROOT copy = s32[32,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
      bool changed, AutoSharding(/*option=*/{.enable = true,
.device_mesh_shape = {64, 1},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* slice = FindInstruction(module.get(), "slice");
ASSERT_NE(slice, nullptr);
EXPECT_THAT(slice, op::Sharding("{devices=[64,1]<=[64]}"));
}
TEST_F(AutoShardingTest, IotaPartiallyReplicatedShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
iota1 = s32[11,1026]{1,0} iota(), iota_dimension=1
param1 = s32[11,1026]{1,0} parameter(0), sharding={devices=[1,16,16]<=[16,16]T(1,0) last_tile_dim_replicate}
copy1 = s32[11,1026]{1,0} copy(iota1)
ROOT add1 = s32[11,1026]{1,0} add(copy1, param1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
          /*option=*/{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.allow_mixed_mesh_shape = false,
.only_allow_divisible_input_output = false,
.device_mesh_shape = {16, 16},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* iota = FindInstruction(module.get(), "iota1");
ASSERT_NE(iota, nullptr);
EXPECT_THAT(
iota, op::Sharding(
"{devices=[1,16,16]<=[16,16]T(1,0) last_tile_dim_replicate}"));
}
TEST_F(AutoShardingTest, SliceMixedUserShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[4,1]0,2,1,3}
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
          /*option=*/{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.solve_nd_sharding_iteratively = true,
.device_mesh_shape = {2, 2},
.device_mesh_ids = {0, 2, 1, 3},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Each(ResultOf(
[](const HloInstruction* ins) { return ins->has_sharding(); },
IsTrue())));
EXPECT_THAT(instructions, Each(op::Sharding("{devices=[4,1]0,2,1,3}")));
}
TEST_F(AutoShardingTest, SlicedTensorDimensionShardedTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %slicemodule {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[1,4]0,2,1,3}
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}, sharding={devices=[1,4]0,2,1,3}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
          /*option=*/{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.solve_nd_sharding_iteratively = true,
.device_mesh_shape = {2, 2},
.device_mesh_ids = {0, 2, 1, 3},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Not(Contains(ResultOf(
[](const HloInstruction* ins) { return ins->opcode(); },
Eq(HloOpcode::kReshape)))));
}
TEST_F(AutoShardingTest, UserShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
concatenate.76306 = bf16[1,4096,8,256]{3,2,1,0} parameter(0)
constant.15158 = bf16[] constant(0)
pad.70 = bf16[1,4352,8,256]{3,2,1,0} pad(concatenate.76306, constant.15158), padding=0_0x0_256x0_0x0_0, sharding={devices=[1,1,128,1]<=[128]}
ROOT copy.45 = bf16[1,4352,8,256]{3,2,1,0} copy(pad.70)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.device_mesh_shape = {128, 1},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest,
AllowShardingsSmallDimsAcrossManyDevicesForFollowersTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
parameter.1 = bf16[8,1024]{1,0} parameter(0), sharding={devices=[16,16]<=[256]}
add.1 = bf16[8,1024]{1,0} add(parameter.1, parameter.1)
ROOT copy.45 = bf16[8,1024]{1,0} copy(add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.solve_nd_sharding_iteratively = false,
.only_allow_divisible_input_output = false,
.device_mesh_shape = {16, 16},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0},
.allow_shardings_small_dims_across_many_devices = true})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* add1 = FindInstruction(module.get(), "add.1");
EXPECT_THAT(add1, op::Sharding("{devices=[16,16]<=[256]}"));
TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.solve_nd_sharding_iteratively = false,
.only_allow_divisible_input_output = false,
.device_mesh_shape = {16, 16},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0},
.allow_shardings_small_dims_across_many_devices = false})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
add1 = FindInstruction(module.get(), "add.1");
EXPECT_THAT(add1, Not(op::Sharding("{devices=[16,16]<=[256]}")));
}
TEST_F(AutoShardingTest,
AllowShardingsSmallDimsAcrossManyDevicesForSourcesTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
parameter.1 = bf16[8,1024]{1,0} parameter(0)
add.1 = bf16[8,1024]{1,0} add(parameter.1, parameter.1), sharding={devices=[16,1,16]<=[256] last_tile_dim_replicate}
ROOT copy.45 = bf16[8,1024]{1,0} copy(add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.allow_replicated_parameters = false,
.allow_mixed_mesh_shape = false,
.solve_nd_sharding_iteratively = false,
.only_allow_divisible_input_output = false,
.device_mesh_shape = {16, 16},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0},
.allow_shardings_small_dims_across_many_devices = true})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* parameter1 =
FindInstruction(module.get(), "parameter.1");
EXPECT_THAT(
parameter1,
op::Sharding("{devices=[16,1,16]<=[256] last_tile_dim_replicate}"));
TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.allow_replicated_parameters = false,
.allow_mixed_mesh_shape = false,
.solve_nd_sharding_iteratively = false,
.only_allow_divisible_input_output = false,
.device_mesh_shape = {16, 16},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0},
.allow_shardings_small_dims_across_many_devices = false})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
parameter1 = FindInstruction(module.get(), "parameter.1");
EXPECT_THAT(
parameter1,
Not(op::Sharding("{devices=[16,1,16]<=[256] last_tile_dim_replicate}")));
}
TEST_F(AutoShardingTest, RngBitGeneratorArrayInput) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
ENTRY %RngBitGenerator (p0: u64[2]) -> (u64[2], u32[16,16]) {
%p0 = u64[2]{0} parameter(0)
ROOT %rand = (u64[2]{0}, u32[16,16]{1,0}) rng-bit-generator(u64[2]{0} %p0), algorithm=rng_three_fry
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* instruction = FindInstruction(module.get(), "p0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, SPMDShardToFullShapeWithConstantTest) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
add.6.clone {
y.13 = bf16[]{:T(256)} parameter(1)
x.13 = bf16[]{:T(256)} parameter(0)
ROOT add.9011 = bf16[]{:T(256)} add(x.13, y.13)
}
ENTRY main {
input.1 = bf16[512,512]{1,0} parameter(0)
constant.1 = bf16[] constant(16.7)
broadcast.1 = bf16[128,128]{1,0} broadcast(constant.1), dimensions={}
broadcast.2 = bf16[512,512]{1,0} broadcast(constant.1), dimensions={}
custom-call.1 = bf16[512,512]{1,0} custom-call(input.1), custom_call_target="Sharding", sharding={devices=[4,4]<=[16]}
custom-call.2 = bf16[128,128]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
all-reduce.1 = bf16[128,128]{1,0} all-reduce(custom-call.2), channel_id=621, replica_groups={{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}}, use_global_device_ids=true, to_apply=add.6.clone, frontend_attributes={from-cross-replica-sharding="true"}, backend_config={"flag_configs":[],"barrier_config":{"barrier_type":"CUSTOM","id":"9"},"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
add.1 = bf16[128,128]{1,0} add(bf16[128,128]{1,0} all-reduce.1, bf16[128,128]{1,0} broadcast.1)
custom-call.3 = bf16[512,512]{1,0} custom-call(add.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,4]<=[16]last_tile_dim_replicate}
add.2 = bf16[512,512]{1,0} add(bf16[512,512]{1,0} custom-call.3, bf16[512,512]{1,0} broadcast.2)
ROOT copy.1 = bf16[512,512]{1,0} copy(add.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
option.enable = true;
option.device_mesh_shape = {4, 4};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* custom_call2 =
FindInstruction(module.get(), "custom-call.2");
ASSERT_NE(custom_call2, nullptr);
EXPECT_THAT(custom_call2, op::Sharding("{manual}"));
const HloInstruction* custom_call3 =
FindInstruction(module.get(), "custom-call.3");
ASSERT_NE(custom_call3, nullptr);
EXPECT_THAT(custom_call3,
op::Sharding("{devices=[4,1,4]<=[16]last_tile_dim_replicate}"));
const HloInstruction* custom_call1 = custom_call2->operand(0);
ASSERT_NE(custom_call1, nullptr);
EXPECT_THAT(custom_call1, op::Sharding("{devices=[4,4]<=[16]}"));
EXPECT_THAT(
module->entry_computation()->instructions(),
Contains(ResultOf(
"opcode",
[](const HloInstruction* ins) { return ins->opcode(); },
Eq(HloOpcode::kConstant)))
.Times(2));
}
TEST_F(AutoShardingTest, SPMDShardToFullShapeMultipleValidMeshShapeTest) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
add.6.clone {
y.13 = bf16[]{:T(256)} parameter(1)
x.13 = bf16[]{:T(256)} parameter(0)
ROOT add.9011 = bf16[]{:T(256)} add(x.13, y.13)
}
ENTRY main {
input.1 = bf16[512,512]{1,0} parameter(0)
custom-call.1 = bf16[512,512]{1,0} custom-call(input.1), custom_call_target="Sharding", sharding={devices=[4,4]<=[16]}
custom-call.2 = bf16[128,128]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
all-reduce.1 = bf16[128,128]{1,0} all-reduce(custom-call.2), channel_id=621, replica_groups={{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}}, use_global_device_ids=true, to_apply=add.6.clone, frontend_attributes={from-cross-replica-sharding="true"}, backend_config={"flag_configs":[],"barrier_config":{"barrier_type":"CUSTOM","id":"9"},"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
reshape.1 = bf16[64,2,128]{2,1,0} reshape(bf16[128,128]{1,0} all-reduce.1)
reshape.2 = bf16[64,256]{1,0} reshape(bf16[64,2,128]{2,1,0} reshape.1)
custom-call.3 = bf16[512,512]{1,0} custom-call(reshape.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,2]<=[16]}
ROOT copy.1 = copy(custom-call.3)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
option.enable = true;
option.try_multiple_mesh_shapes = false;
option.device_mesh_shape = {4, 4};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0};
EXPECT_DEATH(auto status = AutoSharding(option).Run(module.get()),
"Auto-sharding cannot infer a single appropriate mesh shape for "
"this HLO, and AutoShardingption::try_multiple_mesh_shapes is "
"set to false. Please re-run with the option set to true.");
}
TEST_F(AutoShardingTest, RngBitGeneratorTupleInput) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
ENTRY %RngBitGenerator {
param.0 = u32[2]{0:T(128)} parameter(0)
param.1 = u32[2]{0:T(128)} parameter(1)
tuple.3 = (u32[2]{0:T(128)}, u32[2]{0:T(128)}) tuple(param.0, param.1)
ROOT rng-bit-generator = u32[100,100]{1,0:T(8,128)} rng-bit-generator(tuple.3), algorithm=rng_default
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param.0");
const HloInstruction* param1 = FindInstruction(module.get(), "param.1");
ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
EXPECT_THAT(param0, op::Sharding("{replicated}"));
EXPECT_THAT(param1, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, DotMixedMeshStrategies) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[8192,23]{1,0} parameter(0), sharding={devices=[4,1]0,1,2,3}
%param1 = f32[23,23]{1,0} parameter(1)
%dot = f32[8192,23]{1,0} dot(%param0, %param1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %copy = f32[8192,23]{1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.solve_nd_sharding_iteratively = false;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(param0, nullptr);
ASSERT_NE(param1, nullptr);
ASSERT_NE(dot, nullptr);
EXPECT_THAT(param0, op::Sharding("{devices=[4,1]0,1,2,3}"));
EXPECT_THAT(param1, op::Sharding("{replicated}"));
EXPECT_THAT(dot, AnyOf(op::Sharding("{devices=[4,1]0,1,2,3}"),
op::Sharding("{devices=[2,2]<=[4]}")));
}
TEST_F(AutoShardingTest, DotInsertReshardingReshapes) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[256,256]{1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[256,256]{1,0} parameter(1), sharding={devices=[2,2]0,1,2,3}
%dot = f32[256,256]{1,0} dot(%param0, %param1), lhs_contracting_dims={1}, rhs_contracting_dims={1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[256,256]{1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(param0, nullptr);
ASSERT_NE(param1, nullptr);
ASSERT_NE(dot, nullptr);
EXPECT_EQ(dot->operand(0), param0);
EXPECT_NE(dot->operand(1), param1);
}
TEST_F(AutoShardingTest, DotLHSTwoNonContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4,256,64]{2,1,0} parameter(0)
%param1 = f32[64,32]{0,1} parameter(1)
%dot = f32[4,256,32]{2,1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[64,32]{0,1} %param1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT %copy = f32[4,256,32]{2,1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(param0, nullptr);
ASSERT_NE(param1, nullptr);
ASSERT_NE(dot, nullptr);
EXPECT_THAT(
std::make_tuple(param0, param1, dot),
AnyOf(
FieldsAre(
op::Sharding(
"{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,1,2,3}")),
FieldsAre(
op::Sharding(
"{devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,2,1,3}")),
FieldsAre(
op::Sharding(
"{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,1,2,3}")),
FieldsAre(
op::Sharding(
"{devices=[2,1,1,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,2,1,3}"))));
}
TEST_F(AutoShardingTest, DotRHSTwoNonContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4,256,32]{2,1,0} parameter(0)
%param1 = f32[4,256,4,8]{1,3,2,0} parameter(1)
%dot = f32[32,4,8]{2,1,0} dot(f32[4,256,32]{2,1,0} %param0, f32[4,256,4,8]{1,3,2,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT %copy = f32[32,4,8]{2,1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(param0, nullptr);
ASSERT_NE(param1, nullptr);
ASSERT_NE(dot, nullptr);
EXPECT_THAT(
std::make_tuple(param0, param1, dot),
AnyOf(
FieldsAre(op::Sharding(
"{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding(
"{devices=[1,1,2,1,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,2,1]0,1,2,3}")),
FieldsAre(op::Sharding(
"{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding(
"{devices=[1,1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,1,2,3}")),
FieldsAre(op::Sharding(
"{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding(
"{devices=[1,1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,2,1,3}")),
FieldsAre(op::Sharding(
"{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding(
"{devices=[1,1,2,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,2,1]0,2,1,3}"))));
}
TEST_F(AutoShardingTest, DotTwoContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4,256,64]{2,1,0} parameter(0)
%param1 = f32[4,256,32]{2,1,0} parameter(1)
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT %copy = f32[64,32]{1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(param0, nullptr);
ASSERT_NE(param1, nullptr);
ASSERT_NE(dot, nullptr);
EXPECT_THAT(
std::make_tuple(param0, param1, dot),
AnyOf(FieldsAre(op::Sharding(
"{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding(
"{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,2]0,2,1,3}")),
FieldsAre(op::Sharding(
"{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding(
"{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,2]0,1,2,3}"))));
}
TEST_F(AutoShardingTest, TwoMatmulWithoutDotReplicationEnabled) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY twomatmul {
parameter.1 = f32[64,64]{1,0} parameter(0)
parameter.2 = f32[64,128]{1,0} parameter(1)
dot.4 = f32[64,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
parameter.3 = f32[128,64]{1,0} parameter(2)
ROOT dot.5 = f32[64,64]{1,0} dot(dot.4, parameter.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.allow_recompute_heavy_op = false;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param1 = FindInstruction(module.get(), "parameter.1");
ASSERT_NE(param1, nullptr);
EXPECT_THAT(param1,
op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}"));
const HloInstruction* param2 = FindInstruction(module.get(), "parameter.2");
ASSERT_NE(param2, nullptr);
EXPECT_THAT(param2,
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* param3 = FindInstruction(module.get(), "parameter.3");
ASSERT_NE(param3, nullptr);
EXPECT_THAT(param3,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* dot4 = FindInstruction(module.get(), "dot.4");
ASSERT_NE(dot4, nullptr);
EXPECT_THAT(dot4, op::Sharding("{devices=[2,2]0,2,1,3}"));
const HloInstruction* dot5 = FindInstruction(module.get(), "dot.5");
ASSERT_NE(dot5, nullptr);
EXPECT_THAT(dot5,
op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}"));
}
TEST_F(AutoShardingTest, TwoMatmulWithDotReplicationEnabled) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY twomatmul {
parameter.1 = f32[64,64]{1,0} parameter(0)
parameter.2 = f32[64,128]{1,0} parameter(1)
dot.4 = f32[64,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
parameter.3 = f32[128,64]{1,0} parameter(2)
ROOT dot.5 = f32[64,64]{1,0} dot(dot.4, parameter.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.allow_recompute_heavy_op = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param1 = FindInstruction(module.get(), "parameter.1");
const HloInstruction* param2 = FindInstruction(module.get(), "parameter.2");
const HloInstruction* param3 = FindInstruction(module.get(), "parameter.3");
const HloInstruction* dot4 = FindInstruction(module.get(), "dot.4");
const HloInstruction* dot5 = FindInstruction(module.get(), "dot.5");
ASSERT_NE(param1, nullptr);
ASSERT_NE(param2, nullptr);
ASSERT_NE(param3, nullptr);
ASSERT_NE(dot4, nullptr);
ASSERT_NE(dot5, nullptr);
EXPECT_THAT(
std::make_tuple(param1, param2, param3, dot4, dot5),
AnyOf(
FieldsAre(
op::Sharding("{replicated}"), op::Sharding("{replicated}"),
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{replicated}"),
op::Sharding("{devices=[2,2]0,2,1,3}")),
FieldsAre(
op::Sharding("{replicated}"), op::Sharding("{replicated}"),
op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{replicated}"),
op::Sharding("{devices=[2,2]0,1,2,3}"))));
}
TEST_F(AutoShardingTest, ProcessCustomCallShardings) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[6,3] parameter(0)
%copy = f32[6,3] copy(%param0)
%annotate = f32[6,3] custom-call(%copy), custom_call_target="Sharding",
backend_config="unspecified_dims=[1]",
sharding={devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}
%copy.2 = f32[6,3] copy(%annotate)
ROOT %copy.3 = f32[6,3] copy(%copy.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
auto* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_TRUE(copy->has_sharding());
EXPECT_THAT(copy,
op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}"));
}
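// Under kKeepAllShardings, SaveAndRemoveShardingAnnotation must leave every
// annotation on the module intact (module_is_changed == false) while still
// recording all of them in the returned map.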
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationKeepAll) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end());
TF_ASSERT_OK_AND_ASSIGN(
AutoShardingImplementation::SaveShardingAnnotationsResult
saved_shardings_result,
AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
module.get(), instructions_to_shard,
/*replicated_small_tensors=*/{},
/*execution_threads=*/{}));
absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
saved_shardings_result.preserved_shardings;
EXPECT_FALSE(saved_shardings_result.module_is_changed);
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Each(ResultOf(
[](const HloInstruction* ins) { return ins->has_sharding(); },
IsTrue())));
auto verified_parse_sharding = [](const absl::string_view sharding_str) {
absl::StatusOr<HloSharding> sharding = ParseSharding(sharding_str);
CHECK_OK(sharding);
return *sharding;
};
EXPECT_THAT(
saved_shardings,
UnorderedElementsAre(
Pair("param0",
ElementsAre(verified_parse_sharding(
"{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"))),
Pair("param1",
ElementsAre(verified_parse_sharding(
"{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"))),
Pair("dot",
ElementsAre(verified_parse_sharding("{devices=[2,2]0,1,2,3}"))),
Pair("copy", ElementsAre(verified_parse_sharding(
"{devices=[2,2]0,1,2,3}")))));
}
TEST_F(AutoShardingTest,
SaveAndRemoveShardingAnnotationKeepInputOutputSmallTensor) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[2,2,1]0,1,2,3}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[2,2,1]0,1,2,3}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={replicated}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end());
TF_ASSERT_OK_AND_ASSIGN(
AutoShardingImplementation::SaveShardingAnnotationsResult
saved_shardings_result,
AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
module.get(), instructions_to_shard,
{"dot"},
{}));
absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
saved_shardings_result.preserved_shardings;
EXPECT_FALSE(saved_shardings_result.module_is_changed);
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Each(ResultOf(
[](const HloInstruction* ins) { return ins->has_sharding(); },
IsTrue())));
auto verified_parse_sharding = [](const absl::string_view sharding_str) {
absl::StatusOr<HloSharding> sharding = ParseSharding(sharding_str);
CHECK_OK(sharding);
return *sharding;
};
EXPECT_THAT(
saved_shardings,
UnorderedElementsAre(
Pair("param0", ElementsAre(verified_parse_sharding(
"{devices=[2,2,1]0,1,2,3}"))),
Pair("param1", ElementsAre(verified_parse_sharding(
"{devices=[2,2,1]0,1,2,3}"))),
Pair("dot", ElementsAre(verified_parse_sharding("{replicated}"))),
Pair("copy", ElementsAre(verified_parse_sharding(
"{devices=[2,2]0,1,2,3}")))));
}
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationKeepInputOutput) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%param0_copy = f32[4,256,64]{2,1,0} copy(param0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1_copy = f32[4,256,32]{2,1,0} copy(param1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0_copy, f32[4,256,32]{2,1,0} %param1_copy), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end());
TF_ASSERT_OK_AND_ASSIGN(
AutoShardingImplementation::SaveShardingAnnotationsResult
saved_shardings_result,
AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
module.get(), instructions_to_shard,
/*replicated_small_tensors=*/{},
/*execution_threads=*/{}));
absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
saved_shardings_result.preserved_shardings;
EXPECT_TRUE(saved_shardings_result.module_is_changed);
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(dot, nullptr);
EXPECT_FALSE(dot->has_sharding());
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
ASSERT_NE(param0, nullptr);
EXPECT_TRUE(param0->has_sharding());
EXPECT_THAT(
param0,
op::Sharding("{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* param0_copy =
FindInstruction(module.get(), "param0_copy");
ASSERT_NE(param0_copy, nullptr);
EXPECT_TRUE(param0_copy->has_sharding());
EXPECT_THAT(
param0_copy,
op::Sharding("{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
ASSERT_NE(param1, nullptr);
EXPECT_TRUE(param1->has_sharding());
EXPECT_THAT(
param1,
op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
const HloInstruction* param1_copy =
FindInstruction(module.get(), "param1_copy");
ASSERT_NE(param1_copy, nullptr);
EXPECT_TRUE(param1_copy->has_sharding());
EXPECT_THAT(
param1_copy,
op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
const HloInstruction* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_TRUE(copy->has_sharding());
EXPECT_THAT(copy, op::Sharding("{devices=[2,2]0,1,2,3}"));
EXPECT_THAT(
saved_shardings,
UnorderedElementsAre(Pair("param0", ElementsAre(param0->sharding())),
Pair("param0_copy", ElementsAre(param0->sharding())),
Pair("param1", ElementsAre(param1->sharding())),
Pair("param1_copy", ElementsAre(param1->sharding())),
Pair("copy", ElementsAre(copy->sharding()))));
}
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationRemoveAll) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end());
TF_ASSERT_OK_AND_ASSIGN(
AutoShardingImplementation::SaveShardingAnnotationsResult
saved_shardings_result,
AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
module.get(), instructions_to_shard,
/*replicated_small_tensors=*/{},
/*execution_threads=*/{}));
absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
saved_shardings_result.preserved_shardings;
EXPECT_TRUE(saved_shardings_result.module_is_changed);
EXPECT_THAT(saved_shardings, IsEmpty());
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Each(ResultOf(
[](const HloInstruction* ins) { return ins->has_sharding(); },
IsFalse())));
}
TEST_F(AutoShardingTest, SaveAndRemoveShardingAnnotationRemoveAllSmallTensor) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[2,2,1]0,1,2,3}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[2,2,1]0,1,2,3}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={replicated}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={replicated}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end());
TF_ASSERT_OK_AND_ASSIGN(
AutoShardingImplementation::SaveShardingAnnotationsResult
saved_shardings_result,
AutoShardingImplementation(option).SaveAndRemoveShardingAnnotation(
module.get(), instructions_to_shard,
{"dot", "copy"},
{}));
absl::flat_hash_map<std::string, std::vector<HloSharding>> saved_shardings =
saved_shardings_result.preserved_shardings;
EXPECT_TRUE(saved_shardings_result.module_is_changed);
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
ASSERT_NE(param0, nullptr);
EXPECT_FALSE(param0->has_sharding());
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
ASSERT_NE(param1, nullptr);
EXPECT_FALSE(param1->has_sharding());
const HloInstruction* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(dot, nullptr);
EXPECT_TRUE(dot->has_sharding());
EXPECT_TRUE(dot->sharding().IsReplicated());
const HloInstruction* copy = FindInstruction(module.get(), "copy");
ASSERT_NE(copy, nullptr);
EXPECT_TRUE(copy->has_sharding());
EXPECT_TRUE(copy->sharding().IsReplicated());
EXPECT_THAT(
saved_shardings,
UnorderedElementsAre(Pair("dot", ElementsAre(dot->sharding())),
Pair("copy", ElementsAre(copy->sharding()))));
}
TEST_F(AutoShardingTest, TupleReduceTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
%func (lhs_value: f32[], lhs_index: s32[], rhs_value: f32[], rhs_index: s32[]) -> (f32[], s32[]) {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.a = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=GE
%select.a = f32[] select(pred[] %compare.a, f32[] %lhs_value, f32[] %rhs_value)
%compare.b = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=EQ
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%minimum = s32[] minimum(s32[] %lhs_index, s32[] %rhs_index)
%select.b = s32[] select(pred[] %compare.a, s32[] %lhs_index, s32[] %rhs_index)
%select.c = s32[] select(pred[] %compare.b, s32[] %minimum, s32[] %select.b)
ROOT %tuple = (f32[], s32[]) tuple(f32[] %select.a, s32[] %select.c)
}
ENTRY %entry {
%param0 = f32[1,16,40]{2,1,0} parameter(0)
%iota = s32[1,16,40]{2,1,0} iota(), iota_dimension=2
%constant.a = f32[] constant(-inf)
%constant.b = s32[] constant(0)
%reduce = (f32[1,16]{1,0}, s32[1,16]{1,0}) reduce(f32[1,16,40]{2,1,0} %param0, s32[1,16,40]{2,1,0} %iota, f32[] %constant.a, s32[] %constant.b), dimensions={2}, to_apply=%func
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
EXPECT_THAT(
reduce,
AnyOf(op::Sharding("{{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}, "
"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}}"),
op::Sharding("{{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}, "
"{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}}"),
op::Sharding("{{devices=[1,4]0,1,2,3}, "
"{devices=[1,4]0,1,2,3}}")));
const HloSharding& sharding = reduce->sharding();
TF_EXPECT_OK(sharding.Validate(reduce->shape(), 4));
}
TEST_F(AutoShardingTest, ReduceTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
%func (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %x, f32[] %y)
}
ENTRY %entry {
%param0 = f32[1,16,128]{2,1,0} parameter(0)
%param1 = f32[] parameter(1)
%reduce = f32[1,16]{1,0} reduce(f32[1,16,128]{2,1,0} %param0, f32[] %param1), dimensions={2}, to_apply=%func
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* reduce = FindInstruction(module.get(), "reduce");
const HloInstruction* param0 = FindInstruction(module.get(), "param0");
ASSERT_NE(reduce, nullptr);
auto reduce_matcher1 =
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}");
auto param0_matcher1 =
op::Sharding("{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}");
auto reduce_matcher2 =
op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}");
auto param0_matcher2 =
op::Sharding("{devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate}");
auto reduce_matcher3 = op::Sharding("{devices=[1,4]0,1,2,3}");
auto param0_matcher3 = op::Sharding("{devices=[1,4,1]0,1,2,3}");
EXPECT_TRUE(
(Matches(param0_matcher1)(param0) && Matches(reduce_matcher1)(reduce)) ||
(Matches(param0_matcher2)(param0) && Matches(reduce_matcher2)(reduce)) ||
(Matches(param0_matcher3)(param0) && Matches(reduce_matcher3)(reduce)));
const HloSharding& sharding = reduce->sharding();
TF_EXPECT_OK(sharding.Validate(reduce->shape(), 4));
}
TEST_F(AutoShardingTest, ScatterTest2D) {
constexpr absl::string_view kHloString = R"(
HloModule module
region {
Arg_0 = s32[] parameter(0)
ROOT Arg_1 = s32[] parameter(1)
}
ENTRY %Scatter {
call = s32[4,128]{1,0} parameter(0)
clamp = s32[4,2]{1,0} parameter(1)
broadcast = s32[4,8]{1,0} parameter(2)
ROOT scatter = s32[4,128]{1,0} scatter(call, clamp, broadcast), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0,1}, index_vector_dim=1, indices_are_sorted=true, unique_indices=true, to_apply=region
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
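// 1185 bytes appears to be exactly: two s32[4,128] buffers tiled 4 ways
// (2 * 512B) + replicated s32[4,2] indices (32B) + s32[4,8] updates (128B)
// + 1 byte of slack, i.e. just enough memory to force a 4-way tiling.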
option.memory_budget_per_device = 1185;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_EQ(scatter->sharding().NumTiles(), 4);
TF_EXPECT_OK(scatter->sharding().Validate(scatter->shape(), 4));
}
TEST_F(AutoShardingTest, ScatterTest3D) {
constexpr absl::string_view kHloString = R"(
HloModule module
region {
Arg_0 = f32[] parameter(0)
ROOT Arg_1 = f32[] parameter(1)
}
ENTRY %Scatter {
call = f32[4,128,128]{2,1,0} parameter(0)
clamp = s32[4,3]{1,0} parameter(1)
multiply = f32[4,8,8]{2,1,0} parameter(2)
ROOT scatter = f32[4,128,128]{2,1,0} scatter(call, clamp, multiply), update_window_dims={1,2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0,1,2}, index_vector_dim=1, indices_are_sorted=true, unique_indices=true, to_apply=region
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
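// The expression spells out the budget: 4 bytes * 2 f32[4,128,128] buffers
// tiled 4 ways, + replicated s32[4,3] indices (48B) + f32[4,8,8] updates
// (1024B) + 1 byte of slack, again forcing a 4-way tiling of the scatter.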
option.memory_budget_per_device = 4 * 2 * (4 * 128 * 128 / 4) + 48 + 1024 + 1;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* scatter = FindInstruction(module.get(), "scatter");
ASSERT_NE(scatter, nullptr);
EXPECT_EQ(scatter->sharding().NumTiles(), 4);
TF_EXPECT_OK(scatter->sharding().Validate(scatter->shape(), 4));
}
TEST_F(AutoShardingTest, GatherTest) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
parameter.0 = s32[262144,2]{1,0} parameter(0), sharding={devices=[16,1,16]<=[256] last_tile_dim_replicate}
parameter.1 = f32[512,712,4096]{2,1,0} parameter(1), sharding={devices=[16,1,16]<=[256]}
ROOT gather = f32[262144,4096]{1,0} gather(parameter.1, parameter.0), offset_dims={1}, collapsed_slice_dims={0,1}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1,4096}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {16, 16};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[16,16]<=[256]}"));
}
TEST_F(AutoShardingTest, GatherTest2) {
const char* const hlo_string = R"(
HloModule module
ENTRY %module {
data = f32[1000]{0} parameter(0), sharding={replicated}
indices = s32[512,1280,8,1]{3,2,1,0} parameter(1), sharding={devices=[256,1,1,1]<=[256]}
ROOT gather = f32[512,1280,8,1]{3,2,1,0} gather(data, indices), offset_dims={3}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=3, slice_sizes={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {256, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* gather = FindInstruction(module.get(), "gather");
ASSERT_NE(gather, nullptr);
EXPECT_THAT(gather, op::Sharding("{devices=[256,1,1,1]<=[256]}"));
}
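// The chosen shardings for `data` and the gather should be compatible, so
// no reshard copy is inserted between them; the operand pointer check below
// verifies that `data` still feeds the gather directly.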
TEST_F(AutoShardingTest, GatherTestNoReshard) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
data = s8[1000,128]{1,0} parameter(0)
indices = s32[8,1,1]{2,1,0} parameter(1)
gather = s8[8,1,128]{2,1,0} gather(data, indices), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,128}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {1, 1, 8};
option.device_mesh_ids = {0, 1, 2, 3, 4, 5, 6, 7};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* gather = FindInstruction(module.get(), "gather");
const HloInstruction* data = FindInstruction(module.get(), "data");
ASSERT_NE(gather, nullptr);
ASSERT_NE(data, nullptr);
EXPECT_THAT(gather, AnyOf(op::Sharding("{devices=[1,1,8]<=[8]}"),
op::Sharding("{devices=[8,1,1]<=[8]}")));
EXPECT_THAT(data, AnyOf(op::Sharding("{devices=[1,8]<=[8]}"),
op::Sharding("{devices=[8,1]<=[8]}")));
TF_EXPECT_OK(gather->sharding().Validate(gather->shape(), 8));
EXPECT_EQ(data, gather->operand(0));
}
TEST_F(AutoShardingTest, GatherConvTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[1024,1024]{0,1} parameter(0)
%param1 = s32[128,1024,1]{2,1,0} parameter(1)
%gather = f32[128,1024,1024]{2,1,0} gather(f32[1024,1024]{0,1} %param0, s32[128,1024,1]{2,1,0} %param1), offset_dims={2}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=2, slice_sizes={1,1024}
%param2 = f32[1024,1024]{1,0} parameter(2), sharding={replicated}
%reshape = f32[1024,1024,1]{2,1,0} reshape(param2)
ROOT convolution = f32[128,1024,1024]{2,1,0} convolution(gather, reshape), window={size=1}, dim_labels=b0f_io0->b0f
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
option.device_mesh_shape = {4, 1};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* gather = FindInstruction(module.get(), "gather");
const HloInstruction* conv = FindInstruction(module.get(), "convolution");
ASSERT_NE(gather, nullptr);
ASSERT_NE(conv, nullptr);
const HloSharding& gather_sharding = gather->sharding();
EXPECT_EQ(gather_sharding.NumTiles(), 4);
EXPECT_OK(gather_sharding.Validate(gather->shape(), 4));
const HloSharding& conv_sharding = conv->sharding();
EXPECT_EQ(conv_sharding.NumTiles(), 4);
EXPECT_OK(conv_sharding.Validate(conv->shape(), 4));
}
TEST_F(AutoShardingTest, AutoShardingKeepUserShardingInputOutput) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
auto* dot = FindInstruction(module.get(), "dot");
dot->clear_sharding();
EXPECT_FALSE(dot->has_sharding());
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepInputOutputShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
auto* dot_after = FindInstruction(module.get(), "dot");
ASSERT_NE(dot_after, nullptr);
EXPECT_THAT(dot_after, op::Sharding("{devices=[2,2]0,1,2,3}"));
auto sharding = dot_after->sharding();
TF_EXPECT_OK(sharding.Validate(dot_after->shape(), 4));
}
TEST_F(AutoShardingTest, AutoShardingKeepUserShardingAdd) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[128,128]{0,1} parameter(0)
%param1 = f32[128,128]{0,1} parameter(1)
%add = f32[128,128]{0,1} add(%param0, %param1), sharding={devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
ROOT %copy = f32[128,128]{0,1} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {2, 2};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
const HloInstruction* param0_after = FindInstruction(module.get(), "param0");
ASSERT_NE(param0_after, nullptr);
EXPECT_THAT(param0_after,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* param1_after = FindInstruction(module.get(), "param1");
ASSERT_NE(param1_after, nullptr);
EXPECT_THAT(param1_after,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* add_after = FindInstruction(module.get(), "add");
ASSERT_NE(add_after, nullptr);
EXPECT_THAT(add_after,
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"));
}
TEST_F(AutoShardingTest, AutoShardingKeepUserShardingDot) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry (param0: f32[4,256,64], param1: f32[4,256,32]) -> f32[64,32] {
%param0 = f32[4,256,64]{2,1,0} parameter(0), sharding={devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}
%param1 = f32[4,256,32]{2,1,0} parameter(1), sharding={devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}
%dot = f32[64,32]{1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[4,256,32]{2,1,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, sharding={devices=[2,2]0,1,2,3}
ROOT %copy = f32[64,32]{1,0} copy(f32[64,32]{1,0} %dot), sharding={devices=[2,2]0,1,2,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* param0 = FindInstruction(module.get(), "param0");
param0->clear_sharding();
EXPECT_FALSE(param0->has_sharding());
HloInstruction* param1 = FindInstruction(module.get(), "param1");
param1->clear_sharding();
EXPECT_FALSE(param1->has_sharding());
HloInstruction* copy = FindInstruction(module.get(), "copy");
copy->clear_sharding();
EXPECT_FALSE(copy->has_sharding());
AutoShardingOption option;
option.enable = true;
option.allow_mixed_mesh_shape = false;
option.device_mesh_shape = {2, 2};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* param0_after = FindInstruction(module.get(), "param0");
ASSERT_NE(param0_after, nullptr);
EXPECT_THAT(
param0_after,
op::Sharding("{devices=[1,1,2,2]0,1,2,3 last_tile_dim_replicate}"));
const HloInstruction* param1_after = FindInstruction(module.get(), "param1");
ASSERT_NE(param1_after, nullptr);
EXPECT_THAT(
param1_after,
op::Sharding("{devices=[1,1,2,2]0,2,1,3 last_tile_dim_replicate}"));
const HloInstruction* copy_after = FindInstruction(module.get(), "copy");
ASSERT_NE(copy_after, nullptr);
EXPECT_THAT(copy_after, op::Sharding("{devices=[2,2]0,1,2,3}"));
}
TEST_F(AutoShardingTest, ENABLEDAutoShardingKeepUserShardingTupleReduce) {
constexpr absl::string_view kHloString = R"(
HloModule module
%func (lhs_value: f32[], lhs_index: s32[], rhs_value: f32[], rhs_index: s32[]) -> (f32[], s32[]) {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(2)
%compare.a = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=GE
%select.a = f32[] select(pred[] %compare.a, f32[] %lhs_value, f32[] %rhs_value)
%compare.b = pred[] compare(f32[] %lhs_value, f32[] %rhs_value), direction=EQ
%lhs_index = s32[] parameter(1)
%rhs_index = s32[] parameter(3)
%minimum = s32[] minimum(s32[] %lhs_index, s32[] %rhs_index)
%select.b = s32[] select(pred[] %compare.a, s32[] %lhs_index, s32[] %rhs_index)
%select.c = s32[] select(pred[] %compare.b, s32[] %minimum, s32[] %select.b)
ROOT %tuple = (f32[], s32[]) tuple(f32[] %select.a, s32[] %select.c)
}
ENTRY %entry {
%param0 = f32[1,16,40]{2,1,0} parameter(0)
%iota = s32[1,16,40]{2,1,0} iota(), iota_dimension=2
%constant.a = f32[] constant(-inf)
%constant.b = s32[] constant(0)
%reduce = (f32[1,16]{1,0}, s32[1,16]{1,0}) reduce(f32[1,16,40]{2,1,0} %param0, s32[1,16,40]{2,1,0} %iota, f32[] %constant.a, s32[] %constant.b), dimensions={2}, to_apply=%func,
sharding={{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}, {devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
auto* reduce = FindInstruction(module.get(), "reduce");
ASSERT_NE(reduce, nullptr);
EXPECT_THAT(reduce, op::Sharding(
"{{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}, "
"{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}}"));
auto sharding = reduce->sharding();
TF_EXPECT_OK(sharding.Validate(reduce->shape(), 4));
auto* param0 = FindInstruction(module.get(), "param0");
ASSERT_NE(param0, nullptr);
EXPECT_FALSE(param0->sharding().IsReplicated());
}
TEST_F(AutoShardingTest, GetTupleElementUserShardingsParameter) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %tupleparameter {
%param0 = f32[32,64]{1,0} parameter(0)
%param1 = f32[32,64]{1,0} parameter(1), sharding={devices=[2,2]<=[4]}
%tuple1 = (f32[32,64]{1,0}, f32[32,64]{1,0}) tuple(f32[32,64]{1,0} %param0, f32[32,64]{1,0} %param1)
%first = f32[32,64]{1,0} get-tuple-element((f32[32,64]{1,0}, f32[32,64]{1,0}) %tuple1), index=0
%second = f32[32,64]{1,0} get-tuple-element((f32[32,64]{1,0}, f32[32,64]{1,0}) %tuple1), index=1, sharding={devices=[4,1]<=[4]}
ROOT root = f32[32,64]{1,0} add(%first, %second)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* param1 = FindInstruction(module.get(), "param1");
ASSERT_NE(param1, nullptr);
EXPECT_THAT(param1, op::Sharding("{devices=[2,2]<=[4]}"));
const HloInstruction* root = FindInstruction(module.get(), "root");
ASSERT_NE(root, nullptr);
EXPECT_THAT(root, op::Sharding("{devices=[4,1]<=[4]}"));
}
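// The tuple parameter's per-element shardings should match those assigned
// to its get-tuple-element users, which in turn follow the root add.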
TEST_F(AutoShardingTest, TupleParameter) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %tupleparameter {
%tuple_param = (f32[16,32,64]{2,1,0}, f32[16,32,64]{2,1,0}) parameter(0)
%first = f32[16,32,64]{2,1,0} get-tuple-element((f32[16,32,64]{2,1,0}, f32[16,32,64]{2,1,0}) %tuple_param), index=0
%second = f32[16,32,64]{2,1,0} get-tuple-element((f32[16,32,64]{2,1,0}, f32[16,32,64]{2,1,0}) %tuple_param), index=1
ROOT root = f32[16,32,64]{2,1,0} add(%first, %second)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* tuple_param =
FindInstruction(module.get(), "tuple_param");
const HloInstruction* first = FindInstruction(module.get(), "first");
const HloInstruction* second = FindInstruction(module.get(), "second");
const HloInstruction* root = FindInstruction(module.get(), "root");
ASSERT_NE(tuple_param, nullptr);
ASSERT_NE(first, nullptr);
ASSERT_NE(second, nullptr);
ASSERT_NE(root, nullptr);
ASSERT_TRUE(tuple_param->has_sharding());
ASSERT_TRUE(first->has_sharding());
ASSERT_TRUE(second->has_sharding());
ASSERT_TRUE(root->has_sharding());
EXPECT_EQ(first->sharding(), second->sharding());
EXPECT_EQ(first->sharding(), root->sharding());
ASSERT_TRUE(tuple_param->sharding().IsTuple());
ASSERT_EQ(tuple_param->sharding().tuple_elements().size(), 2);
EXPECT_EQ(tuple_param->sharding().tuple_elements()[0], first->sharding());
EXPECT_EQ(tuple_param->sharding().tuple_elements()[1], second->sharding());
TF_EXPECT_OK(tuple_param->sharding().Validate(tuple_param->shape(), 4));
}
TEST_F(AutoShardingTest, GetTupleElementWithUserShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
%while_cond {
%param0 = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) parameter(0)
%count = u32[] get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=0
%limit = u32[] constant(2)
ROOT %lt = pred[] compare(%count, %limit), direction=LT
}
%while_body {
%param0 = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) parameter(0)
%count = u32[] get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=0
%v1 = f32[16,256,256]{2,1,0} get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=1
%v2 = f32[16,256,256]{2,1,0} get-tuple-element((u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) %param0), index=2
%dot = f32[16,256,256]{2,1,0} dot(f32[16,256,256]{2,1,0} %v1, f32[16,256,256]{2,1,0} %v2), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={0}, rhs_batch_dims={0}
%dot_tanh = f32[16,256,256]{2,1,0} tanh(f32[16,256,256]{2,1,0} %dot)
%dot_cos = f32[16,256,256]{2,1,0} cosine(f32[16,256,256]{2,1,0} %dot)
ROOT %result = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) tuple(%count, %dot_tanh, %dot_cos)
}
ENTRY %entry (param0: f32[16,256,256], param1: f32[16,256,256]) -> f32[16,256,256] {
%param0 = f32[16,256,256]{2,1,0} parameter(0), sharding={devices=[2,1,2]0,1,2,3}
%param1 = f32[16,256,256]{2,1,0} parameter(1), sharding={devices=[2,1,2]0,1,2,3}
%zero = u32[] constant(0)
%init = (u32[], f32[16,256,256], f32[16,256,256]) tuple(%zero, %param0, %param1)
%while.1 = (u32[],f32[16,256,256]{2,1,0},f32[16,256,256]{2,1,0}) while(%init), body=%while_body, condition=%while_cond
%tuple1 = f32[16,256,256]{2,1,0} get-tuple-element((u32[], f32[16,256,256]{2,1,0}, f32[16,256,256]{2,1,0}) %while.1), index=1, sharding={devices=[2,2,1]0,2,1,3}
ROOT %tanh = f32[16,256,256]{2,1,0} tanh(f32[16,256,256]{2,1,0} %tuple1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.enable = true;
option.device_mesh_shape = {2, 1, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
}
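// The tuple shardings of the while body's root, the body parameter, and the
// condition parameter must agree element-wise, so the loop state keeps a
// fixed layout across iterations.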
TEST_F(AutoShardingTest, While) {
constexpr absl::string_view kHloString = R"(
HloModule module
%cond {
%vars.cond = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%count.cond = u32[] get-tuple-element(%vars.cond), index=0
%limit = u32[] constant(2)
ROOT %lt = pred[] compare(%count.cond, %limit), direction=LT
}
%body {
%param = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) parameter(0)
%i0 = s32[] constant(0)
%count = u32[] get-tuple-element(%param), index=0
%gte0 = bf16[2,2048,768]{2,1,0} get-tuple-element(%param), index=1
%index = s32[] get-tuple-element(%param), index=4
%ds = bf16[1,2048,768]{2,1,0} dynamic-slice(%gte0, s32[] %index, s32[] %i0, s32[] %i0), dynamic_slice_sizes={1,2048,768}
%rhs = bf16[2048,768]{1,0} reshape(%ds)
%lhs = bf16[128,512,2048]{2,1,0} get-tuple-element(%param), index=2
%dot = bf16[128,512,768]{2,1,0} dot(bf16[128,512,2048]{2,1,0} %lhs, bf16[2048,768]{1,0} %rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT %tuple = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%count, %gte0, %lhs, %dot, %index)
}
ENTRY %entry {
%p0 = bf16[2048,768] parameter(0)
%p1 = bf16[128,512,2048] parameter(1)
%p2 = bf16[128,512,768] parameter(2)
%reshape0 = bf16[1,2048,768] reshape(%p0)
%concat0 = bf16[2,2048,768] concatenate(%reshape0, %reshape0), dimensions={0}
%zero = u32[] constant(0)
%p3 = s32[] parameter(3)
%init = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) tuple(%zero, %concat0, %p1, %p2, %p3)
%while = (u32[], bf16[2,2048,768], bf16[128,512,2048], bf16[128,512,768], s32[]) while(%init), body=%body, condition=%cond
ROOT %result = bf16[128,512,768] get-tuple-element(%while), index=3
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
auto* while_op = FindInstruction(module.get(), "while");
ASSERT_NE(while_op, nullptr);
for (size_t i = 0; i < while_op->while_body()
->root_instruction()
->sharding()
.tuple_elements()
.size();
i++) {
const HloSharding& root_sharding = while_op->while_body()
->root_instruction()
->sharding()
.tuple_elements()
.at(i);
EXPECT_EQ(while_op->while_body()
->parameter_instruction(0)
->sharding()
.tuple_elements()
.at(i)
.ToString(),
root_sharding.ToString());
EXPECT_EQ(while_op->while_condition()
->parameter_instruction(0)
->sharding()
.tuple_elements()
.at(i)
.ToString(),
root_sharding.ToString());
}
}
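// Smoke test: the pass should handle a dynamic-slice fed from a large tuple
// parameter without crashing.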
TEST_F(AutoShardingTest, DynamicSlice) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = s32[] parameter(0)
%arg_tuple = (s32[], f32[4,256,1024]{2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,4,256,1024]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[4,1,256,256]{3,2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[], f32[], f32[4,256,1]{2,1,0}, f32[], f32[]) parameter(1)
%constant.a = s32[] constant(2)
%constant.b = s32[] constant(0)
%compare = pred[] compare(s32[] %param0, s32[] %constant.b), direction=LT
%add = s32[] add(s32[] %param0, s32[] %constant.a)
%select = s32[] select(pred[] %compare, s32[] %add, s32[] %param0)
%get-tuple-element = f32[2,1024]{1,0} get-tuple-element((s32[], f32[4,256,1024]{2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,4,256,1024]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4096]{1,0}, f32[2,1024,4096]{2,1,0}, f32[2,1024]{1,0}, f32[2,4096,1024]{2,1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,1024]{1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,256]{1,0}, f32[2,1024]{1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[2,4,256]{2,1,0}, f32[2,1024,4,256]{3,2,1,0}, f32[4,1,256,256]{3,2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[4,256,1]{2,1,0}, f32[], f32[], f32[4,256,1]{2,1,0}, f32[], f32[]) %arg_tuple), index=16
ROOT %dynamic-slice = f32[1,1024]{1,0} dynamic-slice(f32[2,1024]{1,0} %get-tuple-element, s32[] %select, s32[] %constant.b), dynamic_slice_sizes={1,1024}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, Alias) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias)}
ENTRY %entry {
param.0 = u32[] parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[1000]{0} parameter(3)
ROOT tuple = (u32[], f32[32]{0}, f32[32]{0}, f32[1000]{0}) tuple(param.0, param.1, param.2, param.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, AliasTupleParameter) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {0}, may-alias), {1}: (0, {1}, may-alias), {2}: (0, {2}, may-alias), {3}: (0, {3}, may-alias)}
ENTRY %entry {
arg_tuple.1 = (u32[], f32[32]{0}, f32[32]{0}, f32[1000]{0}) parameter(0)
get-tuple-element.0 = u32[] get-tuple-element(arg_tuple.1), index=0
get-tuple-element.1 = f32[32]{0} get-tuple-element(arg_tuple.1), index=1
get-tuple-element.2 = f32[32]{0} get-tuple-element(arg_tuple.1), index=2
get-tuple-element.3 = f32[1000]{0} get-tuple-element(arg_tuple.1), index=3
ROOT tuple = (u32[], f32[32]{0}, f32[32]{0}, f32[1000]{0}) tuple(get-tuple-element.0, get-tuple-element.1, get-tuple-element.2, get-tuple-element.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
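// HLO resembling a lowered jax.random.uniform; checks that the pass runs
// end to end and replicates the variadic reduce feeding rng-bit-generator.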
TEST_F(AutoShardingTest, JaxRandomUniform) {
constexpr absl::string_view kHloString = R"(
HloModule module
clone {
lhs.1 = u32[] parameter(0)
rhs.1 = u32[] parameter(2)
or.2 = u32[] or(lhs.1, rhs.1)
lhs.0 = u32[] parameter(1)
rhs.0 = u32[] parameter(3)
or.3 = u32[] or(lhs.0, rhs.0)
ROOT tuple.23 = (u32[], u32[]) tuple(or.2, or.3)
}
ENTRY %entry {
shift-left = u32[2,2]{1,0} parameter(0)
select = u32[2,2]{1,0} parameter(1)
constant.a = u32[] parameter(2)
reduce = (u32[2]{0}, u32[2]{0}) reduce(shift-left, select, constant.a, constant.a), dimensions={1}, to_apply=clone
rng-bit-generator = u32[8,512]{1,0} rng-bit-generator(reduce), algorithm=rng_default
constant.b = u32[] constant(9)
broadcast.a = u32[8,512]{1,0} broadcast(constant.b), dimensions={}, sharding={replicated}
shift-right-logical = u32[8,512]{1,0} shift-right-logical(rng-bit-generator, broadcast.a)
constant.c = u32[] constant(1065353216)
broadcast.b = u32[8,512]{1,0} broadcast(constant.c), dimensions={}, sharding={replicated}
or = u32[8,512]{1,0} or(shift-right-logical, broadcast.b)
bitcast-convert = f32[8,512]{1,0} bitcast-convert(or)
constant.d = f32[] constant(1)
broadcast.c = f32[8,512]{1,0} broadcast(constant.d), dimensions={}, sharding={replicated}
subtract = f32[8,512]{1,0} subtract(bitcast-convert, broadcast.c)
constant.e = f32[] constant(0)
broadcast.d = f32[8,512]{1,0} broadcast(constant.e), dimensions={}, sharding={replicated}
ROOT maximum = f32[8,512]{1,0} maximum(subtract, broadcast.d)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
EXPECT_TRUE(module->entry_computation()->root_instruction()->has_sharding());
auto* tuple_operand = FindInstruction(module.get(), "reduce");
ASSERT_NE(tuple_operand, nullptr);
EXPECT_THAT(tuple_operand, op::Sharding("{{replicated}, {replicated}}"));
}
TEST_F(AutoShardingTest, Reshape) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param.0 = bf16[24,2048,2048]{2,1,0} parameter(0)
%param.1 = s32[] parameter(1)
%param.2 = bf16[512,1024,2048]{2,1,0} parameter(2)
%constant = s32[] constant(0)
%dynamic-slice = bf16[1,2048,2048]{2,1,0} dynamic-slice(bf16[24,2048,2048]{2,1,0} %param.0, s32[] %param.1, s32[] %constant, s32[] %constant), dynamic_slice_sizes={1,2048,2048}
%reshape = bf16[2048,16,128]{2,1,0} reshape(bf16[1,2048,2048]{2,1,0} %dynamic-slice)
%dot = bf16[512,1024,16,128]{3,2,1,0} dot(bf16[512,1024,2048]{2,1,0} %param.2, bf16[2048,16,128]{2,1,0} %reshape), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {64, 1};
option.device_mesh_ids.resize(64);
std::iota(option.device_mesh_ids.begin(), option.device_mesh_ids.end(), 0);
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
}
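// The user sharding tiles a size-24 dimension 32 ways, which cannot be
// propagated through the reshape as-is; the pass should recover and place
// an equivalent, valid sharding on the reshape instead of crashing.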
TEST_F(AutoShardingTest, ReshapeWithInvalidUserSharding) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param.0 = bf16[24,16,16]{2,1,0} parameter(0), sharding={devices=[32,1,1]<=[32]}
%reshape = bf16[1,24,16,16]{3,2,1,0} reshape(%param.0)
%copy = bf16[1,24,16,16]{3,2,1,0} copy(%reshape)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {32, 1};
option.device_mesh_ids.resize(32);
std::iota(option.device_mesh_ids.begin(), option.device_mesh_ids.end(), 0);
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
HloInstruction* reshape = FindInstruction(module.get(), "reshape");
EXPECT_THAT(reshape, op::Sharding("{devices=[1,32,1,1]<=[32]}"));
}
TEST_F(AutoShardingTest, Broadcast) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param.0 = s32[32]{0} parameter(0)
ROOT broadcast = s32[512,1024,1024,32]{3,2,1,0} broadcast(s32[32]{0} %param.0), dimensions={3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {1, 1, 64};
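// The broadcast output is 512*1024*1024*32 elements * 4B = 64 GiB, so a
// 1025 MiB per-device budget only fits if the output is tiled across all
// 64 devices (1 GiB per shard, with ~1 MiB of slack for the small operand).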
option.memory_budget_per_device = 1025 * 1024 * 1024;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(1) << module->ToString();
EXPECT_TRUE(changed);
}
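// The user-pinned 1D sharding on the root conflicts with the dot shardings
// available on a 2x2 mesh, so some resharding is unavoidable and the solver
// objective must be strictly positive.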
TEST_F(AutoShardingTest, TestReshardingCostsForUserAnnotatedSharding) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[256,256] parameter(0)
%param1 = f32[256,256] parameter(1)
%dot = f32[256,256] dot(%param0, %param1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %result = f32[256,256] tanh(%dot), sharding={devices=[1,4]0,1,2,3}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_beta = {1, 1};
option.device_mesh_alpha = {1, 1};
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
AutoSharding pass(option);
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
LOG(INFO) << module->ToString();
EXPECT_GT(pass.GetSolverOptimalObjectiveValue(), 0);
}
TEST_F(AutoShardingTest, AllowAliasToFollowerConversion) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias)}
ENTRY %entry {
param.0 = u32[] parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32000]{0} parameter(3)
ROOT tuple.61 = (u32[], f32[32]{0}, f32[32]{0}, f32[32000]{0}) tuple(param.0, param.1, param.2, param.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_alias_to_follower_conversion = true;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, DisallowAliasToFollowerConversion) {
constexpr absl::string_view kHloString = R"(
HloModule module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias)}
ENTRY %entry {
param.0 = u32[] parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32000]{0} parameter(3)
ROOT tuple.61 = (u32[], f32[32]{0}, f32[32]{0}, f32[32000]{0}) tuple(param.0, param.1, param.2, param.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_alias_to_follower_conversion = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(0) << module->ToString();
EXPECT_TRUE(changed);
}
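// The pass rewrites instruction shardings but must leave the module's
// buffer-donor config byte-for-byte unchanged.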
TEST_F(AutoShardingTest, BufferDonorConfigPreservation) {
constexpr absl::string_view kHloString = R"(
HloModule Module, buffer_donor={ (0, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
const HloBufferDonorConfig buffer_donor_config_before =
module->buffer_donor_config();
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloBufferDonorConfig& buffer_donor_config_after =
module->buffer_donor_config();
EXPECT_EQ(buffer_donor_config_before.ToString(),
buffer_donor_config_after.ToString());
}
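// Likewise, the module's input/output alias config must survive the pass
// unchanged.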
TEST_F(AutoShardingTest, InputOutputAliasConfigPreservation) {
constexpr absl::string_view kHloString = R"(
HloModule Module, input_output_alias={ {0}: (0, {0}, must-alias), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
const HloInputOutputAliasConfig input_output_alias_config_before =
module->input_output_alias_config();
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
const HloInputOutputAliasConfig& input_output_alias_config_after =
module->input_output_alias_config();
EXPECT_EQ(input_output_alias_config_before.ToString(),
input_output_alias_config_after.ToString());
}
TEST_F(AutoShardingTest, SliceAliasTest) {
const char* const kHloString = R"(
HloModule module
%branch0 {
%branch0_param = f32[256,256]{1,0} parameter(0)
ROOT %slice0 = f32[16,16]{1,0} slice(f32[256,256]{1,0} %branch0_param), slice={[16:32], [16:32]}
}
%branch1 {
%branch1_param = f32[256,256]{1,0} parameter(0)
ROOT %slice1 = f32[16,16]{1,0} slice(f32[256,256]{1,0} %branch1_param), slice={[0:16], [0:16]}
}
ENTRY %entry {
%entry_param0 = f32[256,256]{1,0} parameter(0), sharding={devices=[32,1]<=[32]}
%entry_param1 = s32[] parameter(1)
ROOT %conditional = f32[16,16]{1,0} conditional(s32[] %entry_param1, f32[256,256]{1,0} %entry_param0, f32[256,256]{1,0} %entry_param0), branch_computations={%branch0, %branch1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.enable = true;
option.device_mesh_shape = {32, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(5) << module->ToString();
const HloInstruction* branch0_param =
FindInstruction(module.get(), "branch0_param");
const HloInstruction* slice0 = FindInstruction(module.get(), "slice0");
const HloInstruction* branch1_param =
FindInstruction(module.get(), "branch1_param");
const HloInstruction* slice1 = FindInstruction(module.get(), "slice1");
ASSERT_NE(branch0_param, nullptr);
ASSERT_NE(slice0, nullptr);
ASSERT_NE(branch1_param, nullptr);
ASSERT_NE(slice1, nullptr);
ASSERT_TRUE(branch0_param->has_sharding());
ASSERT_TRUE(slice0->has_sharding());
ASSERT_TRUE(branch1_param->has_sharding());
ASSERT_TRUE(slice1->has_sharding());
EXPECT_THAT(branch0_param, op::Sharding("{devices=[32,1]<=[32]}"));
EXPECT_THAT(slice0, op::Sharding("{replicated}"));
EXPECT_THAT(branch1_param, op::Sharding("{devices=[32,1]<=[32]}"));
EXPECT_THAT(slice1, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, CrashIfAskedToRespectShardAsShardLike) {
const char* const kHloString = R"(
HloModule module
ENTRY matmul {
param1 = f32[32,64]{1,0} parameter(0)
param2 = f32[64,128]{1,0} parameter(1)
custom-call1 = f32[32,64]{1,0} custom-call(param1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
custom-call2 = f32[64,128]{1,0} custom-call(param2), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
ROOT root = f32[32,128]{1,0} dot(custom-call1, custom-call2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.enable = true;
option.device_mesh_shape = {4, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
EXPECT_DEATH(
absl::StatusOr<bool> status = AutoSharding(option).Run(module.get()),
"The auto-sharding pass could not find shardings that works for this "
"input.");
}
TEST_F(AutoShardingTest, IgnoreShardAsShardLike) {
const char* const kHloString = R"(
HloModule module
ENTRY matmul {
param1 = f32[32,64]{1,0} parameter(0)
param2 = f32[64,128]{1,0} parameter(1)
custom-call1 = f32[32,64]{1,0} custom-call(param1), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
custom-call2 = f32[64,128]{1,0} custom-call(param2), custom_call_target="Sharding", custom_call_has_side_effect=true, sharding={unknown shard_as 0}
ROOT root = f32[32,128]{1,0} dot(custom-call1, custom-call2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
option.enable = true;
option.device_mesh_shape = {4, 1};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
EXPECT_TRUE(changed);
}
TEST(NormalizeTest, NormalizeHandlesNegativeCosts) {
EdgeReshardingCostMatrix edge_cost(2, 2);
edge_cost(0, 0).communication_cost = -100;
edge_cost(0, 1).communication_cost = 200;
edge_cost(1, 0).communication_cost = 300;
edge_cost(1, 1).communication_cost = 400;
const EdgeReshardingCostMatrix normalized_edge_cost = Normalize(edge_cost);
EXPECT_EQ(normalized_edge_cost(0, 0).communication_cost, 0);
EXPECT_EQ(normalized_edge_cost(0, 1).communication_cost, 300);
EXPECT_EQ(normalized_edge_cost(1, 0).communication_cost, 400);
EXPECT_EQ(normalized_edge_cost(1, 1).communication_cost, 500);
}
TEST(NormalizeTest, NormalizeHandlesPositiveCosts) {
EdgeReshardingCostMatrix edge_cost(2, 2);
edge_cost(0, 0).communication_cost = 100;
edge_cost(0, 1).communication_cost = 200;
edge_cost(1, 0).communication_cost = 300;
edge_cost(1, 1).communication_cost = 400;
const EdgeReshardingCostMatrix normalized_edge_cost = Normalize(edge_cost);
EXPECT_EQ(normalized_edge_cost(0, 0).communication_cost, 100);
EXPECT_EQ(normalized_edge_cost(0, 1).communication_cost, 200);
EXPECT_EQ(normalized_edge_cost(1, 0).communication_cost, 300);
EXPECT_EQ(normalized_edge_cost(1, 1).communication_cost, 400);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bfb29bf7-6eab-4ad7-a819-b0a239d934ec | cpp | tensorflow/tensorflow | auto_sharding_solver | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_solver.cc | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_solver_test.cc | #include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/btree_set.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding.pb.h"
#ifdef PLATFORM_GOOGLE
#include "file/base/options.h"
#endif
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/hash.h"
#include "tsl/platform/types.h"
#include "ortools/linear_solver/linear_solver.h"
#include "ortools/linear_solver/linear_solver.pb.h"
#ifdef PLATFORM_GOOGLE
#include "file/base/helpers.h"
#include "util/task/status.pb.h"
#endif
namespace xla {
namespace spmd {
using ::operations_research::MPConstraint;
using ::operations_research::MPSolver;
using ::operations_research::MPVariable;
constexpr double kMaxCostEpsilon = 1.0001;
constexpr double kMemoryMultiplier = 1e6;
constexpr double kMaxCostValue = 1e18;
bool AutoShardingSolverOutput::operator==(
const AutoShardingSolverOutput& other) const {
return s_val == other.s_val && cost == other.cost &&
is_optimal == other.is_optimal && peak_times == other.peak_times;
}
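// Logs the peak-memory time steps and the largest individual tensors under
// the chosen strategies. This diagnostic only applies to the legacy
// liveness-matrix formulation; requests using node intervals skip it.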
void PrintLargestInstructions(
const std::vector<NodeStrategyIdx>& chosen_strategy,
const AutoShardingSolverRequest& request) {
if (!request.node_intervals().empty()) return;
std::vector<std::pair<LivenessIdx, double>> time_memory_usage;
for (LivenessIdx time_idx = 0; time_idx < request.live_size(); ++time_idx) {
double mem = 0.0;
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
mem += request.memory_costs(node_idx).costs(chosen_strategy[node_idx]);
}
time_memory_usage.push_back({time_idx, mem});
}
struct {
bool operator()(std::pair<LivenessIdx, double> a,
std::pair<LivenessIdx, double> b) const {
return a.second > b.second;
}
} MemLarger;
std::sort(time_memory_usage.begin(), time_memory_usage.end(), MemLarger);
LOG(INFO) << "using m[] and L[], max memory usage: "
<< time_memory_usage.front().second / (1024 * 1024 * 1024)
<< " GB at time " << time_memory_usage.front().first;
size_t k = 3;
k = std::min(k, time_memory_usage.size());
std::vector<std::pair<NodeIdx, double>> instruction_mem;
absl::flat_hash_set<NodeIdx> instruction_set;
for (auto usage_idx = 0; usage_idx < k; ++usage_idx) {
LivenessIdx time_idx = time_memory_usage.at(usage_idx).first;
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
double mem =
request.memory_costs(node_idx).costs(chosen_strategy[node_idx]);
if (mem > 100 * 1024 * 1024 &&
instruction_set.find(node_idx) == instruction_set.end()) {
instruction_mem.push_back({node_idx, mem});
instruction_set.insert(node_idx);
}
}
}
std::sort(instruction_mem.begin(), instruction_mem.end(), MemLarger);
size_t top_tensors = 10;
top_tensors = std::min(top_tensors, instruction_mem.size());
VLOG(1) << "Top " << top_tensors << " largest tensors:";
for (size_t i = 0; i < top_tensors; ++i) {
VLOG(1) << "instruction name: "
<< request.instruction_names(instruction_mem.at(i).first)
<< " memory usage: "
<< instruction_mem.at(i).second / (1024 * 1024 * 1024) << "GB";
}
}
absl::StatusOr<AutoShardingSolverOutput> SolveAndExtractSolution(
const AutoShardingSolverRequest& request,
const std::vector<std::vector<MPVariable*>>& s,
const std::vector<std::vector<MPVariable*>>& e,
const MPVariable* overbudget_var, const MPVariable* makespan_var,
MPSolver& solver);
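// Lower bound on the required memory budget: for each time step, sum the
// cheapest memory cost of every live node, then take the maximum over time.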
double MinimumMemoryBudgetRequired(const AutoShardingSolverRequest& request) {
double min_memory_budget_required_estimate = 0.0;
for (LivenessIdx time_idx = 0; time_idx < request.live_size(); ++time_idx) {
double min_memory_budget_required_estimate_local = 0.0;
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
const auto& m = request.memory_costs(node_idx).costs();
const double fixed_memory_cost = *std::min_element(m.begin(), m.end());
min_memory_budget_required_estimate_local += fixed_memory_cost;
}
min_memory_budget_required_estimate =
std::max(min_memory_budget_required_estimate,
min_memory_budget_required_estimate_local);
}
return min_memory_budget_required_estimate;
}
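// Largest finite coefficient in a cost matrix; entries at or above
// kInfinityCost are ignored.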
double MaxCoeff(
const tsl::protobuf::RepeatedPtrField<AutoShardingSolverRequest_Costs>&
cost_mat) {
double max_coeff = 0.0;
for (auto& costs : cost_mat) {
for (auto& cost : costs.costs()) {
if (cost < kInfinityCost) {
max_coeff = std::max(max_coeff, cost);
}
}
}
return max_coeff;
}
void ScaleCoeffs(
double scaling_factor,
tsl::protobuf::RepeatedPtrField<AutoShardingSolverRequest_Costs>*
cost_mat) {
for (auto& costs : *cost_mat) {
for (auto& cost : *costs.mutable_costs()) {
if (cost < kInfinityCost) {
cost = floor(cost * scaling_factor);
}
}
}
}
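// If the request carries a coefficient limit and any finite communication,
// computation, or resharding cost exceeds it, uniformly downscales those
// costs (with flooring) so the largest coefficient fits within the limit.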
AutoShardingSolverRequest ScaleRequest(
const AutoShardingSolverRequest& request) {
if (!request.has_coeff_limit()) return request;
VLOG(0) << "Scaling request by coefficient limit: "
<< request.coeff_limit().coeff();
double max_coeff = 0.0;
max_coeff = std::max(max_coeff, MaxCoeff(request.communication_costs()));
max_coeff = std::max(max_coeff, MaxCoeff(request.computation_costs()));
max_coeff = std::max(max_coeff, MaxCoeff(request.resharding_costs()));
if (max_coeff <= request.coeff_limit().coeff()) return request;
const double scaling_factor = request.coeff_limit().coeff() / max_coeff;
AutoShardingSolverRequest scaled_request = request;
ScaleCoeffs(scaling_factor, scaled_request.mutable_communication_costs());
ScaleCoeffs(scaling_factor, scaled_request.mutable_computation_costs());
ScaleCoeffs(scaling_factor, scaled_request.mutable_resharding_costs());
return scaled_request;
}
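// Shrinks the memory-constraint formulation: either runs MemoryTermReducer
// over the live sets / intervals or adopts pre-reduced groups, then creates
// one auxiliary variable per group that is constrained to be at least the
// group's normalized memory usage. Returns the (before, after) term counts
// when reduction was performed.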
std::optional<std::pair<int64_t, int64_t>> ReduceMemoryTerms(
const AutoShardingSolverRequest& request, MPSolver& solver,
int64_t num_lives, int64_t num_primitives,
const std::function<
tsl::protobuf::RepeatedField<int64_t>(int64_t)>&
live,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Pair>& intervals,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Group>& groups,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Costs>& memory_costs,
std::string_view prim_type,
std::vector<std::vector<MPVariable*>>& prim_vars,
std::vector<std::pair<int64_t, int64_t>>& reduced_intervals,
std::vector<MPVariable*>& group_vars,
absl::flat_hash_set<int64_t>& reduced_times) {
const absl::Time term_reduction_start_time = absl::Now();
std::optional<std::pair<int64_t, int64_t>> num_terms = std::nullopt;
std::vector<absl::btree_set<int64_t>> reduced_groups;
if (groups.empty()) {
for (const auto& interval : intervals) {
if (interval.first() > interval.second()) continue;
num_lives = std::max(num_lives, interval.second() + 1);
}
auto Intervals =
[intervals](int64_t prim_idx) -> std::pair<int64_t, int64_t> {
return {intervals.at(prim_idx).first(), intervals.at(prim_idx).second()};
};
MemoryTermReducer reducer;
num_terms =
intervals.empty()
? reducer.Reduce(num_lives, num_primitives, live)
: reducer.Reduce(num_lives, num_primitives, std::move(Intervals));
reduced_intervals = reducer.GetReducedIntervals();
reduced_groups = reducer.GetReducedGroups();
} else {
for (const auto& interval : intervals) {
reduced_intervals.push_back({interval.first(), interval.second()});
}
for (const auto& group : groups) {
reduced_groups.push_back({group.prims().begin(), group.prims().end()});
}
}
solver.MakeNumVarArray(reduced_groups.size(), 0.0, MPSolver::infinity(),
absl::StrCat("group_", prim_type), &group_vars);
for (int64_t group_idx = 0; group_idx < group_vars.size(); ++group_idx) {
MPConstraint* constraint = solver.MakeRowConstraint(
-MPSolver::infinity(), 0.0,
absl::StrCat("group_", prim_type, "[", group_idx, "]"));
constraint->SetCoefficient(group_vars[group_idx], -1.0);
for (const int64_t prim_idx : reduced_groups[group_idx]) {
for (int64_t j = 0; j < prim_vars[prim_idx].size(); ++j) {
double memory_cost = memory_costs.at(prim_idx).costs(j);
memory_cost /= request.memory_budget() / kMemoryMultiplier;
const double accumulated_coefficient =
constraint->GetCoefficient(prim_vars[prim_idx][j]);
constraint->SetCoefficient(prim_vars[prim_idx][j],
accumulated_coefficient + memory_cost);
}
}
}
const absl::flat_hash_set<int64_t> times = MemoryTermReducer::GetReducedTimes(
num_primitives, reduced_intervals, reduced_groups);
reduced_times.insert(times.begin(), times.end());
const absl::Time term_reduction_end_time = absl::Now();
if (num_terms) {
const auto term_reduction_duration =
term_reduction_end_time - term_reduction_start_time;
LOG(INFO) << "Memory Term Reducer for " << prim_type << "s took "
<< absl::ToInt64Milliseconds(term_reduction_duration)
<< " ms and reduced the number of terms from " << num_terms->first
<< " to " << num_terms->second;
}
return num_terms;
}
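// Emits one row constraint per reduced time step, bounding the normalized
// memory usage of all primitives (and group variables) live at that step by
// kMemoryMultiplier, optionally relaxed by the overbudget variable.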
void AddMemoryTerms(
const AutoShardingSolverRequest& request, MPSolver& solver,
int64_t num_primitives,
const std::vector<std::pair<int64_t, int64_t>>& intervals,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Costs>& memory_costs,
const MPVariable* overbudget_var,
const absl::flat_hash_set<int64_t>& reduced_times,
std::vector<std::vector<MPVariable*>>& prim_vars,
std::vector<MPVariable*>& group_vars,
absl::flat_hash_map<LivenessIdx, MPConstraint*>& constraints) {
for (int64_t prim_idx = 0; prim_idx < intervals.size(); ++prim_idx) {
for (int64_t time_idx = intervals[prim_idx].first;
time_idx <= intervals[prim_idx].second; ++time_idx) {
if (!reduced_times.contains(time_idx)) continue;
if (!constraints.contains(time_idx)) {
MPConstraint* constraint =
solver.MakeRowConstraint(-MPSolver::infinity(), kMemoryMultiplier,
absl::StrCat("mem[", time_idx, "]"));
if (overbudget_var) {
constraint->SetCoefficient(overbudget_var, -kMemoryMultiplier);
}
constraints[time_idx] = constraint;
}
MPConstraint* constraint = constraints[time_idx];
if (prim_idx >= num_primitives) {
constraint->SetCoefficient(group_vars[prim_idx - num_primitives], 1.0);
continue;
}
for (int64_t j = 0; j < prim_vars[prim_idx].size(); ++j) {
double memory_cost = memory_costs.at(prim_idx).costs(j);
memory_cost /= request.memory_budget() / kMemoryMultiplier;
const double accumulated_coefficient =
constraint->GetCoefficient(prim_vars[prim_idx][j]);
constraint->SetCoefficient(prim_vars[prim_idx][j],
accumulated_coefficient + memory_cost);
}
}
}
}
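// Formulates the auto-sharding problem as a mixed-integer program and solves
// it: boolean s[i][j] variables pick one strategy per node, e[i][j] variables
// pick a strategy pair per edge, and the objective sums computation,
// communication, and resharding costs subject to one-hot, memory,
// edge-consistency, and alias constraints.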
absl::StatusOr<AutoShardingSolverOutput> FormulateAndSolveMIPFromSolverRequest(
const AutoShardingSolverRequest& unscaled_request) {
const absl::Time start_time = absl::Now();
const AutoShardingSolverRequest& request = ScaleRequest(unscaled_request);
const size_t num_edges = request.edges_size();
const int num_workers = 32;
#ifdef PLATFORM_GOOGLE
std::unique_ptr<MPSolver> solver(MPSolver::CreateSolver("SAT"));
#else
std::unique_ptr<MPSolver> solver(
std::make_unique<MPSolver>("", MPSolver::SAT_INTEGER_PROGRAMMING));
#endif
CHECK(solver);
solver->MutableObjective()->SetMinimization();
std::string solver_parameter_str;
if (solver->ProblemType() ==
operations_research::MPSolver::SAT_INTEGER_PROGRAMMING) {
solver_parameter_str = absl::StrCat("num_workers:", num_workers);
if (request.deterministic_mode()) {
absl::StrAppend(
&solver_parameter_str,
",share_binary_clauses:false,random_seed:1,interleave_search:true");
}
if (request.has_solver_timeout()) {
absl::StrAppend(&solver_parameter_str, ",max_deterministic_time:",
request.solver_timeout().solver_timeout_in_seconds());
}
solver->SetSolverSpecificParametersAsString(solver_parameter_str);
}
std::vector<std::vector<MPVariable*>> s(request.num_nodes());
std::vector<std::vector<MPVariable*>> e(num_edges);
MPVariable* overbudget_var = nullptr;
MPVariable* makespan_var = nullptr;
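  // Create one boolean selection variable per strategy for every node that
  // does not follow another node; followers then alias their leader's array.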
size_t unique_nodes = 0;
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) < 0) {
unique_nodes += 1;
solver->MakeBoolVarArray(request.s_len(node_idx),
absl::StrCat("s[", node_idx, "]"), &s[node_idx]);
}
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) >= 0) {
CHECK_EQ(request.s_len(node_idx),
request.s_len(request.s_follow(node_idx)));
s[node_idx] = s[request.s_follow(node_idx)];
}
}
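  // Deduplicate edges whose endpoints resolve (after following) to the same
  // node pair, so duplicates share a single set of e variables.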
size_t unique_edges = 0;
std::vector<EdgeIdx> e_follow(num_edges, -1);
absl::flat_hash_map<std::pair<NodeIdx, NodeIdx>, EdgeIdx> edge_map;
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
const auto& raw_edge = request.edges(edge_idx);
const std::pair<NodeIdx, NodeIdx> edge(raw_edge.first(), raw_edge.second());
auto followed_edge = edge;
if (int f = request.s_follow(edge.first); f >= 0) followed_edge.first = f;
if (int f = request.s_follow(edge.second); f >= 0) followed_edge.second = f;
if (const auto& it = edge_map.find(followed_edge); it != edge_map.end()) {
e[edge_idx] = e[it->second];
e_follow[edge_idx] = it->second;
continue;
}
unique_edges += 1;
solver->MakeBoolVarArray(
request.s_len(edge.first) * request.s_len(edge.second),
absl::StrCat("e[", edge.first, ",", edge.second, "]"), &e[edge_idx]);
edge_map.insert({followed_edge, edge_idx});
}
if (request.memory_budget() > 0 && request.has_overbudget_coeff()) {
overbudget_var =
solver->MakeNumVar(0.0, MPSolver::infinity(), "overbudget");
}
if (request.has_makespan_coeff()) {
makespan_var = CreateMakespanVar(request, e, *solver);
}
absl::flat_hash_set<MPVariable*> infinity_vars;
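  // Accumulate finite node and edge costs into the minimization objective
  // (skipped entirely in departure-minimization mode); variables carrying
  // infinite cost are recorded so they can be pinned to zero below.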
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
double coefficient = request.computation_costs(node_idx).costs(j) +
request.communication_costs(node_idx).costs(j);
if (coefficient >= kInfinityCost) {
infinity_vars.insert(s[node_idx][j]);
continue;
}
if (request.minimize_departures()) continue;
double accumulated_coefficient =
solver->MutableObjective()->GetCoefficient(s[node_idx][j]);
solver->MutableObjective()->SetCoefficient(
s[node_idx][j], accumulated_coefficient + coefficient);
}
}
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
for (EdgeStrategyIdx j = 0; j < e[edge_idx].size(); ++j) {
double coefficient = request.resharding_costs(edge_idx).costs(j);
if (coefficient >= kInfinityCost) {
infinity_vars.insert(e[edge_idx][j]);
continue;
}
if (request.minimize_departures()) continue;
double accumulated_coefficient =
solver->MutableObjective()->GetCoefficient(e[edge_idx][j]);
solver->MutableObjective()->SetCoefficient(
e[edge_idx][j], accumulated_coefficient + coefficient);
}
}
LOG(INFO) << "Number of infinity terms: " << infinity_vars.size();
const NodeStrategies shaved_strategies =
StrategyShaver(request).FindShavedStrategies();
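  // Pin every infinite-cost or shaved strategy variable to zero, and bail
  // out if any node or edge is left without a finite-cost option.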
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (s[node_idx].empty() || request.s_follow(node_idx) >= 0) continue;
bool all_infinity = true;
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
if (infinity_vars.contains(s[node_idx][j]) ||
shaved_strategies.contains({node_idx, j})) {
MPConstraint* constraint = solver->MakeRowConstraint(
0.0, 0.0,
absl::StrCat("infinitycost: s[", node_idx, "][", j, "] = 0"));
constraint->SetCoefficient(s[node_idx][j], 1.0);
} else {
all_infinity = false;
}
}
if (all_infinity) {
LOG(FATAL) << "All of s[" << node_idx << "][*] have infinity costs";
}
}
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
if (e[edge_idx].empty() || e_follow[edge_idx] >= 0) continue;
bool all_infinity = true;
for (EdgeStrategyIdx j = 0; j < e[edge_idx].size(); ++j) {
if (infinity_vars.contains(e[edge_idx][j])) {
MPConstraint* constraint = solver->MakeRowConstraint(
0.0, 0.0,
absl::StrCat("infinitycost: e[", edge_idx, "][", j, "] = 0"));
constraint->SetCoefficient(e[edge_idx][j], 1.0);
} else {
all_infinity = false;
}
}
if (all_infinity) {
auto err_msg = absl::StrCat("All of e[", request.edges(edge_idx).first(),
"][", request.edges(edge_idx).second(),
"][*] have infinity costs");
if (request.crash_at_infinity_costs_check()) {
LOG(FATAL) << err_msg;
} else {
LOG(WARNING) << err_msg;
return absl::InternalError(err_msg);
}
}
}
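  // Each non-follower node must select exactly one strategy.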
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) >= 0) continue;
MPConstraint* constraint = solver->MakeRowConstraint(
1.0, 1.0,
absl::StrCat("sum(s[", node_idx, "][j] for j = [0 .. ",
s[node_idx].size(), ")) = 1"));
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
constraint->SetCoefficient(s[node_idx][j], 1.0);
}
}
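  // Enforce the memory budget at every (reduced) time step, optionally with
  // an overbudget slack variable that is penalized in the objective.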
if (request.memory_budget() > 0) {
auto LiveNodes =
[request](int64_t live_idx) -> tsl::protobuf::RepeatedField<int64_t> {
return request.live(live_idx).nodes();
};
auto LiveEdges =
[request](int64_t live_idx) -> tsl::protobuf::RepeatedField<int64_t> {
return request.live_edges(live_idx).edges();
};
std::vector<std::pair<int64_t, int64_t>> reduced_intervals_nodes,
reduced_intervals_edges;
absl::flat_hash_set<int64_t> reduced_times;
std::vector<MPVariable*> group_node_vars, group_edge_vars;
std::optional<std::pair<int64_t, int64_t>> num_node_terms, num_edge_terms;
num_node_terms = ReduceMemoryTerms(
request, *solver, request.live_size(), request.num_nodes(),
std::move(LiveNodes), request.node_intervals(), request.node_groups(),
request.memory_costs(), "node", s, reduced_intervals_nodes,
group_node_vars, reduced_times);
if (request.enable_memory_edge_costs()) {
num_edge_terms = ReduceMemoryTerms(
request, *solver, request.live_edges_size(), request.edges_size(),
std::move(LiveEdges), request.edge_intervals(), request.edge_groups(),
request.memory_edge_costs(), "edge", e, reduced_intervals_edges,
group_edge_vars, reduced_times);
}
absl::flat_hash_map<LivenessIdx, MPConstraint*> constraints;
AddMemoryTerms(request, *solver, request.num_nodes(),
reduced_intervals_nodes, request.memory_costs(),
overbudget_var, reduced_times, s, group_node_vars,
constraints);
if (request.enable_memory_edge_costs()) {
AddMemoryTerms(request, *solver, request.edges_size(),
reduced_intervals_edges, request.memory_edge_costs(),
overbudget_var, reduced_times, e, group_edge_vars,
constraints);
}
if (overbudget_var && !request.minimize_departures()) {
solver->MutableObjective()->SetCoefficient(
overbudget_var,
request.overbudget_coeff().coeff() * request.memory_budget());
}
LOG(INFO) << "Minimum memory budget estimate: "
<< MinimumMemoryBudgetRequired(request);
LOG(INFO) << "Using memory budget: "
<< static_cast<double>(request.memory_budget());
}
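  // Link each edge variable to its endpoint node variables via
  // e[i][p,q] >= s[u][p] + s[v][q] - 1 (the coefficient is doubled when
  // follower collapsing makes both endpoints the same variable).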
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
if (e_follow[edge_idx] >= 0) continue;
const auto& edge = request.edges(edge_idx);
for (NodeStrategyIdx p = 0; p < s[edge.first()].size(); ++p) {
for (NodeStrategyIdx q = 0; q < s[edge.second()].size(); ++q) {
const EdgeStrategyIdx j = p * s[edge.second()].size() + q;
MPConstraint* constraint = solver->MakeRowConstraint(
-1.0, MPSolver::infinity(),
absl::StrCat("edge[", edge_idx, "][", j, "]"));
double coeff = (s[edge.first()][p] == s[edge.second()][q]) ? 2.0 : 1.0;
constraint->SetCoefficient(s[edge.first()][p], -coeff);
constraint->SetCoefficient(s[edge.second()][q], -coeff);
constraint->SetCoefficient(e[edge_idx][j], 1.0);
}
}
}
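  // Forbid strategy pairs that violate value aliasing: whenever the alias
  // cost is positive, s[u][p] and s[v][q] may not both be chosen.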
absl::flat_hash_set<std::pair<NodeIdx, NodeIdx>> alias_set;
for (auto alias_idx = 0; alias_idx < request.aliases_size(); ++alias_idx) {
const auto& raw_alias = request.aliases(alias_idx);
const std::pair<NodeIdx, NodeIdx> alias(raw_alias.first(),
raw_alias.second());
if (alias_set.contains(alias)) continue;
alias_set.insert(alias);
const auto& value_costs = request.value_costs(alias_idx).costs();
for (NodeStrategyIdx p = 0; p < s[alias.first].size(); ++p) {
for (NodeStrategyIdx q = 0; q < s[alias.second].size(); ++q) {
if (value_costs[p * s[alias.second].size() + q] > 0.5) {
MPConstraint* constraint = solver->MakeRowConstraint(
-MPSolver::infinity(), 1,
absl::StrCat("s[", alias.first, "][", p, "] + s[", alias.second,
"][", q, "] <= 1"));
constraint->SetCoefficient(s[alias.first][p], 1.0);
constraint->SetCoefficient(s[alias.second][q], 1.0);
}
}
}
}
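  // Departure handling: either cap the total departure cost, or minimize it
  // directly in place of the usual objective.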
if (request.has_max_departures()) {
MPConstraint* constraint = solver->MakeRowConstraint(
0, request.max_departures().coeff(),
absl::StrCat("departures <= ", request.max_departures().coeff()));
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
double accumulated_coefficient =
constraint->GetCoefficient(s[node_idx][j]);
double departure_cost = request.departure_costs(node_idx).costs(j);
constraint->SetCoefficient(s[node_idx][j],
accumulated_coefficient + departure_cost);
}
}
}
if (request.minimize_departures()) {
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
double accumulated_coefficient =
solver->MutableObjective()->GetCoefficient(s[node_idx][j]);
double departure_cost = request.departure_costs(node_idx).costs(j);
solver->MutableObjective()->SetCoefficient(
s[node_idx][j], accumulated_coefficient + departure_cost);
}
}
}
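  // When a finite max cost is given, bound the objective from above (padded
  // by kMaxCostEpsilon to absorb rounding).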
if (request.has_max_cost() && request.max_cost().coeff() < kMaxCostValue) {
double max_cost = kMaxCostEpsilon * request.max_cost().coeff();
max_cost -= solver->Objective().offset();
MPConstraint* cost_constraint = solver->MakeRowConstraint(
-MPSolver::infinity(), max_cost, "cost_constraint");
for (const auto [var, coeff] : solver->Objective().terms()) {
cost_constraint->SetCoefficient(var, coeff);
}
}
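  // Warm-start the solver with the strategy hint, when one is provided and
  // deterministic mode is off.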
if (!request.s_hint().empty() && !request.deterministic_mode() &&
(!request.has_max_cost() || request.max_cost().coeff() < kMaxCostValue)) {
std::vector<std::pair<const MPVariable*, double>> hint;
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) >= 0) continue;
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
double hint_val = (request.s_hint(node_idx) == j) ? 1.0 : 0.0;
hint.push_back({s[node_idx][j], hint_val});
}
}
solver->SetHint(hint);
}
#ifdef PLATFORM_GOOGLE
bool dump_model = false;
if (dump_model) {
operations_research::MPModelProto model_proto;
solver->ExportModelToProto(&model_proto);
auto write_status = file::SetTextProto(
absl::StrCat("/tmp/model_", solver->NumVariables(), ".proto"),
model_proto, file::Defaults());
if (!write_status.ok()) {
LOG(ERROR) << write_status.message();
}
}
bool dump_solver_request = false;
if (dump_solver_request) {
uint64_t solver_request_fprint =
tsl::Fingerprint64(unscaled_request.SerializeAsString());
std::string request_dump_path =
absl::StrCat("/tmp/solver_request_", unscaled_request.request_name(),
"_", solver_request_fprint, ".proto");
auto write_status = file::SetBinaryProto(
request_dump_path, unscaled_request, file::Defaults());
VLOG(5) << "Dumped solver request to " << request_dump_path;
if (!write_status.ok()) {
LOG(ERROR) << write_status.message();
}
}
#endif
if (request.enable_output()) {
solver->EnableOutput();
}
VLOG(0) << "Starting solver " << solver->ProblemType() << "\n"
<< "Solver parameter string: " << solver_parameter_str << "\n"
<< "Number of workers: " << num_workers << "\n"
<< "Number of threads: " << solver->GetNumThreads() << "\n"
<< "Time limit: " << solver->time_limit() << "\n"
<< "Request valid: " << ValidateRequest(request).ok() << "\n"
<< "Aliases: " << request.aliases_size() << "\n"
<< "Unique nodes: " << unique_nodes << "\n"
<< "Unique edges: " << unique_edges << "\n"
<< "Total instructions: " << request.num_nodes() << "\n"
<< "Total edges: " << request.edges_size() << "\n"
<< "Memory budget: " << request.memory_budget() / (1024 * 1024 * 1024)
<< "GB\n"
<< "Number variables for ILP: " << solver->NumVariables() << "\n"
<< "Number of ILP constraints: " << solver->NumConstraints() << "\n"
<< "Deterministic mode: " << request.deterministic_mode() << "\n"
<< "Module name: " << request.module_name();
if (request.has_max_cost()) {
VLOG(0) << "Max cost: " << request.max_cost().coeff();
}
auto result = SolveAndExtractSolution(request, s, e, overbudget_var,
makespan_var, *solver);
if (result.ok()) {
const AutoShardingEvaluation evaluation =
Evaluate(unscaled_request, *result);
LOG(INFO) << "*** Total costs for the (unscaled) solver request ***";
LOG(INFO) << "Total Communication Cost: "
<< evaluation.total.communication_cost
<< " (lower bound: " << evaluation.lower_bound.communication_cost
<< ")";
LOG(INFO) << "Total Computation Cost: " << evaluation.total.computation_cost
<< " (lower bound: " << evaluation.lower_bound.computation_cost
<< ")";
LOG(INFO) << "Total Resharding Cost: " << evaluation.total.resharding_cost
<< " (lower bound: " << evaluation.lower_bound.resharding_cost
<< ")";
LOG(INFO) << "Total Overbudget Cost: " << evaluation.total.overbudget_cost
<< " (lower bound: " << evaluation.lower_bound.overbudget_cost
<< ")";
LOG(INFO) << "Total Makespan Cost: " << evaluation.total.makespan_cost
<< " (lower bound: " << evaluation.lower_bound.makespan_cost
<< ")";
LOG(INFO) << "Total Cost: " << evaluation.total.cost()
<< " (lower bound: " << evaluation.lower_bound.cost() << ")";
LOG(INFO) << "Total Departures: " << evaluation.total_departures;
LOG(INFO) << "Total Makespan: " << evaluation.total_makespan;
LOG(INFO) << "Total Violations: " << evaluation.violation_codes.size();
}
const absl::Time end_time = absl::Now();
const auto duration = end_time - start_time;
LOG(INFO) << "Solver took " << absl::ToInt64Milliseconds(duration) << " ms";
return result;
}
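// Reads back, for each node, the strategy whose boolean variable was set in
// the solution.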
std::vector<NodeStrategyIdx> GetChosenNodeStrategy(
const AutoShardingSolverRequest& request,
const std::vector<std::vector<MPVariable*>>& s) {
std::vector<NodeStrategyIdx> chosen_node_strategy(request.num_nodes(), -1);
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
if (s[node_idx][j]->solution_value() > 0.5) {
chosen_node_strategy[node_idx] = j;
break;
}
}
}
return chosen_node_strategy;
}
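// Runs the solver, handles infeasible / invalid / timed-out outcomes, logs
// model and solution fingerprints, and repackages the solution as an
// AutoShardingSolverOutput.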
absl::StatusOr<AutoShardingSolverOutput> SolveAndExtractSolution(
const AutoShardingSolverRequest& request,
const std::vector<std::vector<MPVariable*>>& s,
const std::vector<std::vector<MPVariable*>>& e,
const MPVariable* overbudget_var, const MPVariable* makespan_var,
MPSolver& solver) {
auto status = solver.Solve();
LOG(INFO) << "Solver absl::Status: " << status;
bool is_optimal = false;
if (status == operations_research::MPSolver::INFEASIBLE) {
LOG(ERROR) << "MPSolver could not find any feasible solution.";
#ifdef PLATFORM_GOOGLE
if (request.compute_iis()) {
operations_research::MPModelRequest model_request;
solver.ExportModelToProto(model_request.mutable_model());
if (solver.ProblemType() ==
operations_research::MPSolver::SAT_INTEGER_PROGRAMMING) {
model_request.set_solver_type(
operations_research::MPModelRequest::SAT_INTEGER_PROGRAMMING);
} else if (solver.ProblemType() == operations_research::MPSolver::
SCIP_MIXED_INTEGER_PROGRAMMING) {
model_request.set_solver_type(operations_research::MPModelRequest::
SCIP_MIXED_INTEGER_PROGRAMMING);
}
model_request.set_solver_time_limit_seconds(100);
auto iis = MPSolver::ComputeIrreducibleInfeasibleSubset(model_request);
LOG(INFO) << iis.status().DebugString();
LOG(INFO) << "Infeasible constraints: ";
for (int index : iis.constraint_index()) {
LOG(INFO) << " - " << model_request.model().constraint(index).name();
}
for (int index : iis.general_constraint_index()) {
LOG(INFO)
<< " - "
<< model_request.model().general_constraint(index).DebugString();
}
}
#endif
return absl::InternalError(
"MPSolver could not find any feasible solution.");
} else if (status == operations_research::MPSolver::MODEL_INVALID) {
LOG(FATAL) << "The MIP fed to the solver is invalid. This is most likely a "
"bug and should be reported.";
return absl::InternalError("Invalid MIP.");
} else if (status == operations_research::MPSolver::NOT_SOLVED) {
LOG(WARNING) << "Solver timeout; no solution was produced";
return absl::InternalError("Solver timed out.");
} else if (status != operations_research::MPSolver::OPTIMAL) {
LOG(WARNING) << "Solver timeout; moving forward with a suboptimal solution";
} else {
is_optimal = true;
}
operations_research::MPModelProto model_proto;
solver.ExportModelToProto(&model_proto);
uint64_t model_fprint = tsl::Fingerprint64(model_proto.SerializeAsString());
operations_research::MPSolutionResponse response;
solver.FillSolutionResponseProto(&response);
response.clear_solve_info();
uint64_t solution_fprint = tsl::Fingerprint64(response.SerializeAsString());
LOG(INFO) << "Objective value: " << solver.Objective().Value()
<< " Model fingerprint: " << model_fprint
<< " Solution fingerprint: " << solution_fprint;
if (solver.Objective().Value() >= kInfinityCost) {
LOG(WARNING) << "Objective (" << solver.Objective().Value()
<< ") is larger than kInfinityCost. It means the solver "
"chooses a solution with kInfinityCost and there may be "
"numerical issues when the solver considering other costs.";
}
if (VLOG_IS_ON(10)) {
VLOG(10) << "MODEL:";
XLA_VLOG_LINES(10, model_proto.DebugString());
VLOG(10) << "RESPONSE:";
XLA_VLOG_LINES(10, response.DebugString());
}
size_t num_edges = request.edges_size();
double unsalted_objective = 0.0;
const std::vector<NodeStrategyIdx> chosen_node_strategy =
GetChosenNodeStrategy(request, s);
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
const NodeStrategyIdx j = chosen_node_strategy[node_idx];
unsalted_objective += request.computation_costs(node_idx).costs(j) +
request.communication_costs(node_idx).costs(j);
}
const auto chosen_edge_strategy = [&](EdgeIdx edge_idx) {
const auto& edge = request.edges(edge_idx);
return chosen_node_strategy[edge.first()] * request.s_len(edge.second()) +
chosen_node_strategy[edge.second()];
};
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
const EdgeStrategyIdx j = chosen_edge_strategy(edge_idx);
unsalted_objective += request.resharding_costs(edge_idx).costs(j);
}
if (overbudget_var) {
unsalted_objective += request.overbudget_coeff().coeff() *
overbudget_var->solution_value() *
request.memory_budget();
}
if (makespan_var) {
unsalted_objective +=
request.makespan_coeff().coeff() * makespan_var->solution_value();
}
LOG(INFO) << "Unsalted objective value: " << unsalted_objective;
LOG(INFO) << "N = " << request.num_nodes();
if (request.memory_budget() < 0) {
LOG(INFO) << "memory budget: -1";
} else {
LOG(INFO) << "memory budget: "
<< request.memory_budget() / (1024 * 1024 * 1024) << " GB";
}
PrintLargestInstructions(chosen_node_strategy, request);
return AutoShardingSolverOutput{.s_val = std::move(chosen_node_strategy),
.cost = solver.Objective().Value(),
.is_optimal = is_optimal};
}
bool CostComponents::operator==(const CostComponents& other) const {
return communication_cost == other.communication_cost &&
computation_cost == other.computation_cost &&
resharding_cost == other.resharding_cost &&
overbudget_cost == other.overbudget_cost &&
makespan_cost == other.makespan_cost;
}
double CostComponents::cost() const {
return communication_cost + computation_cost + resharding_cost +
overbudget_cost + makespan_cost;
}
bool AutoShardingEvaluation::operator==(
const AutoShardingEvaluation& other) const {
return violation_codes == other.violation_codes && total == other.total &&
lower_bound == other.lower_bound &&
total_departures == other.total_departures;
}
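// Recomputes every cost component and constraint violation for a solver
// output, together with per-component lower bounds, so solutions can be
// checked and reported against the unscaled request.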
AutoShardingEvaluation Evaluate(const AutoShardingSolverRequest& request,
const AutoShardingSolverOutput& result) {
const auto& c = request.computation_costs();
const auto& d = request.communication_costs();
const auto& r = request.resharding_costs();
const auto& v = request.value_costs();
const auto& p = request.departure_costs();
const std::vector<NodeStrategyIdx>& s_val = result.s_val;
const auto e_val = [&](EdgeIdx edge_idx) {
const auto& edge = request.edges(edge_idx);
return s_val[edge.first()] * request.s_len(edge.second()) +
s_val[edge.second()];
};
AutoShardingEvaluation evaluation;
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
NodeIdx s_follow = request.s_follow(node_idx);
if (s_follow >= 0 && s_val[node_idx] != s_val[s_follow]) {
evaluation.violation_codes.insert(kFollowerViolationCode);
}
}
for (auto alias_idx = 0; alias_idx < request.aliases_size(); ++alias_idx) {
const auto& alias = request.aliases(alias_idx);
NodeStrategyIdx p = s_val[alias.first()], q = s_val[alias.second()];
if (v.at(alias_idx).costs(p * request.s_len(alias.second()) + q) > 0.5) {
evaluation.violation_codes.insert(kAliasViolationCode);
}
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
NodeStrategyIdx strat_idx = s_val[node_idx];
const double node_cost =
c.at(node_idx).costs(strat_idx) + d.at(node_idx).costs(strat_idx);
if (node_cost >= kInfinityCost) {
evaluation.violation_codes.insert(kInfiniteCostViolationCode);
}
}
for (EdgeIdx edge_idx = 0; edge_idx < request.edges_size(); ++edge_idx) {
if (r.at(edge_idx).costs(e_val(edge_idx)) >= kInfinityCost) {
evaluation.violation_codes.insert(kInfiniteCostViolationCode);
}
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
evaluation.total_departures += p.at(node_idx).costs(s_val[node_idx]);
if (request.has_max_departures() &&
evaluation.total_departures > request.max_departures().coeff()) {
evaluation.violation_codes.insert(kMaxDeparturesViolationCode);
}
}
if (request.memory_budget() > 0) {
double total_overbudget = 0.0;
double lower_bound_overbudget = 0.0;
std::vector<double> total_memory_costs, lower_bound_memory_costs;
if (request.node_intervals().empty()) {
total_memory_costs.resize(request.live_size(), 0.0);
lower_bound_memory_costs.resize(request.live_size(), 0.0);
for (LivenessIdx time_idx = 0; time_idx < request.live_size();
++time_idx) {
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
const auto& m = request.memory_costs(node_idx).costs();
total_memory_costs[time_idx] += m[s_val[node_idx]];
lower_bound_memory_costs[time_idx] +=
*std::min_element(m.begin(), m.end());
}
if (!request.live_edges().empty() &&
request.enable_memory_edge_costs()) {
for (EdgeIdx edge_idx : request.live_edges(time_idx).edges()) {
const auto& m = request.memory_edge_costs(edge_idx).costs();
total_memory_costs[time_idx] += m[e_val(edge_idx)];
lower_bound_memory_costs[time_idx] +=
*std::min_element(m.begin(), m.end());
}
}
}
} else {
std::vector<double> total_node_group_costs, total_edge_group_costs,
lower_bound_node_group_costs, lower_bound_edge_group_costs;
for (const auto& group : request.node_groups()) {
double total_group_cost = 0.0;
double lower_bound_group_cost = 0.0;
for (const NodeIdx node_idx : group.prims()) {
const auto& m = request.memory_costs(node_idx).costs();
total_group_cost += m[s_val[node_idx]];
lower_bound_group_cost += *std::min_element(m.begin(), m.end());
}
total_node_group_costs.push_back(total_group_cost);
lower_bound_node_group_costs.push_back(lower_bound_group_cost);
}
for (const auto& group : request.edge_groups()) {
double total_group_cost = 0.0;
double lower_bound_group_cost = 0.0;
for (const EdgeIdx edge_idx : group.prims()) {
const auto& m = request.memory_edge_costs(edge_idx).costs();
total_group_cost += m[e_val(edge_idx)];
lower_bound_group_cost += *std::min_element(m.begin(), m.end());
}
total_edge_group_costs.push_back(total_group_cost);
lower_bound_edge_group_costs.push_back(lower_bound_group_cost);
}
for (NodeIdx node_idx = 0; node_idx < request.node_intervals_size();
++node_idx) {
const auto& interval = request.node_intervals(node_idx);
if (interval.first() > interval.second()) continue;
while (total_memory_costs.size() <= interval.second()) {
total_memory_costs.push_back(0.0);
lower_bound_memory_costs.push_back(0.0);
}
double total_memory_cost = 0.0, lower_bound_memory_cost = 0.0;
if (node_idx < request.num_nodes()) {
const auto& m = request.memory_costs(node_idx).costs();
total_memory_cost = m[s_val[node_idx]];
lower_bound_memory_cost = *std::min_element(m.begin(), m.end());
} else {
int64_t group_idx = node_idx - request.num_nodes();
total_memory_cost = total_node_group_costs[group_idx];
lower_bound_memory_cost = lower_bound_node_group_costs[group_idx];
}
for (LivenessIdx time_idx = interval.first();
time_idx <= interval.second(); ++time_idx) {
total_memory_costs[time_idx] += total_memory_cost;
lower_bound_memory_costs[time_idx] += lower_bound_memory_cost;
}
}
if (request.enable_memory_edge_costs()) {
for (EdgeIdx edge_idx = 0; edge_idx < request.edge_intervals_size();
++edge_idx) {
const auto& interval = request.edge_intervals(edge_idx);
if (interval.first() > interval.second()) continue;
while (total_memory_costs.size() <= interval.second()) {
total_memory_costs.push_back(0.0);
lower_bound_memory_costs.push_back(0.0);
}
double total_memory_cost = 0.0, lower_bound_memory_cost = 0.0;
if (edge_idx < request.edges_size()) {
const auto& m = request.memory_edge_costs(edge_idx).costs();
total_memory_cost = m[e_val(edge_idx)];
lower_bound_memory_cost = *std::min_element(m.begin(), m.end());
} else {
int64_t group_idx = edge_idx - request.edges_size();
total_memory_cost = total_edge_group_costs[group_idx];
lower_bound_memory_cost = lower_bound_edge_group_costs[group_idx];
}
for (LivenessIdx time_idx = interval.first();
time_idx <= interval.second(); ++time_idx) {
total_memory_costs[time_idx] += total_memory_cost;
lower_bound_memory_costs[time_idx] += lower_bound_memory_cost;
}
}
}
}
for (LivenessIdx time_idx = 0; time_idx < total_memory_costs.size();
++time_idx) {
if (request.has_overbudget_coeff()) {
total_overbudget =
std::max(total_overbudget,
total_memory_costs[time_idx] - request.memory_budget());
lower_bound_overbudget = std::max(
lower_bound_overbudget,
lower_bound_memory_costs[time_idx] - request.memory_budget());
} else if (total_memory_costs[time_idx] > request.memory_budget()) {
evaluation.violation_codes.insert(kMemoryViolationCode);
}
}
if (request.has_overbudget_coeff()) {
evaluation.total.overbudget_cost =
request.overbudget_coeff().coeff() * total_overbudget;
evaluation.lower_bound.overbudget_cost =
request.overbudget_coeff().coeff() * lower_bound_overbudget;
}
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
evaluation.total.communication_cost +=
d.at(node_idx).costs(s_val[node_idx]);
evaluation.total.computation_cost += c.at(node_idx).costs(s_val[node_idx]);
evaluation.lower_bound.communication_cost += *std::min_element(
d.at(node_idx).costs().begin(), d.at(node_idx).costs().end());
evaluation.lower_bound.computation_cost += *std::min_element(
c.at(node_idx).costs().begin(), c.at(node_idx).costs().end());
}
for (EdgeIdx edge_idx = 0; edge_idx < request.edges_size(); ++edge_idx) {
evaluation.total.resharding_cost += r.at(edge_idx).costs(e_val(edge_idx));
evaluation.lower_bound.resharding_cost += *std::min_element(
r.at(edge_idx).costs().begin(), r.at(edge_idx).costs().end());
}
evaluation.total_makespan = EvaluateMakespan(request, result, evaluation);
return evaluation;
}
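// Structural sanity checks on a request: matching matrix sizes, non-negative
// costs, and edges that are unique, ordered (u < v), and within node bounds.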
absl::Status ValidateRequest(const AutoShardingSolverRequest& request) {
const int num_nodes = request.num_nodes();
const int num_edges = request.edges_size();
TF_RET_CHECK(num_nodes == request.computation_costs_size());
TF_RET_CHECK(num_nodes == request.communication_costs_size());
TF_RET_CHECK(num_nodes == request.memory_costs_size());
TF_RET_CHECK(num_edges == request.resharding_costs_size());
for (NodeIdx u = 0; u < num_nodes; ++u) {
const int num_strategies = request.computation_costs(u).costs_size();
TF_RET_CHECK(num_strategies >= 1);
TF_RET_CHECK(num_strategies == request.communication_costs(u).costs_size());
TF_RET_CHECK(num_strategies == request.memory_costs(u).costs_size());
for (NodeStrategyIdx strategy = 0; strategy < num_strategies; ++strategy) {
TF_RET_CHECK(request.computation_costs(u).costs(strategy) >= 0.0);
TF_RET_CHECK(request.communication_costs(u).costs(strategy) >= 0.0);
TF_RET_CHECK(request.memory_costs(u).costs(strategy) >= 0.0);
}
}
absl::btree_set<std::pair<int, int>> edges_seen;
for (EdgeIdx e = 0; e < num_edges; ++e) {
const int u = request.edges(e).first();
const int v = request.edges(e).second();
TF_RET_CHECK(u >= 0);
TF_RET_CHECK(u < num_nodes);
TF_RET_CHECK(v >= 0);
TF_RET_CHECK(v < num_nodes);
TF_RET_CHECK(u < v);
TF_RET_CHECK(edges_seen.count({u, v}) == 0);
edges_seen.insert({u, v});
const int num_strategies = request.resharding_costs(e).costs_size();
const int num_u_strategies = request.computation_costs(u).costs_size();
const int num_v_strategies = request.computation_costs(v).costs_size();
CHECK_EQ(num_strategies, num_u_strategies * num_v_strategies);
}
return absl::OkStatus();
}
}
} | #include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding.pb.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
using CostMatrix = std::vector<std::vector<double>>;
using NodeMatrix = std::vector<std::vector<int64_t>>;
using EdgeMatrix = std::vector<std::vector<int64_t>>;
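// Helpers that copy plain C++ matrices into the repeated proto fields of an
// AutoShardingSolverRequest.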
void AddCosts(proto2::RepeatedPtrField<AutoShardingSolverRequest_Costs>* costs,
const CostMatrix& cost_matrix) {
for (const auto& cost_row : cost_matrix) {
AutoShardingSolverRequest_Costs cost;
cost.mutable_costs()->Add(cost_row.begin(), cost_row.end());
costs->Add(std::move(cost));
}
}
void AddNodes(proto2::RepeatedPtrField<AutoShardingSolverRequest_Nodes>* nodes,
const NodeMatrix& node_matrix) {
for (const auto& node_row : node_matrix) {
AutoShardingSolverRequest_Nodes node;
node.mutable_nodes()->Add(node_row.begin(), node_row.end());
nodes->Add(std::move(node));
}
}
void AddEdges(proto2::RepeatedPtrField<AutoShardingSolverRequest_Edges>* edges,
const EdgeMatrix& edge_matrix) {
for (const auto& edge_row : edge_matrix) {
AutoShardingSolverRequest_Edges edge;
edge.mutable_edges()->Add(edge_row.begin(), edge_row.end());
edges->Add(std::move(edge));
}
}
void AddIntervals(
proto2::RepeatedPtrField<AutoShardingSolverRequest_Pair>* pairs,
const std::vector<std::pair<int64_t, int64_t>>& intervals) {
for (const auto& interval : intervals) {
AutoShardingSolverRequest_Pair pair;
pair.set_first(interval.first);
pair.set_second(interval.second);
pairs->Add(std::move(pair));
}
}
void AddGroups(
proto2::RepeatedPtrField<AutoShardingSolverRequest_Group>* groups,
const std::vector<std::vector<int64_t>>& reduced_groups) {
for (const auto& reduced_group : reduced_groups) {
AutoShardingSolverRequest_Group group;
group.mutable_prims()->Add(reduced_group.begin(), reduced_group.end());
groups->Add(std::move(group));
}
}
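// Builds the canonical five-node request used by the tests below: node 3
// follows node 2, edges (0,2) and (1,2), and an alias between nodes 1 and 4.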
AutoShardingSolverRequest DefaultAutoShardingSolverRequest() {
const auto s_len = {4, 3, 4, 4, 3};
const auto s_follow = {-1, -1, -1, 2, -1};
AutoShardingSolverRequest_Pair edge1, edge2;
edge1.set_first(0);
edge1.set_second(2);
edge2.set_first(1);
edge2.set_second(2);
const auto edges = {edge1, edge2};
const NodeMatrix live = {{1, 0},
{1, 0},
{1, 2, 0},
{1, 2, 3, 0},
{1, 3, 0}};
const CostMatrix c = {{10, 11, 12, 13},
{20, 21, 22},
{30, 31, 32, 33},
{40, 41, 42, 43},
{50, 51, 52}};
const CostMatrix d = {{100, 110, 120, 130},
{200, 210, 220},
{300, 310, 320, 330},
{400, 410, 420, 430},
{500, 510, 520}};
const CostMatrix m = {{100000, 110000, 990000, 130000},
{200000, 210000, 220000},
{300000, 310000, 320000, 330000},
{400000, 410000, 420000, 430000},
{500000, 510000, 520000}};
const CostMatrix p = {{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0},
{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0}};
const CostMatrix r = {{1000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
const CostMatrix t = {{73000, 72000, 71000, 70000,
63000, 62000, 61000, 60000,
53000, 52000, 51000, 50000,
43000, 42000, 41000, 40000},
{33000, 32000, 31000, 30000,
23000, 22000, 21000, 20000,
13000, 12000, 11000, 10000}};
AutoShardingSolverRequest_Pair alias;
alias.set_first(1);
alias.set_second(4);
const auto aliases = {alias};
const CostMatrix v = {{0, 1, 1,
1, 0, 1,
1, 1, 0}};
const std::vector<std::string> instruction_names = {"A", "B", "C", "D", "E"};
const std::vector<std::string> metadata_source_files = {"attention.py",
"convolution.py",
"layers.py",
"logits.py",
"pipeline.py"};
AutoShardingSolverRequest request;
request.set_num_nodes(5);
request.set_memory_budget(1500000);
request.mutable_s_len()->Add(s_len.begin(), s_len.end());
request.mutable_s_follow()->Add(s_follow.begin(), s_follow.end());
request.mutable_edges()->Add(edges.begin(), edges.end());
AddNodes(request.mutable_live(), live);
AddCosts(request.mutable_computation_costs(), c);
AddCosts(request.mutable_communication_costs(), d);
AddCosts(request.mutable_memory_costs(), m);
AddCosts(request.mutable_departure_costs(), p);
AddCosts(request.mutable_resharding_costs(), r);
AddCosts(request.mutable_duration_costs(), t);
request.mutable_aliases()->Add(aliases.begin(), aliases.end());
AddCosts(request.mutable_value_costs(), v);
request.mutable_instruction_names()->Add(instruction_names.begin(),
instruction_names.end());
request.mutable_metadata_source_files()->Add(metadata_source_files.begin(),
metadata_source_files.end());
return request;
}
AutoShardingSolverRequest AutoShardingSolverRequestWithEquivalences() {
const auto s_len = {4, 3, 7, 7, 3};
const auto s_follow = {-1, -1, -1, 2, -1};
AutoShardingSolverRequest_Pair edge1, edge2;
edge1.set_first(0);
edge1.set_second(2);
edge2.set_first(1);
edge2.set_second(2);
const auto edges = {edge1, edge2};
const NodeMatrix live = {{1, 0},
{1, 0},
{1, 2, 0},
{1, 2, 3, 0},
{1, 3, 0}};
const CostMatrix c = {{10, 10, 10, 10},
{20, 20, 20},
{30, 30, 31, 30, 30, 30, 30},
{40, 40, 40, 40, 40, 40, 40},
{50, 50, 50}};
const CostMatrix d = {{100, 100, 100, 100},
{200, 200, 200},
{300, 300, 300, 300, 300, 300, 300},
{400, 400, 400, 400, 400, 400, 410},
{500, 500, 500}};
const CostMatrix m = {{10000, 10000, 10000, 10000},
{20000, 20000, 20000},
{30000, 30000, 30000, 31000, 30000, 30000, 30000},
{40000, 40000, 40000, 40000, 40000, 40000, 40000},
{50000, 50000, 50000}};
const CostMatrix p = {{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0},
{1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0},
{1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0},
{1.0, 0.0, 1.0}};
const CostMatrix r = {{1000, 1000, 1000, 1000, 1000, 1000, 1000,
2000, 2000, 2000, 2000, 2000, 2000, 2000,
3000, 3000, 3000, 3000, 3100, 3000, 3000,
4000, 4000, 4000, 4000, 4000, 4000, 4000},
{5000, 5000, 5000, 5000, 5000, 5000, 5000,
6000, 6000, 6000, 6000, 6000, 6000, 6000,
7000, 7000, 7000, 7000, 7000, 7000, 7000}};
const CostMatrix t = {{70000, 70000, 70000, 70000, 70000, 70000, 70000,
60000, 60000, 60000, 60000, 60000, 60000, 60000,
50000, 50000, 50000, 50000, 50000, 50000, 50000,
40000, 40000, 40000, 40000, 40000, 40000, 40000},
{30000, 30000, 30000, 30000, 30000, 30000, 30000,
20000, 20000, 20000, 20000, 20000, 20000, 20000,
10000, 10000, 10000, 10000, 10000, 10000, 10000}};
AutoShardingSolverRequest_Pair alias;
alias.set_first(2);
alias.set_second(4);
const auto aliases = {alias};
const CostMatrix v = {{0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
1, 0, 1,
0, 1, 0}};
const std::vector<std::string> instruction_names = {"A", "B", "C", "D", "E"};
AutoShardingSolverRequest request;
request.set_num_nodes(5);
request.set_memory_budget(1500000);
request.mutable_s_len()->Add(s_len.begin(), s_len.end());
request.mutable_s_follow()->Add(s_follow.begin(), s_follow.end());
request.mutable_edges()->Add(edges.begin(), edges.end());
AddNodes(request.mutable_live(), live);
AddCosts(request.mutable_computation_costs(), c);
AddCosts(request.mutable_communication_costs(), d);
AddCosts(request.mutable_memory_costs(), m);
AddCosts(request.mutable_departure_costs(), p);
AddCosts(request.mutable_resharding_costs(), r);
AddCosts(request.mutable_duration_costs(), t);
request.mutable_aliases()->Add(aliases.begin(), aliases.end());
AddCosts(request.mutable_value_costs(), v);
request.mutable_instruction_names()->Add(instruction_names.begin(),
instruction_names.end());
return request;
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, SolvesOptimally) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, SolvesOverbudget) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 9007650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, SolvesMaxDepartures) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_departures()->set_coeff(3.0);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, MinimizesDepartures) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.set_minimize_departures(true);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 1, 0, 0, 1};
const double objective_value = 3.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, AvoidsInfiniteNodeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_computation_costs(0)->set_costs(0, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(1, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(2, kInfinityCost);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {3, 0, 0, 0, 0};
const double objective_value = 10683.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, AvoidsInfiniteEdgeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_resharding_costs(0)->set_costs(0, kInfinityCost);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, HandlesFollowedEdges) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
AutoShardingSolverRequest_Pair edge;
edge.set_first(1);
edge.set_second(3);
*request.mutable_edges()->Add() = edge;
const CostMatrix r = {{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
AddCosts(request.mutable_resharding_costs(), r);
const CostMatrix t = {{50000, 51000, 52000, 53000,
60000, 61000, 62000, 63000,
70000, 71000, 72000, 73000}};
AddCosts(request.mutable_duration_costs(), t);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 12650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, HandlesCollapsedEdge) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
AutoShardingSolverRequest_Pair edge;
edge.set_first(2);
edge.set_second(3);
*request.mutable_edges()->Add() = edge;
const CostMatrix r = {{9000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300,
8000, 8100, 8200, 8300}};
AddCosts(request.mutable_resharding_costs(), r);
const CostMatrix t = {{50000, 51000, 52000, 53000,
60000, 61000, 62000, 63000,
70000, 71000, 72000, 73000,
80000, 81000, 82000, 83000}};
AddCosts(request.mutable_duration_costs(), t);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const double objective_value = 13972.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, UsesHint) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const auto s_hint = {1, 0, 0, 0, 0};
request.mutable_s_hint()->Add(s_hint.begin(), s_hint.end());
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, HonorsMaxCost) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_cost()->set_coeff(7600.0);
const absl::StatusOr<AutoShardingSolverOutput> result =
FormulateAndSolveMIPFromSolverRequest(request);
EXPECT_TRUE(absl::IsInternal(result.status()));
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, HandlesExtremelyHighMaxCost) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_cost()->set_coeff(1e19);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, HandlesMemoryEdgeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const EdgeMatrix live_edges = {{}, {0}, {0, 1}, {1}, {}};
const CostMatrix memory_edge_costs = {{1000000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
AddEdges(request.mutable_live_edges(), live_edges);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, HandlesIntervals) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{0, 4}, {0, 4}, {2, 3}, {3, 4}, {100, -1}};
const std::vector<std::pair<int64_t, int64_t>> edge_intervals =
{{1, 2}, {2, 3}};
const CostMatrix memory_edge_costs = {{1000000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddIntervals(request.mutable_edge_intervals(), edge_intervals);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest,
HandlesReducedIntervalsAndGroups) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{5, -1}, {5, -1}, {2, 3}, {3, 4}, {100, -1}, {0, 4}};
const std::vector<std::pair<int64_t, int64_t>> edge_intervals =
{{1, 2}, {2, 3}};
const std::vector<std::vector<int64_t>> node_groups = {{0, 1}};
const std::vector<std::vector<int64_t>> edge_groups = {};
const CostMatrix memory_edge_costs = {{1000000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddIntervals(request.mutable_edge_intervals(), edge_intervals);
AddGroups(request.mutable_node_groups(), node_groups);
AddGroups(request.mutable_edge_groups(), edge_groups);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest,
HandlesReducedIntervalsAndGroupsNoMemoryEdgeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{5, -1}, {5, -1}, {2, 3}, {3, 4}, {100, -1}, {0, 4}};
const std::vector<std::vector<int64_t>> node_groups = {{0, 1}};
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddGroups(request.mutable_node_groups(), node_groups);
request.set_enable_memory_edge_costs(false);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest,
HandlesGroupsWithTinyMemoryCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{5, -1}, {5, -1}, {2, 3}, {3, 4}, {100, -1}, {0, 4}};
const std::vector<std::pair<int64_t, int64_t>> edge_intervals =
{{1, 2}, {2, 3}};
const std::vector<std::vector<int64_t>> node_groups = {{0, 1}};
const std::vector<std::vector<int64_t>> edge_groups = {};
const CostMatrix memory_costs = {{1, 1, 1, 1},
{2, 2, 2},
{300, 300, 300, 300, 300, 300, 300},
{4000, 4000, 4000, 4000, 4000, 4000, 4000},
{50000, 50000, 50000}};
const CostMatrix memory_edge_costs = {{0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0},
{0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0}};
request.clear_live();
request.clear_memory_costs();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddIntervals(request.mutable_edge_intervals(), edge_intervals);
AddGroups(request.mutable_node_groups(), node_groups);
AddGroups(request.mutable_edge_groups(), edge_groups);
AddCosts(request.mutable_memory_costs(), memory_costs);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
request.set_memory_budget(4321);
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(FormulateAndSolveMIPFromSolverRequestTest, SolvesWithEquivalences) {
const AutoShardingSolverRequest request =
AutoShardingSolverRequestWithEquivalences();
TF_ASSERT_OK_AND_ASSIGN(const AutoShardingSolverOutput result,
FormulateAndSolveMIPFromSolverRequest(request));
const std::vector<NodeStrategyIdx> s_val = {0, 0, 5, 5, 1};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output = {s_val, objective_value};
EXPECT_EQ(result, expected_output);
}
TEST(AutoShardingEvaluatorTest, NoViolations) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 2, 1};
const double objective_value = 12149.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 159.0;
expected_evaluation.total.communication_cost = 1590.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, EvaluatesOverbudget) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
  const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.total.overbudget_cost = 18400000.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.lower_bound.overbudget_cost = 9000000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, EvaluatesOverbudgetWithIntervals) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{0, 4}, {0, 4}, {2, 3}, {3, 4}, {100, -1}};
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
  const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.total.overbudget_cost = 18400000.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.lower_bound.overbudget_cost = 9000000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest,
EvaluatesOverbudgetWithReducedIntervalsAndGroups) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{5, -1}, {5, -1}, {2, 3}, {3, 4}, {100, -1}, {0, 4}};
const std::vector<std::vector<int64_t>> node_groups = {{0, 1}};
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddGroups(request.mutable_node_groups(), node_groups);
  const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.total.overbudget_cost = 18400000.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.lower_bound.overbudget_cost = 9000000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesFollower) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
  const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 1 /* violates follower */, 1};
const double objective_value = 12138.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kFollowerViolationCode};
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 2.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesAlias) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
  const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 2, 0 /* violates alias */};
const double objective_value = 12138.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kAliasViolationCode};
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 4.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesMemory) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
  const std::vector<NodeStrategyIdx> s_val = {2 /* violates memory */, 1, 2, 2, 1};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kMemoryViolationCode};
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesInfiniteCostForNode) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_computation_costs(0)->set_costs(0, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(1, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(2, kInfinityCost);
  const std::vector<NodeStrategyIdx> s_val = {0 /* infinite cost */, 1, 2, 2, 1};
const double objective_value = 1e+20;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kInfiniteCostViolationCode};
expected_evaluation.total.computation_cost = 1e+20;
expected_evaluation.total.communication_cost = 1560.0;
expected_evaluation.total.resharding_cost = 7400.0;
expected_evaluation.lower_bound.computation_cost = 153.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesInfiniteCostForEdge) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_resharding_costs(0)->set_costs(2, kInfinityCost);
const std::vector<NodeStrategyIdx> s_val = {0, 1, 2, 2, 1};
const double objective_value = 1e+20;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kInfiniteCostViolationCode};
expected_evaluation.total.computation_cost = 156.0;
expected_evaluation.total.communication_cost = 1560.0;
expected_evaluation.total.resharding_cost = 1e+20;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesMaxDepartures) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_departures()->set_coeff(2.0);
const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 2, 1};
const double objective_value = 12149.0;
const AutoShardingSolverOutput output = {s_val, objective_value};
const AutoShardingEvaluation evaluation = Evaluate(request, output);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kMaxDeparturesViolationCode};
expected_evaluation.total.computation_cost = 159.0;
expected_evaluation.total.communication_cost = 1590.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(ScaleRequest, ScalesProperly) {
AutoShardingSolverRequest unscaled_request;
const CostMatrix c = {{10000000, 11000000, 12000000, 13000000},
{20000000, 21000000, 22000000},
{30000000, 31000000, 32000000, 33000000},
{40000000, 41000000, 42000000, 43000000},
{50000000, 51000000, 52000000, 53000000}};
const CostMatrix d = {{100000000, 110000000, 120000000, 130000000},
{200000000, 210000000, 220000000},
{300000000, 310000000, 320000000, 330000000},
{400000000, 410000000, 420000000, 430000000},
{500000000, 510000000, 520000000}};
const CostMatrix r = {{1000000000, 1100000000, 1200000000, 1300000000,
2000000000, 2100000000, 2200000000, 2300000000,
3000000000, 3100000000, 3200000000, 3300000000,
4000000000, 4100000000, 4200000000, 4300000000},
{5000000000, 5100000000, 5200000000, 5300000000,
6000000000, 6100000000, 6200000000, 6300000000,
7000000000, 7100000000, 7200000000, 10000000000000}};
AddCosts(unscaled_request.mutable_computation_costs(), c);
AddCosts(unscaled_request.mutable_communication_costs(), d);
AddCosts(unscaled_request.mutable_resharding_costs(), r);
unscaled_request.mutable_coeff_limit()->set_coeff(1e7);
AutoShardingSolverRequest request = ScaleRequest(unscaled_request);
AutoShardingSolverRequest expected_request;
const CostMatrix expected_c = {{10, 11, 12, 13},
{20, 21, 22},
{30, 31, 32, 33},
{40, 41, 42, 43},
{50, 51, 52, 53}};
const CostMatrix expected_d = {{100, 110, 120, 130},
{200, 210, 220},
{300, 310, 320, 330},
{400, 410, 420, 430},
{500, 510, 520}};
const CostMatrix expected_r = {{1000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 10000000}};
AddCosts(expected_request.mutable_computation_costs(), expected_c);
AddCosts(expected_request.mutable_communication_costs(), expected_d);
AddCosts(expected_request.mutable_resharding_costs(), expected_r);
expected_request.mutable_coeff_limit()->set_coeff(1e7);
EXPECT_THAT(request, ::testing::EqualsProto(expected_request));
}
TEST(ScaleRequest, SkipsScaling) {
AutoShardingSolverRequest unscaled_request;
const CostMatrix c = {{10, 11, 12, 13},
{20, 21, 22},
{30, 31, 32, 33},
{40, 41, 42, 43},
{50, 51, 52, 53}};
const CostMatrix d = {{100, 110, 120, 130},
{200, 210, 220},
{300, 310, 320, 330},
{400, 410, 420, 430},
{500, 510, 520}};
const CostMatrix r = {{1000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 10000000}};
AddCosts(unscaled_request.mutable_computation_costs(), c);
AddCosts(unscaled_request.mutable_communication_costs(), d);
AddCosts(unscaled_request.mutable_resharding_costs(), r);
unscaled_request.mutable_coeff_limit()->set_coeff(1e7);
AutoShardingSolverRequest request = ScaleRequest(unscaled_request);
AutoShardingSolverRequest expected_request;
const CostMatrix expected_c = {{10, 11, 12, 13},
{20, 21, 22},
{30, 31, 32, 33},
{40, 41, 42, 43},
{50, 51, 52, 53}};
const CostMatrix expected_d = {{100, 110, 120, 130},
{200, 210, 220},
{300, 310, 320, 330},
{400, 410, 420, 430},
{500, 510, 520}};
const CostMatrix expected_r = {{1000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 10000000}};
AddCosts(expected_request.mutable_computation_costs(), expected_c);
AddCosts(expected_request.mutable_communication_costs(), expected_d);
AddCosts(expected_request.mutable_resharding_costs(), expected_r);
expected_request.mutable_coeff_limit()->set_coeff(1e7);
EXPECT_THAT(request, ::testing::EqualsProto(expected_request));
}
TEST(StableMap, IterationOrderDeterminism) {
StableMap<int, int> map;
std::vector<int> insertion_order = {6, 3, 1, 2, 4, 5, 10, 0, 7, 9, 8};
for (int key : insertion_order) {
map[key] = key;
}
std::vector<int> iteration_order;
for (const auto& [key, value] : map) {
iteration_order.push_back(key);
}
EXPECT_THAT(iteration_order,
::testing::ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
}
TEST(ValidateRequest, AcceptsAutoShardingSolverRequest) {
CHECK_OK(ValidateRequest(DefaultAutoShardingSolverRequest()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_solver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_solver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37814792-efdc-4cb3-a712-b3397f948e72 | cpp | tensorflow/tensorflow | auto_sharding_memory | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory.cc | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory_test.cc | #include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace spmd {
namespace {
using PrimIdx = int64_t;
using LiveIdx = int64_t;
using GroupIdx = int64_t;
using PrimPair = std::pair<PrimIdx, PrimIdx>;
using Interval = std::pair<LiveIdx, LiveIdx>;
using ActivePrim = std::pair<Interval, PrimIdx>;
bool IsValid(const Interval& interval) {
return interval.first <= interval.second;
}
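// Inclusive number of live points spanned by a (valid) interval.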
int64_t length(const Interval& interval) {
return interval.second - interval.first + 1;
}
}
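// Reduces terms given as a liveness matrix: converts each primitive's live
// points into a single [first, last] interval, runs the interval-based
// reduction, and then rematerializes the reduced liveness matrix.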
std::pair<int64_t, int64_t> MemoryTermReducer::Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<
tsl::protobuf::RepeatedField<int64_t>(int64_t)>&
live,
int64_t max_iterations) {
LOG(INFO) << "Memory Term Reducer beginning to reduce number of terms ...";
reduced_live_.clear();
reduced_intervals_.clear();
reduced_groups_.clear();
int64_t num_terms = 0;
reduced_intervals_.reserve(num_primitives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
reduced_intervals_.push_back({std::numeric_limits<LiveIdx>::max(), 0});
}
for (LiveIdx live_idx = 0; live_idx < num_lives; ++live_idx) {
for (const PrimIdx prim_idx : live(live_idx)) {
Interval& interval = reduced_intervals_[prim_idx];
interval.first = std::min(interval.first, live_idx);
interval.second = std::max(interval.second, live_idx);
++num_terms;
}
}
Reduce(num_lives, num_primitives, max_iterations);
int64_t num_reduced_terms = 0;
reduced_live_.resize(num_lives);
for (PrimIdx prim_idx = 0; prim_idx < reduced_intervals_.size(); ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
for (LiveIdx live_idx = interval.first; live_idx <= interval.second;
++live_idx) {
reduced_live_[live_idx].push_back(prim_idx);
++num_reduced_terms;
}
}
for (const auto& group : reduced_groups_) num_reduced_terms += group.size();
LOG(INFO) << "Memory Term Reducer finished reducing the number of terms.";
return {num_terms, num_reduced_terms};
}
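// Reduces terms given directly as [first, last] intervals; intervals with
// first > second are treated as empty and contribute no terms.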
std::pair<int64_t, int64_t> MemoryTermReducer::Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<std::pair<int64_t, int64_t>(int64_t)>& intervals,
int64_t max_iterations) {
LOG(INFO) << "Memory Term Reducer beginning to reduce number of terms ...";
reduced_live_.clear();
reduced_intervals_.clear();
reduced_groups_.clear();
int64_t num_terms = 0;
reduced_intervals_.reserve(num_primitives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
reduced_intervals_.push_back(intervals(prim_idx));
const Interval& interval = reduced_intervals_.back();
if (IsValid(interval)) num_terms += length(interval);
}
Reduce(num_lives, num_primitives, max_iterations);
int64_t num_reduced_terms = 0;
for (PrimIdx prim_idx = 0; prim_idx < reduced_intervals_.size(); ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
if (IsValid(interval)) num_reduced_terms += length(interval);
}
for (const auto& group : reduced_groups_) num_reduced_terms += group.size();
LOG(INFO) << "Memory Term Reducer finished reducing the number of terms.";
return {num_terms, num_reduced_terms};
}
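// Core reduction loop: sweeps the liveness timeline and merges overlapping
// primitives into groups whenever doing so lowers the total term count,
// repeating until no merge helps or max_iterations is reached.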
void MemoryTermReducer::Reduce(int64_t num_lives, int64_t num_primitives,
int64_t max_iterations) {
std::vector<absl::btree_set<PrimIdx>> enter(num_lives), evict(num_lives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
if (!IsValid(interval)) continue;
enter[interval.first].insert(prim_idx);
evict[interval.second].insert(prim_idx);
}
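  // True iff the first primitive's interval strictly contains the second's,
  // in which case extracting their overlap would split the larger primitive.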
auto Splits = [this](PrimIdx large_idx, PrimIdx small_idx) -> bool {
const Interval& large = reduced_intervals_[large_idx];
const Interval& small = reduced_intervals_[small_idx];
return large.first < small.first && large.second > small.second;
};
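  // Returns the intersection of two primitives' intervals, or nullopt when
  // the pair cannot be merged (same primitive, an empty interval, or a merge
  // that would split one of them).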
auto CalcOverlap = [this, Splits](
int64_t prim0_idx,
int64_t prim1_idx) -> std::optional<Interval> {
if (prim0_idx == prim1_idx) return std::nullopt;
const Interval& interval0 = reduced_intervals_[prim0_idx];
const Interval& interval1 = reduced_intervals_[prim1_idx];
if (!IsValid(interval0) || !IsValid(interval1)) return std::nullopt;
if (Splits(prim0_idx, prim1_idx)) return std::nullopt;
if (Splits(prim1_idx, prim0_idx)) return std::nullopt;
return Interval(std::max(interval0.first, interval1.first),
std::min(interval0.second, interval1.second));
};
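  // Adds a primitive to the group under construction; indices at or beyond
  // num_primitives denote existing groups, whose members are added instead.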
auto MergeIntoGroup = [num_primitives, this](
PrimIdx prim_idx,
absl::btree_set<PrimIdx>& reduced_group) {
if (prim_idx < num_primitives) {
reduced_group.insert(prim_idx);
} else {
const auto& group = reduced_groups_[prim_idx - num_primitives];
reduced_group.insert(group.begin(), group.end());
}
};
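  // Counts the terms contributed by a primitive or group, optionally
  // excluding a candidate overlap region that would be factored out.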
auto CalcNumTerms = [num_primitives, this](
PrimIdx prim_idx,
std::optional<Interval> overlap = std::nullopt) {
int64_t num_terms = length(reduced_intervals_[prim_idx]);
if (overlap) num_terms -= length(*overlap);
if (prim_idx >= num_primitives && num_terms > 0) {
num_terms += reduced_groups_[prim_idx - num_primitives].size();
}
return num_terms;
};
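  // Shrinks a primitive's interval to exclude the extracted overlap and keeps
  // the enter/evict sweep structures consistent with the new bounds.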
auto UpdatePrimitive = [this, &enter, &evict](
PrimIdx prim_idx,
const Interval& overlap) mutable {
Interval& interval = reduced_intervals_[prim_idx];
enter[interval.first].erase(prim_idx);
evict[interval.second].erase(prim_idx);
if (auto& t = interval.first; t == overlap.first) t = overlap.second + 1;
if (auto& t = interval.second; t == overlap.second) t = overlap.first - 1;
if (!IsValid(interval)) return;
enter[interval.first].insert(prim_idx);
evict[interval.second].insert(prim_idx);
};
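  // Performs one sweep over the timeline: gathers overlapping active pairs
  // (largest overlaps first) and merges each pair into a new group whenever
  // the merge reduces the term count. Returns true if anything changed.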
auto SweepAndMerge = [&num_lives, &enter, &evict, &CalcOverlap, &CalcNumTerms,
&MergeIntoGroup, &UpdatePrimitive, this]() -> bool {
absl::btree_set<ActivePrim> actives;
absl::btree_multimap<int64_t, PrimPair> overlaps;
for (LiveIdx live_idx = 0; live_idx < num_lives; ++live_idx) {
for (const PrimIdx prim_idx : enter[live_idx]) {
actives.insert({reduced_intervals_[prim_idx], prim_idx});
}
for (const PrimIdx prim_idx : evict[live_idx]) {
auto active = actives.find({reduced_intervals_[prim_idx], prim_idx});
if (++active == actives.end()) continue;
std::optional<Interval> overlap = CalcOverlap(prim_idx, active->second);
if (!overlap) continue;
overlaps.insert({-length(*overlap), {prim_idx, active->second}});
}
for (const PrimIdx prim_idx : evict[live_idx]) {
actives.erase({reduced_intervals_[prim_idx], prim_idx});
}
}
bool changed = false;
for (const auto& [_, prim_pair] : overlaps) {
const PrimIdx prim0_idx = prim_pair.first, prim1_idx = prim_pair.second;
const std::optional<Interval> overlap = CalcOverlap(prim0_idx, prim1_idx);
if (!overlap) continue;
absl::btree_set<PrimIdx> reduced_group;
MergeIntoGroup(prim0_idx, reduced_group);
MergeIntoGroup(prim1_idx, reduced_group);
if (CalcNumTerms(prim0_idx) + CalcNumTerms(prim1_idx) <=
CalcNumTerms(prim0_idx, overlap) + CalcNumTerms(prim1_idx, overlap) +
length(*overlap) + reduced_group.size()) {
continue;
}
enter[overlap->first].insert(reduced_intervals_.size());
evict[overlap->second].insert(reduced_intervals_.size());
reduced_intervals_.push_back({overlap->first, overlap->second});
reduced_groups_.push_back(reduced_group);
UpdatePrimitive(prim0_idx, *overlap);
UpdatePrimitive(prim1_idx, *overlap);
changed = true;
}
return changed;
};
for (int64_t iteration = 0; iteration < max_iterations; ++iteration) {
if (!SweepAndMerge()) break;
}
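  // Discard any group whose interval was emptied out by later merges, since
  // it no longer contributes terms of its own.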
for (GroupIdx group_idx = reduced_groups_.size() - 1; group_idx >= 0;
--group_idx) {
if (IsValid(reduced_intervals_[num_primitives + group_idx])) continue;
reduced_intervals_.erase(reduced_intervals_.begin() + num_primitives +
group_idx);
reduced_groups_.erase(reduced_groups_.begin() + group_idx);
}
}
const std::vector<std::vector<int64_t>>& MemoryTermReducer::GetReducedLive()
const {
return reduced_live_;
}
const std::vector<std::pair<int64_t, int64_t>>&
MemoryTermReducer::GetReducedIntervals() const {
return reduced_intervals_;
}
const std::vector<absl::btree_set<int64_t>>&
MemoryTermReducer::GetReducedGroups() const {
return reduced_groups_;
}
absl::flat_hash_set<int64_t> MemoryTermReducer::GetReducedTimes(
int64_t num_primitives) {
return GetReducedTimes(num_primitives, reduced_intervals_, reduced_groups_);
}
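// Expands groups back into per-primitive intervals and returns a reduced set
// of time points (the latest entering time preceding each eviction) at which
// memory constraints still need to be enforced.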
absl::flat_hash_set<int64_t> MemoryTermReducer::GetReducedTimes(
int64_t num_primitives,
const std::vector<std::pair<int64_t, int64_t>>& reduced_intervals,
const std::vector<absl::btree_set<int64_t>>& reduced_groups) {
std::vector<std::pair<int64_t, int64_t>> intervals;
for (int64_t reduced_interval_idx = 0;
reduced_interval_idx < reduced_intervals.size();
++reduced_interval_idx) {
const Interval& reduced_interval = reduced_intervals[reduced_interval_idx];
if (reduced_interval_idx < num_primitives) {
intervals.push_back(reduced_interval);
continue;
}
const GroupIdx group_idx = reduced_interval_idx - num_primitives;
for (const PrimIdx prim_idx : reduced_groups[group_idx]) {
Interval& interval = intervals[prim_idx];
if (!IsValid(interval)) {
interval.first = reduced_interval.first;
interval.second = reduced_interval.second;
continue;
}
interval.first = std::min(interval.first, reduced_interval.first);
interval.second = std::max(interval.second, reduced_interval.second);
}
}
absl::btree_set<std::pair<int64_t, bool>> times;
for (const Interval& interval : intervals) {
if (!IsValid(interval)) continue;
times.insert({interval.first, false});
times.insert({interval.second, true});
}
int64_t last_entering_time = -1;
absl::flat_hash_set<int64_t> reduced_times;
for (const auto& time : times) {
    if (time.second) {
reduced_times.insert(last_entering_time);
} else {
last_entering_time = time.first;
}
}
reduced_times.insert(last_entering_time);
return reduced_times;
}
}
} | #include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
namespace xla {
namespace spmd {
namespace {
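// Adapters exposing the test fixtures through the callback interfaces
// expected by MemoryTermReducer::Reduce.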
std::function<tsl::protobuf::RepeatedField<int64_t>(int64_t)>
Convert(const std::vector<std::vector<int64_t>>& live) {
return [live](int64_t live_idx) {
    return tsl::protobuf::RepeatedField<int64_t>(live[live_idx].begin(),
                                                 live[live_idx].end());
};
}
std::function<std::pair<int64_t, int64_t>(int64_t)> Convert(
const std::vector<std::pair<int64_t, int64_t>>& intervals) {
return [intervals](int64_t prim_idx) { return intervals[prim_idx]; };
}
TEST(AutoShardingMemoryTest, WithoutOverlap) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0 },
{0 },
{ 1},
{ 1},
{ 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{0 },
{0 },
{ 1},
{ 1},
{ 1}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 2}, {3, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {6, 6};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, PartialOverlap) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{ 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 2},
{ 2},
{ 2},
{ 2},
{ 1 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {5, 5}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, PartialOverlapReversed) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{ 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 1 },
{ 2},
{ 2},
{ 2},
{ 2},
{0 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, 5}, {0, 0}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, DoesNotSplitPrimitive) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 5}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 10};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OnePrimitiveVanishes) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 2},
{ 2},
{ 2},
{ 2},
{ 2}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {6, 0}, {1, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {11, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, BothPrimitivesVanish) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{2},
{2},
{2},
{2},
{2},
{2}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, -1}, {6, -1}, {0, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {12, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OneGroupingPreventsAnother) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2},
{ 1, 2},
{ 1, 2},
{ 1, 2},
{ 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 3},
{ 3},
{ 3},
{ 3},
{ 2, 3},
{1, 2 },
{1, 2 },
{1, 2 },
{ 2 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, -1}, {5, 7}, {4, 8}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {18, 15};
const absl::flat_hash_set<int64_t> expected_reduced_times = {4};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, TwoGroups) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 2},
{0, 2},
{0, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{3},
{3},
{3},
{4},
{4},
{4}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, 2}, {3, -1}, {6, 2}, {0, 2}, {3, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {0, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {12, 10};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, TwoGroupsMutuallyExclusive) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1 },
{0, 1 },
{0, 1 },
{ 2, 3},
{ 2, 3},
{ 2, 3},
{ 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 4},
{ 4},
{ 4},
{ 5},
{ 5},
{ 5},
{ 3 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {4, 0}, {7, 3}, {7, 7}, {1, 3}, {4, 6}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {14, 12};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1, 4};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergingPrimitivesWouldNotReduceTerms) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0, 1},
{0, 1}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 1}, {0, 1}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {4, 4};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, AllPrimitivesVanish) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{3},
{3},
{3},
{3},
{3},
{3}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, -1}, {6, -1}, {6, -1}, {0, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {18, 9};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergingGroupsWouldNotReduceTerms) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{ 2, 3},
{ 2, 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{4 },
{4 },
{4 },
{4, 5},
{4, 5},
{4, 5},
{4, 5},
{ 5},
{ 5}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{7, -1}, {7, -1}, {9, 2}, {9, 2}, {0, 6}, {3, 8}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {26, 17};
const absl::flat_hash_set<int64_t> expected_reduced_times = {3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, ExampleFromDocumentation) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{ 2, 3},
{ 2, 3},
{ 2, 3},
{ 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 4},
{ 4},
{ 4},
{ 4},
{ 6},
{ 6},
{ 6},
{ 6},
{ 6},
{ 5},
{ 5},
{ 5},
{ 3 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 4}, {10, 12}, {5, 9}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}, {0, 1, 2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 22};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergesWithRightmost) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 2},
{0, 2},
{0, 2},
{ 1, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 3},
{ 3},
{ 3},
{1, 2 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{3, -1}, {3, 3}, {3, 3}, {0, 2}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {8, 7};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, ExampleFromDocumentationUsingIntervals) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 9}, {1, 9}, {5, 12}, {5, 13}};
MemoryTermReducer reducer;
  const auto num_terms =
      reducer.Reduce(/*num_lives=*/14, num_primitives, Convert(intervals));
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 4}, {10, 12}, {5, 9}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}, {0, 1, 2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 22};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, InvalidIntervals) {
const int num_primitives = 3;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 4}, {9223372036854775807, 0}, {9223372036854775807, 0}};
MemoryTermReducer reducer;
  const auto num_terms =
      reducer.Reduce(/*num_lives=*/5, num_primitives, Convert(intervals));
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 4}, {9223372036854775807, 0}, {9223372036854775807, 0}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {5, 5};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OneIterationOnly) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 9}, {1, 9}, {5, 12}, {5, 13}};
MemoryTermReducer reducer;
  const auto num_terms = reducer.Reduce(/*num_lives=*/14, num_primitives,
                                        Convert(intervals),
                                        /*max_iterations=*/1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 9}, {5, 12}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 23};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsBottomLeft) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 13}, {0, 10}, {0, 7}, {0, 4}};
MemoryTermReducer reducer;
  const auto num_terms = reducer.Reduce(/*num_lives=*/14, num_primitives,
                                        Convert(intervals),
                                        /*max_iterations=*/1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{11, 13}, {11, -1}, {5, 7}, {5, -1}, {0, 10}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsTopLeft) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 4}, {0, 7}, {0, 10}, {0, 13}};
MemoryTermReducer reducer;
  const auto num_terms = reducer.Reduce(/*num_lives=*/14, num_primitives,
                                        Convert(intervals),
                                        /*max_iterations=*/1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, -1}, {5, 7}, {11, -1}, {11, 13}, {0, 10}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{2, 3}, {0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsTopRight) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{9, 13}, {6, 13}, {3, 13}, {0, 13}};
MemoryTermReducer reducer;
  const auto num_terms = reducer.Reduce(/*num_lives=*/14, num_primitives,
                                        Convert(intervals),
                                        /*max_iterations=*/1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{14, 8}, {6, 8}, {14, 2}, {0, 2}, {3, 13}, {9, 13}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{2, 3}, {0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {9};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsBottomRight) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 13}, {3, 13}, {6, 13}, {9, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 2}, {14, 2}, {6, 8}, {14, 8}, {3, 13}, {9, 13}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {9};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9469ed1-6c77-4323-9ef8-b4a9ff678ff6 | cpp | tensorflow/tensorflow | type_to_shape | third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape.cc | third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape_test.cc | #include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/SparseTensor/IR/Enums.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
using ::int64_t;
using mlir::MemRefType;
using mlir::RankedTensorType;
using mlir::ShapedType;
using mlir::VectorType;
using mlir::mhlo::TypeExtensionsAttr;
using xla::PrimitiveType;
namespace xla {
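// Maps an MLIR sparse-tensor level type to the matching XLA DimLevelType plus
// its unique/ordered properties; returns std::nullopt for level formats with
// no XLA counterpart.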
std::optional<std::tuple<DimLevelType, bool, bool>> ConvertDimLevelType(
mlir::sparse_tensor::LevelType lt) {
auto f = mlir::sparse_tensor::getLevelFormat(lt);
if (!f) return std::nullopt;
bool unique = mlir::sparse_tensor::isUniqueLT(lt);
bool ordered = mlir::sparse_tensor::isOrderedLT(lt);
switch (*f) {
case mlir::sparse_tensor::LevelFormat::Singleton:
return std::make_tuple(DimLevelType::DIM_SINGLETON, unique, ordered);
case mlir::sparse_tensor::LevelFormat::Compressed:
return std::make_tuple(DimLevelType::DIM_COMPRESSED, unique, ordered);
case mlir::sparse_tensor::LevelFormat::Dense:
return std::make_tuple(DimLevelType::DIM_DENSE, unique, ordered);
case mlir::sparse_tensor::LevelFormat::LooseCompressed:
return std::make_tuple(DimLevelType::DIM_LOOSE_COMPRESSED, unique,
ordered);
default:
return std::nullopt;
}
}
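// Converts an MLIR type to an xla::Shape, dispatching on scalars, vectors,
// memrefs (reconstructing minor-to-major layouts from strides), ranked
// tensors (including bounded dynamism and sparse encodings), tuples, tokens,
// and async bundles. Unsupported types yield an empty, invalid Shape.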
Shape TypeToShape(mlir::Type type) {
PrimitiveType ptype = ConvertMlirTypeToPrimitiveType(type);
if (ptype != PrimitiveType::PRIMITIVE_TYPE_INVALID)
return ShapeUtil::MakeShape(ptype, {});
if (type.isIntOrFloat()) {
auto* context = type.getContext();
mlir::emitError(mlir::UnknownLoc::get(context))
<< "lowering should have been handled by primitive type lowering for "
<< debugString(type);
} else if (auto v = mlir::dyn_cast<mlir::VectorType>(type)) {
llvm::SmallVector<int64_t, 4> span(v.getShape().begin(),
v.getShape().end());
mlir::Type element_type = v.getElementType();
PrimitiveType primitive_type = ConvertMlirTypeToPrimitiveType(element_type);
if (primitive_type != PrimitiveType::PRIMITIVE_TYPE_INVALID)
return ShapeUtil::MakeShape(primitive_type, span);
} else if (auto m = mlir::dyn_cast<mlir::MemRefType>(type)) {
llvm::SmallVector<int64_t, 6> span(m.getShape().begin(),
m.getShape().end());
mlir::Type element_type = m.getElementType();
if (auto v = mlir::dyn_cast<mlir::VectorType>(element_type)) {
element_type = v.getElementType();
span.insert(span.end(), v.getShape().begin(), v.getShape().end());
}
PrimitiveType primitive_type = ConvertMlirTypeToPrimitiveType(element_type);
if (primitive_type == PrimitiveType::PRIMITIVE_TYPE_INVALID) return {};
if (m.getLayout().isIdentity())
return ShapeUtil::MakeShape(primitive_type, span);
llvm::SmallVector<int64_t, 4> strides;
int64_t offset;
if (failed(mlir::getStridesAndOffset(m, strides, offset))) return {};
llvm::SmallVector<std::pair<int64_t, int>, 4> strides_with_indices;
for (const auto& e : llvm::enumerate(strides)) {
strides_with_indices.push_back({e.value(), e.index()});
}
std::stable_sort(strides_with_indices.begin(), strides_with_indices.end());
llvm::SmallVector<int64_t, 4> minor_to_major;
int64_t stride = 1;
for (const auto& pr : strides_with_indices) {
minor_to_major.push_back(pr.second);
if (stride != pr.first && m.getShape()[pr.second] != 1) return {};
stride *= m.getShape()[pr.second];
}
llvm::SmallVector<int64_t, 4> dimensions(m.getShape().begin(),
m.getShape().end());
return ::xla::ShapeUtil::MakeShapeWithDenseLayout(
primitive_type, dimensions, minor_to_major);
} else if (auto t = mlir::dyn_cast<mlir::RankedTensorType>(type)) {
int64_t rank = t.getRank();
llvm::SmallVector<int64_t, 4> bounds;
if (auto extn =
mlir::dyn_cast_or_null<TypeExtensionsAttr>(t.getEncoding())) {
bounds = llvm::to_vector<4>(extn.getBounds());
} else {
bounds.assign(rank, ShapedType::kDynamic);
}
llvm::SmallVector<int64_t, 4> shape(rank, mlir::ShapedType::kDynamic);
std::vector<bool> is_dynamic(rank, false);
for (int64_t dim = 0; dim < rank; ++dim) {
int64_t size = t.getDimSize(dim);
if (size == ShapedType::kDynamic) {
shape[dim] = bounds[dim] != ShapedType::kDynamic
? bounds[dim]
: Shape::kUnboundedSize;
is_dynamic[dim] = true;
} else {
if (bounds[dim] != ShapedType::kDynamic) return {};
shape[dim] = size;
}
}
PrimitiveType primitive_type =
ConvertMlirTypeToPrimitiveType(t.getElementType());
if (primitive_type == PrimitiveType::PRIMITIVE_TYPE_INVALID) return {};
if (auto sparse = mlir::sparse_tensor::getSparseTensorEncoding(type)) {
if (!t.hasStaticShape()) return {};
if (sparse.getPosWidth() != 32 || sparse.getCrdWidth() != 32) return {};
llvm::SmallVector<DimLevelType, 3> lvl_types;
llvm::SmallVector<bool, 3> level_unique;
llvm::SmallVector<bool, 3> level_ordered;
for (auto lt : sparse.getLvlTypes()) {
auto new_lt = ConvertDimLevelType(lt);
if (!new_lt) return {};
lvl_types.push_back(std::get<0>(*new_lt));
level_unique.push_back(std::get<1>(*new_lt));
level_ordered.push_back(std::get<2>(*new_lt));
}
std::vector<int64_t> ordering(rank);
std::iota(ordering.rbegin(), ordering.rend(), 0);
auto dimToLvl = sparse.getDimToLvl()
? sparse.getDimToLvl()
: mlir::AffineMap::getMultiDimIdentityMap(
rank, sparse.getContext());
auto final_ordering = mlir::applyPermutationMap(
dimToLvl, llvm::ArrayRef<int64_t>(ordering));
auto sparse_shape = ::xla::ShapeUtil::MakeShapeWithSparseLayout(
primitive_type, shape, final_ordering, lvl_types, level_unique,
level_ordered);
return sparse_shape;
}
return ShapeUtil::MakeShape(primitive_type, shape, is_dynamic);
} else if (auto tuple_type = mlir::dyn_cast<mlir::TupleType>(type)) {
llvm::SmallVector<Shape, 4> shapes;
shapes.reserve(tuple_type.size());
for (mlir::Type sub_type : tuple_type.getTypes()) {
shapes.push_back(TypeToShape(sub_type));
}
return ShapeUtil::MakeTupleShape(shapes);
} else if (mlir::isa<mlir::mhlo::TokenType>(type) ||
mlir::isa<mlir::stablehlo::TokenType>(type)) {
return ShapeUtil::MakeTokenShape();
} else if (auto bundle_type =
mlir::dyn_cast<mlir::mhlo::AsyncBundleType>(type)) {
auto tuple_type =
mlir::TupleType::get(type.getContext(), bundle_type.getTypes());
return TypeToShape(tuple_type);
}
return {};
}
} | #include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include <iostream>
#include <utility>
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
using mlir::Builder;
using mlir::MemRefType;
using mlir::MLIRContext;
using mlir::RankedTensorType;
using mlir::UnrankedTensorType;
using mlir::VectorType;
namespace xla {
namespace {
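// Matches a protobuf message by comparing serialized byte strings; used via
// EqualsProto below to compare ShapeProto results.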
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
TEST(TypeToShapeTest, ConvertBasicTypesToTypes) {
MLIRContext context;
Builder b(&context);
EXPECT_TRUE(
ShapeUtil::IsScalarWithElementType(TypeToShape(b.getF32Type()), F32));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getIntegerType(32))).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(VectorType::get({8, 128}, b.getIntegerType(17))).ToProto(),
EqualsProto(Shape().ToProto()));
}
TEST(TypeToShapeTest, ConvertMemRefTypeToTypes) {
MLIRContext context;
Builder b(&context);
EXPECT_THAT(
TypeToShape(MemRefType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
EXPECT_THAT(
TypeToShape(MemRefType::get({100, 13, 210}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {100, 13, 210}).ToProto()));
EXPECT_THAT(
TypeToShape(MemRefType::get({100, 13, 210},
VectorType::get({8, 128}, b.getF32Type())))
.ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {100, 13, 210, 8, 128})
.ToProto()));
}
TEST(TypeToShapeTest, ConvertTensorTypeToTypes) {
mlir::MLIRContext context;
context.loadDialect<mlir::mhlo::MhloDialect>();
Builder b(&context);
EXPECT_THAT(
TypeToShape(RankedTensorType::get({8, 128}, b.getF32Type())).ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}).ToProto()));
llvm::SmallVector<int64_t, 4> bounds = {8, mlir::ShapedType::kDynamic};
auto extensions = mlir::mhlo::TypeExtensionsAttr::get(&context, bounds);
EXPECT_THAT(
TypeToShape(RankedTensorType::get({mlir::ShapedType::kDynamic, 128},
b.getF32Type(), extensions))
.ToProto(),
EqualsProto(
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 128}, {true, false})
.ToProto()));
EXPECT_THAT(
TypeToShape(RankedTensorType::get({mlir::ShapedType::kDynamic, 784},
b.getF32Type()))
.ToProto(),
EqualsProto(ShapeUtil::MakeShape(PrimitiveType::F32,
{Shape::kUnboundedSize, 784},
{true, false})
.ToProto()));
EXPECT_THAT(TypeToShape(UnrankedTensorType::get(b.getF32Type())).ToProto(),
EqualsProto(Shape().ToProto()));
EXPECT_THAT(
TypeToShape(RankedTensorType::get(
{8, 128}, VectorType::get({16, 16}, b.getF32Type())))
.ToProto(),
EqualsProto(Shape().ToProto()));
}
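// Round-trip check: a Shape with a non-default layout survives conversion to
// a MemRefType and back unchanged.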
TEST(TypeToShapeTest, ConvertMemRefToShape) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::F32,
{10, 20, 30}, {2, 0, 1});
MLIRContext context;
mlir::Builder builder(&context);
absl::StatusOr<mlir::Type> mlir_type =
ConvertShapeToType<MemRefType>(shape, builder);
ASSERT_TRUE(mlir_type.ok());
mlir::Type type = std::move(mlir_type).value();
Shape converted = TypeToShape(type);
EXPECT_TRUE(ShapeUtil::Equal(
converted, ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::F32,
{10, 20, 30}, {2, 0, 1})));
EXPECT_TRUE(ShapeUtil::Equal(converted, shape));
}
TEST(TypeToShapeTest, ConvertMemRefToShape2) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(PrimitiveType::C64,
{2, 4, 3, 3}, {2, 3, 1, 0});
MLIRContext context;
mlir::Builder builder(&context);
absl::StatusOr<mlir::Type> mlir_type =
ConvertShapeToType<MemRefType>(shape, builder);
ASSERT_TRUE(mlir_type.ok());
mlir::Type type = std::move(mlir_type).value();
Shape converted = TypeToShape(type);
EXPECT_TRUE(ShapeUtil::Equal(
converted, ShapeUtil::MakeShapeWithDenseLayout(
PrimitiveType::C64, {2, 4, 3, 3}, {2, 3, 1, 0})));
EXPECT_TRUE(ShapeUtil::Equal(converted, shape));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/type_to_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
598c730c-79a4-40bf-874a-24f60c51c13a | cpp | tensorflow/tensorflow | mlir_hlo_to_hlo | third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.cc | third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo_test.cc | #include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/UseDefLists.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/RegionUtils.h"
#include "stablehlo/dialect/Base.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/builder/lib/approx_topk.h"
#include "xla/hlo/builder/lib/approx_topk_shape.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/translate/mhlo_to_hlo/attribute_exporter.h"
#include "xla/hlo/translate/mhlo_to_hlo/layout_util.h"
#include "xla/hlo/translate/mhlo_to_hlo/location_exporter.h"
#include "xla/hlo/translate/mhlo_to_hlo/module_attributes_exporter.h"
#include "xla/hlo/translate/mhlo_to_hlo/stack_frame_index_builder.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/types.h"
using ::int64_t;
using ::tsl::int16;
using ::tsl::int32;
using ::tsl::int8;
using ::tsl::StatusOr;
using ::tsl::uint16;
using ::tsl::uint32;
using ::tsl::uint64;
using ::tsl::uint8;
constexpr char kJaxBufferDonor[] = "jax.buffer_donor";
constexpr char kResultLayout[] = "result_layout";
constexpr char kSourceLayout[] = "source_layout";
constexpr char kAggregateToTopk[] = "aggregate_to_topk";
constexpr char kApiVersion[] = "api_version";
constexpr char kApproxTopK[] = "ApproxTopK";
constexpr char kBackendConfig[] = "backend_config";
constexpr char kCallTargetName[] = "call_target_name";
constexpr char kCalledComputations[] = "called_computations";
constexpr char kHasSideEffect[] = "has_side_effect";
constexpr char kIsFallback[] = "is_fallback";
constexpr char kRecallTarget[] = "recall_target";
constexpr char kReductionDim[] = "reduction_dim";
constexpr char kReductionInputSizeOverride[] = "reduction_input_size_override";
constexpr char kTopK[] = "top_k";
constexpr char kMhloCrossProgramPrefetches[] = "mhlo.cross_program_prefetches";
constexpr char kMhloFrontendAttributes[] = "mhlo.frontend_attributes";
constexpr char kMhloInputOutputAlias[] = "mhlo.input_output_alias";
constexpr char kMhloIsDynamic[] = "mhlo.is_dynamic";
constexpr char kMhloLiteral[] = "mhlo.literal";
constexpr char kMhloParameterReplication[] = "mhlo.parameter_replication";
constexpr char kMhloReplication[] = "mhlo.is_same_data_across_replicas";
constexpr char kMhloSharding[] = "mhlo.sharding";
constexpr char kMhloSpmdOutputSharding[] = "mhlo.spmd_output_sharding";
constexpr char kMhloSpmdParametersShardings[] =
"mhlo.spmd_parameters_shardings";
constexpr char kMhloUseAutoSpmdPartitioning[] =
"mhlo.use_auto_spmd_partitioning";
constexpr char kMhloXlaEntryComputationParameterLayouts[] =
"mhlo.xla_entry_computation_parameter_layouts";
constexpr char kMhloXlaEntryComputationParameterTiles[] =
"mhlo.xla_entry_computation_parameter_tiles";
constexpr char kMhloXlaEntryComputationResultLayout[] =
"mhlo.xla_entry_computation_result_layout";
constexpr char kMhloXlaEntryComputationResultTiles[] =
"mhlo.xla_entry_computation_result_tiles";
constexpr char kArgEmptyTuple[] = "arg_empty_tuple";
constexpr char kArgPrefix[] = "Arg_";
constexpr char kArgTuple[] = "arg_tuple";
constexpr char kDefaultLayoutAttrName[] = "xla_shape";
constexpr char kExecutionThread[] = "execution_thread";
constexpr char kLayout[] = "layout";
constexpr char kMain[] = "main";
constexpr char kRegionPrefix[] = "region_";
constexpr char kTfAliasingOutput[] = "tf.aliasing_output";
template <typename T>
T Unwrap(T t) {
return t;
}
template <typename T>
T* Unwrap(const std::unique_ptr<T>& t) {
return t.get();
}
static mlir::LogicalResult GetXlaOp(
mlir::Value val, const llvm::DenseMap<mlir::Value, xla::XlaOp>& val_map,
xla::XlaOp* result, mlir::Operation* op) {
auto iter = val_map.find(val);
if (iter == val_map.end()) {
return op->emitOpError(
"requires all operands to be defined in the parent region for export");
}
*result = iter->second;
return mlir::success();
}
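// Returns true if `ty` is a ranked tensor whose every dimension is either
// static or carries a bound in its mhlo.type_extensions encoding.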
bool IsBoundedOrStatic(mlir::Type ty) {
auto ranked_ty = mlir::dyn_cast_or_null<mlir::RankedTensorType>(ty);
if (!ranked_ty) return false;
if (ranked_ty.hasStaticShape()) return true;
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
ranked_ty.getEncoding());
if (!encoding || encoding.getBounds().empty()) return false;
int64_t rank = ranked_ty.getRank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (ranked_ty.isDynamicDim(dim) &&
encoding.getBounds()[dim] == mlir::ShapedType::kDynamic)
return false;
}
return true;
}
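// Copies a DenseElementsAttr into an xla::Array<T>. Sub-byte integer types
// are widened element by element, since DenseElementsAttr exposes them only
// as APInts.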
template <typename T>
xla::Array<T> ArrayFromDenseElementsAttr(mlir::DenseElementsAttr dense_attr) {
constexpr xla::PrimitiveType type =
xla::primitive_util::NativeToPrimitiveType<T>();
xla::Shape shape = xla::TypeToShape(dense_attr.getType());
xla::Array<T> array(shape.dimensions());
if constexpr (!xla::primitive_util::IsSubByteNonPredType(type)) {
array.SetValues(dense_attr.getValues<T>());
} else {
auto values = dense_attr.getValues<llvm::APInt>();
for (int i = 0; i < values.size(); i++) {
if constexpr (xla::primitive_util::IsUnsignedIntegralType(type)) {
array.data()[i] = T{values[i].getZExtValue()};
} else {
static_assert(xla::primitive_util::IsSignedIntegralType(type));
array.data()[i] = T{values[i].getSExtValue()};
}
}
}
return array;
}
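// Builds an xla::Literal with the requested layout from an elements
// attribute; only dense attributes are currently handled.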
absl::StatusOr<xla::Literal> CreateArrayLiteralFromAttr(mlir::ElementsAttr attr,
xla::Layout layout) {
auto dense_attr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr);
if (!dense_attr)
return tsl::errors::Unimplemented("Only dense elements attrs are supported");
xla::Shape shape = xla::TypeToShape(dense_attr.getType());
return xla::primitive_util::PrimitiveTypeSwitch<absl::StatusOr<xla::Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<xla::Literal> {
if constexpr (xla::primitive_util::IsArrayType(
primitive_type_constant)) {
using cpp_type =
xla::primitive_util::NativeTypeOf<primitive_type_constant>;
xla::Array<cpp_type> source_data =
ArrayFromDenseElementsAttr<cpp_type>(dense_attr);
return xla::LiteralUtil::CreateFromArrayWithLayout(source_data,
layout);
}
return tsl::errors::Internal(absl::StrCat(
"Unsupported type: ",
xla::PrimitiveType_Name(shape.element_type())));
},
shape.element_type());
}
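// The Convert* helpers below adapt MLIR attribute values to the argument
// types expected by the XLA builder APIs. The identity wrappers with mangled
// names (Convertuint32_t and friends) are presumably referenced by generated
// exporter code, so their signatures should be left as-is.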
static int ConvertAPInt(llvm::APInt i) { return i.getSExtValue(); }
static uint32_t Convertuint32_t(uint32_t i) { return i; }
static uint64_t Convertuint64_t(uint64_t i) { return i; }
static double ConvertAPFloat(llvm::APFloat value) {
const auto& semantics = value.getSemantics();
bool losesInfo = false;
if (&semantics != &llvm::APFloat::IEEEdouble())
value.convert(llvm::APFloat::IEEEdouble(),
llvm::APFloat::rmNearestTiesToEven, &losesInfo);
return value.convertToDouble();
}
static inline bool Convertbool(bool value) { return value; }
static absl::string_view ConvertStringRef(mlir::StringRef value) {
return {value.data(), value.size()};
}
static std::vector<int64_t> ConvertDenseIntAttr(
mlir::DenseIntElementsAttr attr) {
auto values = attr.getValues<int64_t>();
return {values.begin(), values.end()};
}
static std::vector<int64_t> ConvertDenseIntAttr(
std::optional<mlir::DenseIntElementsAttr> attr) {
if (!attr) return {};
return ConvertDenseIntAttr(*attr);
}
static std::vector<int64_t> Convert_broadcast_dimensions(
std::optional<mlir::DenseIntElementsAttr> broadcast_dimensions) {
if (!broadcast_dimensions.has_value()) return {};
return ConvertDenseIntAttr(*broadcast_dimensions);
}
static std::vector<xla::CrossProgramPrefetch> Convert_cross_program_prefetches(
mlir::ArrayAttr prefetches) {
std::vector<xla::CrossProgramPrefetch> cross_program_prefetches;
for (auto prefetch : prefetches) {
auto cpp = mlir::cast<mlir::mhlo::CrossProgramPrefetchAttr>(prefetch);
xla::CrossProgramPrefetch xla_cpp;
xla_cpp.set_parameter(cpp.getParameter());
for (auto index : cpp.getIndices()) xla_cpp.add_index(index);
cross_program_prefetches.push_back(xla_cpp);
}
return cross_program_prefetches;
}
static xla::FftType Convert_fft_type(mlir::mhlo::FftType fft_type) {
xla::FftType fft_type_enum;
if (!FftType_Parse(std::string(mlir::mhlo::stringifyFftType(fft_type)),
&fft_type_enum))
return xla::FftType::FFT;
return fft_type_enum;
}
static std::vector<std::pair<int64_t, int64_t>> Convert_padding(
std::optional<mlir::DenseIntElementsAttr> padding) {
return xla::ConvertNx2Attribute(padding).value();
}
static std::optional<bool> Convert_use_global_device_ids(
std::optional<bool> use_global_device_ids) {
if (!use_global_device_ids) return {};
return *use_global_device_ids;
}
static std::vector<std::pair<int64_t, int64_t>> Convert_source_target_pairs(
std::optional<mlir::DenseIntElementsAttr> source_target_pairs) {
return xla::ConvertNx2Attribute(source_target_pairs).value();
}
static std::vector<xla::ReplicaGroup> Convert_replica_groups(
mlir::DenseIntElementsAttr groups) {
return xla::ConvertReplicaGroups(groups).value();
}
static void SetLayout(xla::Shape& shape, mlir::DenseIntElementsAttr layout) {
if (shape.IsArray()) {
shape.mutable_layout()->clear_minor_to_major();
for (auto l : layout) {
shape.mutable_layout()->mutable_minor_to_major()->push_back(
l.getSExtValue());
}
} else if (shape.IsToken()) {
assert(layout.empty() && "Invalid layout for token type");
} else {
assert(!shape.IsTuple() &&
"Exporting layout for tuples is not implemented yet");
assert(false && "Exporting unknown type with layout");
}
}
static void SetLayout(xla::Shape& shape, mlir::ArrayAttr layouts) {
if (shape.IsTuple()) {
for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
SetLayout(*shape.mutable_tuple_shapes(i),
mlir::cast<mlir::DenseIntElementsAttr>(layouts[i]));
}
} else {
assert(layouts.size() == 1);
SetLayout(shape, mlir::cast<mlir::DenseIntElementsAttr>(layouts[0]));
}
}
static std::vector<xla::Shape> ConvertTypesToShapesWithLayout(
mlir::TypeRange value_types, mlir::ArrayAttr layouts) {
std::vector<xla::Shape> shapes_with_layout;
for (auto [type, layout] : llvm::zip(value_types, layouts)) {
xla::Shape shape = xla::TypeToShape(type);
SetLayout(shape, mlir::cast<mlir::DenseIntElementsAttr>(layout));
shapes_with_layout.push_back(std::move(shape));
}
return shapes_with_layout;
}
static xla::TriangularSolveOptions::Transpose Convert_transpose_a(
mlir::mhlo::Transpose transpose) {
return xla::ConvertTranspose(mlir::mhlo::stringifyTranspose(transpose))
.value();
}
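// Reads a minor-to-major layout from the named integer-array attribute
// (default "xla_shape") when present; otherwise returns the default
// descending layout for `rank`.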
static xla::Layout ExtractLayout(
mlir::Operation* op, int rank,
llvm::StringRef attr_name = kDefaultLayoutAttrName) {
if (auto attr = op->getAttrOfType<mlir::DenseIntElementsAttr>(attr_name)) {
llvm::SmallVector<int64_t, 4> minor_to_major;
DCHECK_EQ(rank, attr.size());
minor_to_major.reserve(attr.size());
for (const llvm::APInt& i : attr) {
minor_to_major.push_back(i.getZExtValue());
}
return xla::LayoutUtil::MakeLayout(minor_to_major);
}
return xla::LayoutUtil::MakeDescendingLayout(rank);
}
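// Recovers the result shape of `op`: prefers an explicit "xla_shape" string
// attribute, otherwise derives shapes from the result types, tupling them
// when there is more than one result.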
static mlir::FailureOr<xla::Shape> ExtractXlaShape(mlir::Operation* op) {
if (auto attr = op->getAttrOfType<mlir::StringAttr>(kDefaultLayoutAttrName)) {
return *xla::ParseShape(
absl::string_view(attr.getValue().data(), attr.getValue().size()));
} else {
std::vector<xla::Shape> subshapes;
for (auto [index, result] : llvm::enumerate(op->getResults())) {
subshapes.push_back(xla::TypeToShape(result.getType()));
if (subshapes.back().element_type() == xla::PRIMITIVE_TYPE_INVALID) {
return op->emitError()
<< "result #" << index << " type is not supported";
}
}
if (subshapes.size() > 1) {
return xla::ShapeUtil::MakeTupleShape(subshapes);
}
return subshapes[0];
}
}
#define I64_ELEMENTS_ATTR_TO_VECTOR(attribute) \
static std::vector<int64_t> Convert_##attribute( \
std::optional<mlir::DenseIntElementsAttr> attribute) { \
return ConvertDenseIntAttr(attribute); \
}
I64_ELEMENTS_ATTR_TO_VECTOR(broadcast_sizes);
I64_ELEMENTS_ATTR_TO_VECTOR(permutation);
I64_ELEMENTS_ATTR_TO_VECTOR(start_indices);
I64_ELEMENTS_ATTR_TO_VECTOR(limit_indices);
I64_ELEMENTS_ATTR_TO_VECTOR(strides);
I64_ELEMENTS_ATTR_TO_VECTOR(slice_sizes);
I64_ELEMENTS_ATTR_TO_VECTOR(fft_length);
I64_ELEMENTS_ATTR_TO_VECTOR(dimensions);
I64_ELEMENTS_ATTR_TO_VECTOR(window_strides);
I64_ELEMENTS_ATTR_TO_VECTOR(lhs_dilation);
I64_ELEMENTS_ATTR_TO_VECTOR(rhs_dilation);
#undef I64_ELEMENTS_ATTR_TO_VECTOR
#define BOOL_ELEMENTS_ATTR_TO_VECTOR(attribute) \
static std::vector<bool> Convert_##attribute( \
std::optional<mlir::DenseElementsAttr> attribute) { \
if (!attribute) return {}; \
auto values = attribute->getValues<bool>(); \
return {values.begin(), values.end()}; \
}
BOOL_ELEMENTS_ATTR_TO_VECTOR(window_reversal);
#undef BOOL_ELEMENTS_ATTR_TO_VECTOR
static std::vector<int64_t> Convert_ArrayRef(llvm::ArrayRef<int64_t> values) {
return {values.begin(), values.end()};
}
static std::unique_ptr<xla::PrecisionConfig> Convert_precision_config(
std::optional<mlir::ArrayAttr> optional_precision_config_attr) {
if (!optional_precision_config_attr.has_value()) return nullptr;
auto precision_config = std::make_unique<xla::PrecisionConfig>();
for (auto attr : optional_precision_config_attr.value()) {
xla::PrecisionConfig::Precision p;
auto operand_precision =
mlir::mhlo::stringifyPrecision(
mlir::cast<mlir::mhlo::PrecisionAttr>(attr).getValue())
.str();
if (xla::PrecisionConfig::Precision_Parse(operand_precision, &p)) {
precision_config->add_operand_precision(p);
} else {
auto* context = attr.getContext();
mlir::emitError(mlir::UnknownLoc::get(context))
<< "unexpected operand precision " << operand_precision;
return nullptr;
}
}
return precision_config;
}
static xla::DotDimensionNumbers Convert_dot_dimension_numbers(
mlir::mhlo::DotDimensionNumbersAttr dot_dimension_numbers_attr) {
xla::DotDimensionNumbers dot_dimension_numbers;
auto rhs_contracting_dimensions =
dot_dimension_numbers_attr.getRhsContractingDimensions();
auto lhs_contracting_dimensions =
dot_dimension_numbers_attr.getLhsContractingDimensions();
auto rhs_batch_dimensions =
dot_dimension_numbers_attr.getRhsBatchingDimensions();
auto lhs_batch_dimensions =
dot_dimension_numbers_attr.getLhsBatchingDimensions();
for (const auto& val : rhs_contracting_dimensions) {
dot_dimension_numbers.add_rhs_contracting_dimensions(val);
}
for (const auto& val : lhs_contracting_dimensions) {
dot_dimension_numbers.add_lhs_contracting_dimensions(val);
}
for (const auto& val : rhs_batch_dimensions) {
dot_dimension_numbers.add_rhs_batch_dimensions(val);
}
for (const auto& val : lhs_batch_dimensions) {
dot_dimension_numbers.add_lhs_batch_dimensions(val);
}
return dot_dimension_numbers;
}
static xla::SparsityDescriptor Convert_sparsity_descriptor(
mlir::mhlo::SparsityDescriptorAttr sparsity_attr, bool is_lhs) {
xla::SparsityDescriptor sparsity_descriptor;
sparsity_descriptor.set_type(xla::SPARSITY_STRUCTURED_N_M);
sparsity_descriptor.set_index(is_lhs ? 0 : 1);
sparsity_descriptor.set_dimension(sparsity_attr.getDimension());
sparsity_descriptor.set_n(sparsity_attr.getN());
sparsity_descriptor.set_m(sparsity_attr.getM());
return sparsity_descriptor;
}
xla::ChannelHandle Convert_channel_handle(mlir::mhlo::ChannelHandleAttr attr) {
xla::ChannelHandle channel_handle;
channel_handle.set_handle(attr.getHandle());
channel_handle.set_type(
static_cast<xla::ChannelHandle::ChannelType>(attr.getType()));
return channel_handle;
}
std::optional<xla::ChannelHandle> Convert_channel_handle(
std::optional<mlir::mhlo::ChannelHandleAttr> attr) {
if (!attr.has_value()) return std::nullopt;
return Convert_channel_handle(attr.value());
}
static xla::ComparisonDirection Convert_comparison_direction(
llvm::StringRef comparison_direction_string) {
return xla::StringToComparisonDirection(comparison_direction_string.str())
.value();
}
static xla::GatherDimensionNumbers Convert_dimension_numbers(
mlir::mhlo::GatherDimensionNumbersAttr input) {
xla::GatherDimensionNumbers output;
auto offset_dims = input.getOffsetDims();
std::copy(
offset_dims.begin(), offset_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(output.mutable_offset_dims()));
auto collapsed_slice_dims = input.getCollapsedSliceDims();
std::copy(collapsed_slice_dims.begin(), collapsed_slice_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_collapsed_slice_dims()));
auto operand_batching_dims = input.getOperandBatchingDims();
std::copy(operand_batching_dims.begin(), operand_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_operand_batching_dims()));
auto start_indices_batching_dims = input.getStartIndicesBatchingDims();
std::copy(start_indices_batching_dims.begin(),
start_indices_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_start_indices_batching_dims()));
auto start_index_map = input.getStartIndexMap();
std::copy(start_index_map.begin(), start_index_map.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_start_index_map()));
output.set_index_vector_dim(input.getIndexVectorDim());
return output;
}
static xla::ScatterDimensionNumbers Convert_scatter_dimension_numbers(
mlir::mhlo::ScatterDimensionNumbersAttr input) {
xla::ScatterDimensionNumbers output;
auto update_window_dims = input.getUpdateWindowDims();
std::copy(update_window_dims.begin(), update_window_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_update_window_dims()));
auto inserted_window_dims = input.getInsertedWindowDims();
std::copy(inserted_window_dims.begin(), inserted_window_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_inserted_window_dims()));
auto input_batching_dims = input.getInputBatchingDims();
std::copy(input_batching_dims.begin(), input_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_input_batching_dims()));
auto scatter_indices_batching_dims = input.getScatterIndicesBatchingDims();
std::copy(scatter_indices_batching_dims.begin(),
scatter_indices_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_scatter_indices_batching_dims()));
auto scatter_dims_to_operand_dims = input.getScatterDimsToOperandDims();
std::copy(scatter_dims_to_operand_dims.begin(),
scatter_dims_to_operand_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_scatter_dims_to_operand_dims()));
output.set_index_vector_dim(input.getIndexVectorDim());
return output;
}
static std::optional<xla::OpSharding> CreateOpShardingFromAttribute(
mlir::Operation* op) {
auto shardingAttr = op->getAttrOfType<mlir::StringAttr>(kMhloSharding);
if (!shardingAttr) return std::nullopt;
return xla::ConvertSharding(shardingAttr.getValue());
}
void ConstructFrontendAttributesFromAttribute(
const mlir::DictionaryAttr& frontend_attributes_dict,
xla::FrontendAttributes& frontend_attributes) {
for (const auto& attr : frontend_attributes_dict)
if (auto value_str_attr = mlir::dyn_cast<mlir::StringAttr>(attr.getValue()))
frontend_attributes.mutable_map()->insert(
{attr.getName().str(), value_str_attr.getValue().str()});
}
static xla::FrontendAttributes CreateXlaFrontendAttributesFromOp(
mlir::Operation* op) {
xla::FrontendAttributes frontend_attributes;
auto frontend_attributes_dict =
op->getAttrOfType<mlir::DictionaryAttr>(kMhloFrontendAttributes);
if (!frontend_attributes_dict) return frontend_attributes;
ConstructFrontendAttributesFromAttribute(frontend_attributes_dict,
frontend_attributes);
return frontend_attributes;
}
static void ExtractFrontendAttributesFromFunction(
mlir::func::FuncOp function,
llvm::SmallVectorImpl<std::optional<xla::FrontendAttributes>>* fe_attrs) {
fe_attrs->resize(function.getNumArguments(), std::nullopt);
for (int i = 0, end = function.getNumArguments(); i < end; ++i)
if (auto fe_attr = function.getArgAttrOfType<mlir::DictionaryAttr>(
i, kMhloFrontendAttributes)) {
xla::FrontendAttributes frontend_attributes;
ConstructFrontendAttributesFromAttribute(fe_attr, frontend_attributes);
(*fe_attrs)[i] = frontend_attributes;
}
}
static bool SomeOptionalShardingsAreSet(
llvm::ArrayRef<std::optional<xla::OpSharding>> shardings) {
return llvm::any_of(shardings,
[](const std::optional<xla::OpSharding>& sharding) {
return sharding.has_value();
});
}
static void ExtractShardingsFromFunction(
mlir::func::FuncOp function,
llvm::SmallVectorImpl<std::optional<xla::OpSharding>>* arg_shardings,
llvm::SmallVectorImpl<std::optional<xla::OpSharding>>* ret_shardings) {
arg_shardings->resize(function.getNumArguments(),
std::optional<xla::OpSharding>());
for (int i = 0, end = function.getNumArguments(); i < end; ++i)
if (auto sharding =
function.getArgAttrOfType<mlir::StringAttr>(i, kMhloSharding))
(*arg_shardings)[i] = xla::ConvertSharding(sharding.getValue());
ret_shardings->resize(function.getNumResults(),
std::optional<xla::OpSharding>());
for (int i = 0, end = function.getNumResults(); i < end; ++i)
if (auto sharding =
function.getResultAttrOfType<mlir::StringAttr>(i, kMhloSharding))
(*ret_shardings)[i] = xla::ConvertSharding(sharding.getValue());
}
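// Builds a tuple sharding from per-element shardings, substituting a
// replicated sharding for elements that have none; returns std::nullopt when
// no element is sharded at all.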
std::optional<xla::OpSharding> CreateTupleSharding(
llvm::ArrayRef<std::optional<xla::OpSharding>> tuple_shardings) {
if (tuple_shardings.empty() ||
!SomeOptionalShardingsAreSet(tuple_shardings)) {
return std::nullopt;
}
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::TUPLE);
for (const std::optional<xla::OpSharding>& tuple_sharding : tuple_shardings) {
if (tuple_sharding) {
*sharding.add_tuple_shardings() = *tuple_sharding;
} else {
xla::OpSharding fallback_sharding;
fallback_sharding.set_type(xla::OpSharding::REPLICATED);
*sharding.add_tuple_shardings() = fallback_sharding;
}
}
return sharding;
}
xla::XlaOp CreateTupleIfMultipleOps(
xla::XlaBuilder* builder, llvm::ArrayRef<xla::XlaOp> ops,
llvm::ArrayRef<std::optional<xla::OpSharding>> shardings) {
if (ops.size() == 1) {
return ops[0];
}
xla::XlaScopedShardingAssignment scoped_sharding(
builder, CreateTupleSharding(shardings));
return Tuple(builder, ops);
}
llvm::SmallVector<std::optional<xla::OpSharding>> GetResultShardings(
std::optional<xla::OpSharding> op_sharding, int64_t num_results) {
if (!op_sharding) {
return {};
}
llvm::SmallVector<std::optional<xla::OpSharding>> res_shardings;
res_shardings.reserve(num_results);
if (op_sharding->type() == xla::OpSharding::TUPLE) {
assert(op_sharding->tuple_shardings_size() == num_results);
res_shardings.assign(op_sharding->tuple_shardings().begin(),
op_sharding->tuple_shardings().end());
} else {
res_shardings.append(num_results, op_sharding);
}
return res_shardings;
}
llvm::SmallVector<std::optional<xla::OpSharding>> GetXlaOpShardings(
llvm::ArrayRef<xla::XlaOp> xla_ops) {
llvm::SmallVector<std::optional<xla::OpSharding>> shardings;
shardings.reserve(xla_ops.size());
for (const xla::XlaOp& xla_op : xla_ops) {
auto sharding = xla_op.builder()->GetOpSharding(xla_op);
assert(sharding.ok() && "can't find XlaOp for argument");
shardings.push_back(*sharding);
}
return shardings;
}
namespace mlir {
namespace {
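// Drives conversion of an MLIR module to an XLA HLO module proto. The module
// must contain a `main` function; every non-empty func.func is lowered to an
// XlaComputation while value-to-XlaOp and function-to-computation maps are
// maintained.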
class ConvertToHloModule {
public:
using ValueLoweringMap = llvm::DenseMap<Value, xla::XlaOp>;
using FunctionLoweringMap =
llvm::DenseMap<mlir::func::FuncOp, xla::XlaComputation>;
explicit ConvertToHloModule(mlir::ModuleOp module,
xla::XlaBuilder& module_builder,
MlirToHloConversionOptions options)
: module_(module), module_builder_(module_builder), options_(options) {}
LogicalResult Run() {
auto main = module_.lookupSymbol<mlir::func::FuncOp>(kMain);
if (!main)
return module_.emitError(
"conversion requires module with `main` function");
for (auto func : module_.getOps<func::FuncOp>()) {
if (func.empty()) continue;
if (failed(RunOnFunction(func))) return failure();
}
return success();
}
LogicalResult RunOnFunction(mlir::func::FuncOp f);
::xla::HloModuleProto ConsumeMainProto() {
auto main = module_.lookupSymbol<mlir::func::FuncOp>(kMain);
CHECK(main) << "requires module to have main function";
return lowered_computation_[main].proto();
}
LogicalResult LowerRegionAsComputation(
mlir::Region* region, xla::XlaComputation* func,
llvm::ArrayRef<mlir::Value> implicit_operands = {},
llvm::ArrayRef<mlir::Value> implicit_results = {},
bool ensure_single_arg = false,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings = {},
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings = {});
LogicalResult LowerBasicBlockAsFunction(
Block* block, xla::XlaBuilder* builder, bool is_entry_function,
bool ensure_single_arg,
const std::vector<bool>& entry_args_same_across_replicas,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<std::optional<xla::FrontendAttributes>> fe_attrs,
xla::XlaComputation* result,
llvm::ArrayRef<mlir::Value> implicit_operands = {},
llvm::ArrayRef<mlir::Value> implicit_results = {});
LogicalResult LowerCast(mlir::Operation* inst,
const MlirToHloConversionOptions& options,
ConvertToHloModule::ValueLoweringMap* value_lowering);
LogicalResult LowerCompositeCall(
mlir::Operation* inst, xla::XlaBuilder* module_builder,
xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value);
LogicalResult LowerConstant(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
ElementsAttr const_attr);
LogicalResult LowerFunctionCall(
mlir::func::CallOp call_op, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering);
LogicalResult LowerInfeed(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering);
LogicalResult LowerReturn(
Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value, const MlirToHloConversionOptions& options);
LogicalResult PropagateLayouts(const MlirToHloConversionOptions& options,
mlir::Operation* inst, xla::XlaOp xla_op);
func::FuncOp LookUpSymbol(FlatSymbolRefAttr symbol) {
return module_.lookupSymbol<mlir::func::FuncOp>(symbol);
}
xla::XlaComputation& GetLoweredComputation(func::FuncOp func) {
return lowered_computation_[func];
}
LogicalResult Lower(
mlir::Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value);
const MlirToHloConversionOptions& GetOptions() const { return options_; }
xla::StackFrameIndexProto BuildStackFramesIndexProto() {
return stack_frame_indexes_builder_.Build();
}
private:
LogicalResult SetEntryTupleShapesAndLeafReplication(
Block* block, const std::vector<bool>& entry_args_same_across_replicas,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes,
std::vector<bool>* leaf_replication);
LogicalResult SetEntryTupleShardings(
Block* block, xla::XlaBuilder* builder,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes);
mlir::ModuleOp module_;
xla::XlaBuilder& module_builder_;
mlir::StackFrameIndexBuilder stack_frame_indexes_builder_;
FunctionLoweringMap lowered_computation_;
size_t region_id_ = 0;
MlirToHloConversionOptions options_;
};
}
}
namespace {
struct OpLoweringContext {
llvm::DenseMap<mlir::Value, xla::XlaOp>* values;
mlir::ConvertToHloModule* converter;
xla::XlaBuilder* builder;
mlir::StackFrameIndexBuilder* frame_index_builder;
};
mlir::LogicalResult GetTuple(mlir::Operation* op,
mlir::Operation::operand_range values,
OpLoweringContext ctx,
llvm::SmallVectorImpl<xla::XlaOp>& results) {
results.reserve(values.size());
for (mlir::Value value : values) {
if (failed(GetXlaOp(value, *ctx.values, &results.emplace_back(), op)))
return mlir::failure();
}
return mlir::success();
}
mlir::LogicalResult GetXlaOps(mlir::Operation* op,
llvm::ArrayRef<mlir::Value> values,
OpLoweringContext ctx,
llvm::SmallVectorImpl<xla::XlaOp>& results) {
results.reserve(values.size());
for (mlir::Value value : values) {
if (failed(GetXlaOp(value, *ctx.values, &results.emplace_back(), op)))
return mlir::failure();
}
return mlir::success();
}
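// Returns true if `op` consumes only block arguments and all of its users
// collapse to a single func.return, i.e. the op is trivially the whole body
// of its function.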
bool SimplyReturnedOp(mlir::Operation* op) {
for (auto operand : op->getOperands()) {
if (!llvm::isa<mlir::BlockArgument>(operand)) return false;
}
auto users = op->getUsers();
if (users.empty()) return false;
auto first_user = *users.begin();
for (auto user : users) {
if (first_user != user) return false;
}
if (llvm::isa<mlir::func::ReturnOp>(first_user)) return true;
return false;
}
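// Unpacks a tuple-valued XLA op into one GetTupleElement per MLIR result,
// assigning each element the matching entry of a tuple sharding when one is
// set on the builder.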
void BuildGetTupleElementsForTupleResults(mlir::Operation* op, xla::XlaOp tuple,
OpLoweringContext ctx,
unsigned num_implicit_results = 0) {
const std::optional<xla::OpSharding>& sharding = ctx.builder->sharding();
if (sharding.has_value()) {
bool is_tuple_sharding = sharding->type() == xla::OpSharding::TUPLE;
assert(!is_tuple_sharding || (op->getNumResults() + num_implicit_results ==
sharding->tuple_shardings_size()));
for (auto [index, result] : llvm::enumerate(op->getResults())) {
xla::XlaScopedShardingAssignment scoped_sharding(
ctx.builder,
is_tuple_sharding ? sharding->tuple_shardings(index) : sharding);
(*ctx.values)[result] = xla::GetTupleElement(tuple, index);
}
} else {
xla::XlaScopedShardingAssignment scoped_sharding(ctx.builder, std::nullopt);
for (auto [index, result] : llvm::enumerate(op->getResults())) {
(*ctx.values)[result] = xla::GetTupleElement(tuple, index);
}
}
}
}
namespace mlir {
namespace mhlo {
namespace {
LogicalResult ExportXlaOp(CollectiveBroadcastOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op->getResult(0)] = xla::CollectiveBroadcast(
operand, Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()));
return success();
}
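// The exports below deliberately return failure: CompositeOp is lowered by
// dedicated logic (see LowerCompositeCall), and the dynamic-shape ops are
// presumably expected to be legalized away before export. DynamicReshapeOp
// is the exception and receives a bounded-dynamism lowering.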
LogicalResult ExportXlaOp(CompositeOp, OpLoweringContext) {
return failure();
}
LogicalResult ExportXlaOp(DynamicBroadcastInDimOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicConvOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicGatherOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicIotaOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicPadOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicReshapeOp op, OpLoweringContext ctx) {
auto resultType = mlir::dyn_cast<RankedTensorType>(op.getResult().getType());
if (!resultType) return op->emitOpError() << "expected ranked result";
auto resultBounds = hlo::encodingToBounds(resultType.getEncoding());
if (resultBounds.empty())
return op->emitOpError() << "expected bounded result";
auto shapeType =
mlir::dyn_cast<RankedTensorType>(op.getOutputShape().getType());
if (!shapeType || !shapeType.getElementType().isInteger(32))
return op->emitOpError() << "expected output shape to be tensor<Nxi32>";
auto& value_map = *ctx.values;
xla::XlaOp operand;
xla::XlaOp outputShape;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getOutputShape(), value_map, &outputShape, op)))
return failure();
SmallVector<xla::XlaOp> dimSizes;
SmallVector<int64_t> newSizeBounds;
std::vector<bool> dimsAreDynamic;
for (auto i = 0; i < resultType.getRank(); ++i) {
auto runtimeSizeX1 = xla::Slice(outputShape, {i}, {i + 1}, {1});
dimSizes.push_back(xla::Reshape(runtimeSizeX1, {}));
auto dimSize = resultType.getDimSize(i);
auto dimBound = resultBounds[i];
if (!hlo::isStaticDimSize(dimSize) && !hlo::isStaticDimSize(dimBound))
return op->emitOpError() << "unbounded dynamism is not supported";
newSizeBounds.push_back(hlo::isStaticDimSize(dimSize) ? dimSize : dimBound);
dimsAreDynamic.push_back(!hlo::isStaticDimSize(dimSize));
}
value_map[op] =
xla::DynamicReshape(operand, dimSizes, newSizeBounds, dimsAreDynamic);
return success();
}
LogicalResult ExportXlaOp(RealDynamicSliceOp op, OpLoweringContext ctx) {
return failure();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::CopyOp op, OpLoweringContext ctx) {
if (op.getCrossProgramPrefetchIndex() && !SimplyReturnedOp(op))
return op->emitOpError() << "synchronous CopyOp should not include "
"cross_program_prefetch_index attribute.";
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp xla_arg_0;
if (failed(
GetXlaOp(*op.getODSOperands(0).begin(), value_map, &xla_arg_0, op)))
return mlir::failure();
auto xla_result = xla::Copy(Unwrap(xla_arg_0));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(AddDependencyOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp token;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
auto operand_shape = ctx.builder->GetShape(operand).value();
value_map[op] = xla::internal::XlaBuilderFriend::BuildAddDependency(
ctx.builder, operand, token, operand_shape);
return success();
}
LogicalResult ExportXlaOp(AllGatherOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op.getOperation(), op.getOperands(), ctx, operands))) {
return failure();
}
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(op.getOperation());
if (failed(shape_or)) return failure();
auto all_gather_dim = op.getAllGatherDim();
int64_t shard_count = 0;
for (size_t i = 0; i < operands.size(); ++i) {
TensorType operand_type =
mlir::cast<TensorType>(op.getOperand(i).getType());
TensorType result_type = mlir::cast<TensorType>(op.getType(i));
if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
return failure();
if (i == 0) {
shard_count = result_type.getDimSize(all_gather_dim) /
operand_type.getDimSize(all_gather_dim);
}
}
if (shape_or->IsTuple()) {
std::optional<xla::Layout> layout = std::nullopt;
if (shape_or->has_layout()) {
layout = shape_or->layout();
}
auto tuple = xla::AllGatherTuple(
operands, all_gather_dim, shard_count,
Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), layout,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
BuildGetTupleElementsForTupleResults(op, tuple, ctx);
} else {
value_map[op->getResults()[0]] = xla::AllGather(
operands[0], all_gather_dim, shard_count,
Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
}
return success();
}
LogicalResult ExportXlaOp(AllReduceOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComputation(),
&computation))) {
return failure();
}
SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op.getOperation(), op.getOperands(), ctx, operands)))
return failure();
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(op.getOperation());
if (failed(shape_or)) return failure();
if (shape_or->IsTuple()) {
std::optional<xla::Shape> shape_with_layout = std::nullopt;
if (shape_or->has_layout()) shape_with_layout = shape_or.value();
auto tuple = xla::AllReduceTuple(
operands, computation, Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), shape_with_layout,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
BuildGetTupleElementsForTupleResults(op, tuple, ctx);
} else {
value_map[op->getResults()[0]] = xla::AllReduce(
operands[0], computation, Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
}
return success();
}
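// For the tuple form, any explicit layout recovered from the op's xla_shape
// attribute is forwarded to AllToAllTuple; the array form requires the
// split/concat dimension attributes and dereferences them directly.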
LogicalResult ExportXlaOp(AllToAllOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op.getOperation(), op.getOperands(), ctx, operands))) {
return failure();
}
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(op.getOperation());
if (failed(shape_or)) return failure();
if (shape_or->IsTuple()) {
std::optional<xla::Layout> layout = std::nullopt;
if (shape_or->has_layout()) {
layout = shape_or->layout();
}
auto tuple = xla::AllToAllTuple(
operands, Convert_replica_groups(op.getReplicaGroups()), layout,
Convert_channel_handle(op.getChannelHandle()));
BuildGetTupleElementsForTupleResults(op, tuple, ctx);
} else {
std::optional<uint64_t> splitDimension = op.getSplitDimension();
std::optional<uint64_t> concatDimension = op.getConcatDimension();
std::optional<uint64_t> splitCount = op.getSplitCount();
value_map[op->getResults()[0]] = xla::AllToAll(
operands[0], *splitDimension, *concatDimension, *splitCount,
Convert_replica_groups(op.getReplicaGroups()),
std::nullopt, Convert_channel_handle(op.getChannelHandle()));
}
return success();
}
LogicalResult ExportXlaOp(ReduceScatterOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
TensorType operand_type = mlir::cast<TensorType>(op.getOperand().getType());
TensorType result_type = op.getType();
if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
return failure();
auto scatter_dim = op.getScatterDimension();
int64_t shard_count = operand_type.getDimSize(scatter_dim) /
result_type.getDimSize(scatter_dim);
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComputation(),
&computation))) {
return failure();
}
value_map[op] = xla::ReduceScatter(
operand, computation, scatter_dim, shard_count,
Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
return success();
}
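// AsyncStartOp export pattern-matches the called computation: when its body
// is a single trivially returned collective or transfer op (all-gather,
// all-reduce, collective-permute, copy, send, recv), the corresponding fused
// async start instruction is built directly; otherwise the callee is lowered
// and wrapped in a generic async-start.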
LogicalResult ExportXlaOp(AsyncStartOp op, OpLoweringContext ctx) {
for (auto* user : op.getResult().getUsers()) {
if (!isa<AsyncUpdateOp, AsyncDoneOp>(user)) {
return op.emitOpError() << "Users of AsyncStart's return value must be "
<< "async_update or async_done";
}
}
auto& value_map = *ctx.values;
Value result = op.getResult();
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
mlir::func::FuncOp callee = ctx.converter->LookUpSymbol(
FlatSymbolRefAttr::get(op->getContext(), op.getCalledComputation()));
auto all_gather_op =
dyn_cast_or_null<AllGatherOp>(callee.getBody().front().front());
if (all_gather_op && SimplyReturnedOp(all_gather_op)) {
TensorType operand_type =
mlir::cast<TensorType>(all_gather_op.getOperand(0).getType());
TensorType result_type = mlir::cast<TensorType>(all_gather_op.getType(0));
if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
return failure();
if (operands.size() != 1) return failure();
auto all_gather_dim = all_gather_op.getAllGatherDim();
int64_t shard_count = result_type.getDimSize(all_gather_dim) /
operand_type.getDimSize(all_gather_dim);
value_map[result] = xla::internal::XlaBuilderFriend::BuildAllGatherStart(
ctx.builder, operands[0], all_gather_dim, shard_count,
Convert_replica_groups(all_gather_op.getReplicaGroups()),
Convert_channel_handle(all_gather_op.getChannelHandle()),
ExtractLayout(all_gather_op,
mlir::cast<RankedTensorType>(result_type).getRank()),
Convert_use_global_device_ids(all_gather_op.getUseGlobalDeviceIds()));
return success();
}
auto all_reduce_op =
dyn_cast_or_null<AllReduceOp>(callee.getBody().front().front());
if (all_reduce_op && SimplyReturnedOp(all_reduce_op)) {
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(
&all_reduce_op.getComputation(), &computation))) {
return failure();
}
if (operands.size() != 1) return failure();
value_map[result] = xla::internal::XlaBuilderFriend::BuildAllReduceStart(
ctx.builder, operands[0], computation,
Convert_replica_groups(all_reduce_op.getReplicaGroups()),
Convert_channel_handle(all_reduce_op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(all_reduce_op.getUseGlobalDeviceIds()));
return success();
}
auto collective_permute_op =
dyn_cast_or_null<CollectivePermuteOp>(callee.getBody().front().front());
if (collective_permute_op && SimplyReturnedOp(collective_permute_op)) {
value_map[result] =
xla::internal::XlaBuilderFriend::BuildCollectivePermuteStart(
ctx.builder, operands[0],
Convert_source_target_pairs(
collective_permute_op.getSourceTargetPairs()),
Convert_channel_handle(collective_permute_op.getChannelHandle()));
return mlir::success();
}
auto copy_op = dyn_cast_or_null<CopyOp>(callee.getBody().front().front());
if (copy_op && SimplyReturnedOp(copy_op)) {
std::optional<int> cross_program_prefetch_index =
copy_op.getCrossProgramPrefetchIndex()
? std::make_optional(*copy_op.getCrossProgramPrefetchIndex())
: std::nullopt;
value_map[result] = xla::internal::XlaBuilderFriend::BuildCopyStart(
ctx.builder, operands[0], cross_program_prefetch_index);
return mlir::success();
}
auto send_op = dyn_cast_or_null<SendOp>(callee.getBody().front().front());
if (send_op && SimplyReturnedOp(send_op)) {
xla::XlaOp operand;
if (operands.size() == 2)
operand = operands[0];
else
operand =
Tuple(ctx.builder, absl::Span<const xla::XlaOp>(operands).subspan(
0, operands.size() - 1));
xla::XlaOp token = operands[operands.size() - 1];
value_map[result] = xla::internal::XlaBuilderFriend::BuildSend(
ctx.builder, operand, token,
Convert_channel_handle(send_op.getChannelHandle()),
send_op.getIsHostTransfer());
return mlir::success();
}
auto recv_op = dyn_cast_or_null<RecvOp>(callee.getBody().front().front());
if (recv_op && SimplyReturnedOp(recv_op)) {
auto result_types =
mlir::cast<AsyncBundleType>(result.getType()).getTypes()[1];
mlir::Type received_type = mlir::TupleType::get(op->getContext(), {});
if (isa<TupleType>(result_types)) {
received_type = mlir::cast<TupleType>(result_types).getType(0);
}
value_map[result] = xla::internal::XlaBuilderFriend::BuildRecv(
ctx.builder, operands[0], xla::TypeToShape(received_type),
Convert_channel_handle(recv_op.getChannelHandle()),
recv_op.getIsHostTransfer());
return mlir::success();
}
if (failed(ctx.converter->RunOnFunction(callee))) return failure();
xla::XlaComputation& computation =
ctx.converter->GetLoweredComputation(callee);
computation.mutable_proto()->mutable_computations(0)->set_execution_thread(
op.getExecutionThread().str());
auto [xla_op, computation_id] =
xla::internal::XlaBuilderFriend::BuildAsyncStart(
ctx.builder, operands, op.getExecutionThread().str(), computation,
xla::TypeToShape(result.getType()));
value_map[result] = xla_op;
computation.mutable_proto()->mutable_computations(0)->set_id(computation_id);
return success();
}
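// Lowers mhlo.async_update. The operand must be produced by an async_start or
// another async_update so that the async chain stays well formed.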
LogicalResult ExportXlaOp(AsyncUpdateOp op, OpLoweringContext ctx) {
if (!isa<AsyncStartOp, AsyncUpdateOp>(op.getBundle().getDefiningOp())) {
    auto error = op.emitError()
                 << "Defining op of AsyncUpdate's operand must be "
                 << "async_start or async_update";
    if (op.getBundle().getDefiningOp()) {
      return error << ", but got "
                   << op.getBundle().getDefiningOp()->getName();
    } else {
      return error << ".";
    }
}
for (auto* user : op.getResult().getUsers()) {
if (!isa<AsyncUpdateOp, AsyncDoneOp>(user)) {
return op.emitOpError() << "Users of AsyncUpdate's return value must be "
<< "async_update or async_done";
}
}
auto& value_map = *ctx.values;
Value result = op.getResult();
xla::XlaOp operand;
if (failed(GetXlaOp(op.getBundle(), value_map, &operand, op)))
return failure();
value_map[result] = xla::internal::XlaBuilderFriend::BuildAsyncUpdate(
ctx.builder, operand, xla::TypeToShape(result.getType()));
return success();
}
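// Lowers mhlo.async_done. Walks the async chain back to its async_start to
// recover the called computation, then emits the matching specialized *-done
// instruction, or a generic async-done for arbitrary computations.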
LogicalResult ExportXlaOp(AsyncDoneOp op, OpLoweringContext ctx) {
if (!isa<AsyncStartOp, AsyncUpdateOp>(op.getBundle().getDefiningOp())) {
    auto error = op.emitError()
                 << "Defining op of AsyncDone's operand must be "
                 << "async_start or async_update";
    if (op.getBundle().getDefiningOp())
      return error << ", but got "
                   << op.getBundle().getDefiningOp()->getName();
    return error << ".";
}
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getBundle(), value_map, &operand, op)))
return failure();
Operation* start = op;
while (start != nullptr && !isa<AsyncStartOp>(start)) {
start = start->getOperand(0).getDefiningOp();
if (start == nullptr || !isa<AsyncStartOp, AsyncUpdateOp>(start)) {
return op.emitError() << "Defining op of AsyncDone's operand must be "
<< "async_start or async_update";
}
}
if (!isa<AsyncStartOp>(start)) {
return op.emitError() << "Could not find async chain start";
}
mlir::func::FuncOp callee =
ctx.converter->LookUpSymbol(FlatSymbolRefAttr::get(
op->getContext(), cast<AsyncStartOp>(start).getCalledComputation()));
auto all_gather_op =
dyn_cast_or_null<AllGatherOp>(callee.getBody().front().front());
if (all_gather_op && SimplyReturnedOp(all_gather_op)) {
value_map[op.getResult(0)] =
xla::internal::XlaBuilderFriend::BuildAllGatherDone(
ctx.builder, operand, xla::TypeToShape(all_gather_op.getType(0)));
return success();
}
auto all_reduce_op =
dyn_cast_or_null<AllReduceOp>(callee.getBody().front().front());
if (all_reduce_op && SimplyReturnedOp(all_reduce_op)) {
value_map[op.getResult(0)] =
xla::internal::XlaBuilderFriend::BuildAllReduceDone(
ctx.builder, operand, xla::TypeToShape(all_reduce_op.getType(0)));
return success();
}
auto collective_permute_op =
dyn_cast_or_null<CollectivePermuteOp>(callee.getBody().front().front());
if (collective_permute_op && SimplyReturnedOp(collective_permute_op)) {
value_map[op.getResult(0)] =
xla::internal::XlaBuilderFriend::BuildCollectivePermuteDone(
ctx.builder, operand,
xla::TypeToShape(collective_permute_op.getType()));
return success();
}
auto copy_op = dyn_cast_or_null<CopyOp>(callee.getBody().front().front());
if (copy_op && SimplyReturnedOp(copy_op)) {
value_map[op.getResult(0)] = xla::internal::XlaBuilderFriend::BuildCopyDone(
ctx.builder, operand, xla::TypeToShape(copy_op.getType()));
return success();
}
auto send_op = dyn_cast_or_null<SendOp>(callee.getBody().front().front());
if (send_op && SimplyReturnedOp(send_op)) {
value_map[op.getResult(0)] = xla::internal::XlaBuilderFriend::BuildSendDone(
ctx.builder, operand,
Convert_channel_handle(send_op.getChannelHandle()),
send_op.getIsHostTransfer());
return success();
}
auto recv_op = dyn_cast_or_null<RecvOp>(callee.getBody().front().front());
if (recv_op && SimplyReturnedOp(recv_op)) {
auto result_types =
mlir::cast<AsyncBundleType>(op.getBundle().getType()).getTypes()[1];
mlir::Type received_type = mlir::TupleType::get(op->getContext(), {});
if (isa<TupleType>(result_types)) {
received_type = mlir::cast<TupleType>(result_types).getType(0);
}
xla::XlaOp xla_recv = xla::internal::XlaBuilderFriend::BuildRecvDone(
ctx.builder, operand, xla::TypeToShape(received_type),
Convert_channel_handle(recv_op.getChannelHandle()),
recv_op.getIsHostTransfer());
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = xla_recv;
} else {
BuildGetTupleElementsForTupleResults(op, xla_recv, ctx);
}
return success();
}
std::vector<xla::Shape> subshapes;
for (const auto& item : op.getResults().getType()) {
subshapes.push_back(xla::TypeToShape(item));
}
xla::Shape data_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
xla::XlaOp exportedOp = xla::internal::XlaBuilderFriend::BuildAsyncDone(
ctx.builder, operand, data_shape);
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = exportedOp;
} else {
BuildGetTupleElementsForTupleResults(op, exportedOp, ctx);
}
return success();
}
LogicalResult ExportXlaOp(BitcastConvertOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] = xla::BitcastConvertType(
operand,
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType())));
return success();
}
LogicalResult ExportXlaOp(BroadcastInDimOp op, OpLoweringContext ctx) {
auto type = mlir::dyn_cast<RankedTensorType>(op.getType());
if (!type) return failure();
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] =
BroadcastInDim(operand, Convert_ArrayRef(type.getShape()),
Convert_broadcast_dimensions(op.getBroadcastDimensions()));
return success();
}
LogicalResult ExportXlaOp(StochasticConvertOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand, random;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getRandom(), value_map, &random, op)))
return failure();
value_map[op] = xla::StochasticConvertType(
operand, random,
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType())));
return success();
}
LogicalResult ExportXlaOp(CosineOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp arg;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &arg, op)))
return mlir::failure();
auto xla_result = xla::Cos(Unwrap(arg));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(TanOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp arg;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &arg, op)))
return mlir::failure();
auto xla_result = xla::Tan(Unwrap(arg));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(DotOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
value_map[op] = xla::Dot(
lhs, rhs, Unwrap(Convert_precision_config(op.getPrecisionConfig())),
preferred_element_type);
return mlir::success();
}
LogicalResult ExportXlaOp(DotGeneralOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
auto precision_config = Convert_precision_config(op.getPrecisionConfig());
if (op.getAlgorithmAttr()) {
absl::StatusOr<xla::PrecisionConfig::Algorithm> algorithm =
xla::ConvertDotAlgorithm(op.getAlgorithmAttr());
if (!algorithm.ok()) {
return op.emitError(algorithm.status().ToString());
}
precision_config->set_algorithm(algorithm.value());
}
auto xlaOp = xla::DotGeneral(
lhs, rhs, Convert_dot_dimension_numbers(op.getDotDimensionNumbers()),
Unwrap(precision_config), preferred_element_type);
value_map[op] = xlaOp;
return mlir::success();
}
LogicalResult ExportXlaOp(SparseDotOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
llvm::SmallVector<xla::XlaOp> sparse_meta;
if (failed(GetTuple(op, op.getMeta(), ctx, sparse_meta))) return failure();
std::vector<xla::SparsityDescriptor> sparsity;
if (op.getLhsSparsity().has_value()) {
sparsity.push_back(
Convert_sparsity_descriptor(*op.getLhsSparsity(), true));
}
if (op.getRhsSparsity().has_value()) {
sparsity.push_back(
Convert_sparsity_descriptor(*op.getRhsSparsity(), false));
}
value_map[op] =
xla::SparseDot(lhs, rhs, absl::MakeSpan(sparse_meta), sparsity,
Convert_dot_dimension_numbers(op.getDotDimensionNumbers()),
Unwrap(Convert_precision_config(op.getPrecisionConfig())),
preferred_element_type);
return mlir::success();
}
LogicalResult ExportXlaOp(DomainOp op, OpLoweringContext ctx) {
auto& valueMap = *ctx.values;
xla::Shape shape = xla::TypeToShape(op.getResult().getType());
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), valueMap, &operand, op)))
return failure();
auto entry = xla::ConvertSharding(op.getEntryMetadata());
if (!entry) return failure();
auto exit = xla::ConvertSharding(op.getExitMetadata());
if (!exit) return failure();
valueMap[op] = xla::internal::XlaBuilderFriend::BuildDomain(
ctx.builder, operand, *exit, *entry, shape);
return success();
}
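// Lowers mhlo.if. Values captured implicitly by either branch are collected
// and passed explicitly, since XLA computations receive all inputs as
// arguments.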
LogicalResult ExportXlaOp(IfOp op, OpLoweringContext ctx) {
xla::XlaComputation true_branch;
xla::XlaComputation false_branch;
auto& value_map = *ctx.values;
llvm::SetVector<mlir::Value> implicit_true_operand_set,
implicit_false_operand_set;
getUsedValuesDefinedAbove(op.getTrueBranch(), op.getTrueBranch(),
implicit_true_operand_set);
getUsedValuesDefinedAbove(op.getFalseBranch(), op.getFalseBranch(),
implicit_false_operand_set);
llvm::SmallVector<mlir::Value> implicit_true_operands =
implicit_true_operand_set.takeVector();
llvm::SmallVector<mlir::Value> implicit_false_operands =
implicit_false_operand_set.takeVector();
llvm::SmallVector<std::optional<xla::OpSharding>> ret_shardings =
GetResultShardings(ctx.builder->sharding(), op->getNumResults());
llvm::SmallVector<xla::XlaOp> true_args;
if (failed(GetXlaOps(op, implicit_true_operands, ctx, true_args)))
return failure();
llvm::SmallVector<xla::XlaOp> false_args;
if (failed(GetXlaOps(op, implicit_false_operands, ctx, false_args)))
return failure();
llvm::SmallVector<std::optional<xla::OpSharding>> true_arg_shardings,
false_arg_shardings;
if (!ret_shardings.empty()) {
true_arg_shardings = GetXlaOpShardings(true_args);
false_arg_shardings = GetXlaOpShardings(false_args);
}
if (failed(ctx.converter->LowerRegionAsComputation(
&op.getTrueBranch(), &true_branch, implicit_true_operands,
          /*implicit_results=*/{}, /*ensure_single_arg=*/true,
true_arg_shardings, ret_shardings)) ||
failed(ctx.converter->LowerRegionAsComputation(
&op.getFalseBranch(), &false_branch, implicit_false_operands,
          /*implicit_results=*/{}, /*ensure_single_arg=*/true,
false_arg_shardings, ret_shardings))) {
return failure();
}
xla::XlaOp pred;
if (failed(GetXlaOp(op.getPred(), value_map, &pred, op))) return failure();
xla::XlaOp true_arg =
CreateTupleIfMultipleOps(ctx.builder, true_args, true_arg_shardings);
xla::XlaOp false_arg =
CreateTupleIfMultipleOps(ctx.builder, false_args, false_arg_shardings);
auto ifop =
xla::Conditional(pred, true_arg, true_branch, false_arg, false_branch);
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = ifop;
} else {
BuildGetTupleElementsForTupleResults(op, ifop, ctx);
}
return success();
}
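// Lowers mhlo.case to xla::Conditional with one computation per branch;
// implicit captures are materialized as explicit branch operands.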
LogicalResult ExportXlaOp(CaseOp op, OpLoweringContext ctx) {
llvm::DenseMap<mlir::Value, xla::XlaOp>& value_map = *ctx.values;
MutableArrayRef<Region> branches = op.getBranches();
llvm::SmallVector<xla::XlaOp, 4> branch_operands(branches.size());
std::vector<xla::XlaComputation> computations(branches.size());
std::vector<xla::XlaComputation*> computations_p(branches.size());
for (unsigned i = 0; i < branches.size(); ++i) {
llvm::SetVector<mlir::Value> implicit_operand_set;
getUsedValuesDefinedAbove(branches[i], branches[i], implicit_operand_set);
llvm::SmallVector<mlir::Value> implicit_operands =
implicit_operand_set.takeVector();
llvm::SmallVector<std::optional<xla::OpSharding>> ret_shardings =
GetResultShardings(ctx.builder->sharding(), op->getNumResults());
llvm::SmallVector<xla::XlaOp> args;
if (failed(GetXlaOps(op, implicit_operands, ctx, args))) return failure();
llvm::SmallVector<std::optional<xla::OpSharding>> arg_shardings;
if (!ret_shardings.empty()) {
arg_shardings = GetXlaOpShardings(args);
}
branch_operands[i] =
CreateTupleIfMultipleOps(ctx.builder, args, arg_shardings);
computations_p[i] = &computations[i];
if (failed(ctx.converter->LowerRegionAsComputation(
&branches[i], computations_p[i], implicit_operands,
            /*implicit_results=*/{}, /*ensure_single_arg=*/true, arg_shardings,
ret_shardings)))
return failure();
}
xla::XlaOp index;
if (failed(GetXlaOp(op.getIndex(), value_map, &index, op))) return failure();
xla::XlaOp caseop = xla::Conditional(index, computations_p, branch_operands);
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = caseop;
} else {
BuildGetTupleElementsForTupleResults(op, caseop, ctx);
}
return success();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::CompareOp op,
OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
auto dir = Convert_comparison_direction(
mlir::mhlo::stringifyComparisonDirection(op.getComparisonDirection()));
auto type_attr = op.getCompareTypeAttr();
xla::XlaOp xla_result;
if (type_attr && type_attr.getValue() != mlir::mhlo::ComparisonType::NOTYPE) {
auto type = xla::StringToComparisonType(
stringifyComparisonType(type_attr.getValue()).str())
.value();
xla_result = xla::Compare(lhs, rhs, {}, dir, type);
} else {
xla_result = xla::Compare(lhs, rhs, dir);
}
value_map[op] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(ConstantOp op, OpLoweringContext ctx) {
  // mhlo.constant is lowered from its value attribute via LowerConstant, not
  // through the generic exporter.
  return failure();
}
LogicalResult ExportXlaOp(mlir::mhlo::ConvolutionOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
xla::XlaOp xla_result = xla::ConvGeneralDilated(
lhs, rhs, Convert_window_strides(op.getWindowStrides()),
Convert_padding(op.getPadding()),
Convert_lhs_dilation(op.getLhsDilation()),
Convert_rhs_dilation(op.getRhsDilation()),
xla::ConvertConvDimensionNumbers(op.getDimensionNumbers()),
Convertuint64_t(op.getFeatureGroupCount()),
Convertuint64_t(op.getBatchGroupCount()),
Unwrap(Convert_precision_config(op.getPrecisionConfig())),
preferred_element_type, Convert_window_reversal(op.getWindowReversal()));
value_map[op] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(ConvertOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] = xla::ConvertElementType(
operand,
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType())));
return success();
}
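// Lowers mhlo.custom_call. ApproxTopK targets get dedicated validation and
// lowering; every other target goes through the generic custom-call paths.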
LogicalResult ExportXlaOp(CustomCallOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
llvm::SmallVector<xla::XlaOp> args;
if (failed(GetTuple(op, op.getInputs(), ctx, args))) return failure();
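  // ApproxTopK is exported through xla::ApproxTopK / xla::ApproxTopKFallback
  // after validating its attributes, backend_config entries, operand shapes,
  // and the comparator's signature.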
if (op.getCallTargetName() == kApproxTopK) {
auto isSupportedAttrName = [](NamedAttribute attr) {
auto name = attr.getName();
return name == kCallTargetName || name == kBackendConfig ||
name == kApiVersion || name == kCalledComputations ||
name == kHasSideEffect;
};
for (const auto& attr : op->getAttrs()) {
if (!isSupportedAttrName(attr))
return op.emitOpError()
<< attr.getName().getValue()
<< " is not a supported attribute for ApproxTopK";
}
auto backend_config =
mlir::dyn_cast_or_null<mlir::DictionaryAttr>(op.getBackendConfigAttr());
if (!backend_config)
return op.emitOpError() << "Missing backend_config attribute";
for (auto attr : backend_config) {
auto name = attr.getName();
if (!(name == kTopK || name == kReductionDim || name == kRecallTarget ||
name == kAggregateToTopk || name == kReductionInputSizeOverride ||
name == kIsFallback))
return op.emitOpError()
<< name.getValue() << " is not a supported backend_config"
<< " attribute for ApproxTopK";
}
auto checkI64Attr =
[&](const std::string& attr_name) -> mlir::LogicalResult {
if (!backend_config.contains(attr_name))
return op.emitOpError()
<< "Missing " << attr_name << " attribute in backend_config";
auto attr = backend_config.getAs<IntegerAttr>(attr_name);
if (!attr || !attr.getType().isInteger(64))
return op.emitOpError()
<< attr_name
<< " attribute in backend_config must be of i64 type";
return success();
};
auto checkF32Attr =
[&](const std::string& attr_name) -> mlir::LogicalResult {
if (!backend_config.contains(attr_name))
return op.emitOpError()
<< "Missing " << attr_name << " attribute in backend_config";
auto attr = backend_config.getAs<FloatAttr>(attr_name);
if (!attr || !attr.getType().isF32())
return op.emitOpError()
<< attr_name
<< " attribute in backend_config must be of f32 type";
return success();
};
auto checkBoolAttr =
[&](const std::string& attr_name) -> mlir::LogicalResult {
if (!backend_config.contains(attr_name))
return op.emitOpError()
<< "Missing " << attr_name << " attribute in backend_config";
auto attr = backend_config.getAs<BoolAttr>(attr_name);
if (!attr)
return op.emitOpError()
<< attr_name
<< " attribute in backend_config must be of bool type";
return success();
};
if (failed(checkI64Attr(kTopK))) return failure();
if (failed(checkI64Attr(kReductionDim))) return failure();
if (failed(checkF32Attr(kRecallTarget))) return failure();
if (failed(checkBoolAttr(kAggregateToTopk))) return failure();
if (failed(checkI64Attr(kReductionInputSizeOverride))) return failure();
bool has_is_fallback = backend_config.contains(kIsFallback);
if (has_is_fallback && !backend_config.getAs<BoolAttr>(kIsFallback))
return op.emitOpError()
<< "is_fallback attribute in backend_config must be of bool type";
int64_t top_k = backend_config.getAs<IntegerAttr>(kTopK).getInt();
int64_t reduction_dim =
backend_config.getAs<IntegerAttr>(kReductionDim).getInt();
float recall_target = backend_config.getAs<FloatAttr>(kRecallTarget)
.getValue()
.convertToFloat();
bool aggregate_to_topk =
backend_config.getAs<BoolAttr>(kAggregateToTopk).getValue();
int64_t reduction_input_size_override =
backend_config.getAs<IntegerAttr>(kReductionInputSizeOverride).getInt();
bool is_fallback = has_is_fallback &&
backend_config.getAs<BoolAttr>(kIsFallback).getValue();
if (args.size() % 2 != 0) {
return op.emitOpError() << "ApproxTopK takes an even number of operands.";
}
auto num_inputs = args.size() / 2;
absl::Span<const xla::XlaOp> inputs(args.begin(), num_inputs);
absl::Span<const xla::XlaOp> init_values(args.begin() + num_inputs,
num_inputs);
if (num_inputs != op.getNumResults()) {
return op.emitOpError() << "num_results does not match num_inputs";
}
SmallVector<RankedTensorType> input_types, init_value_types, result_types;
for (size_t i = 0; i < num_inputs; ++i) {
auto input_type =
mlir::dyn_cast<RankedTensorType>(op.getOperand(i).getType());
if (!input_type) return failure();
input_types.push_back(input_type);
auto init_value_type = mlir::dyn_cast<RankedTensorType>(
op.getOperand(num_inputs + i).getType());
if (!init_value_type) return failure();
init_value_types.push_back(init_value_type);
auto result_type =
mlir::dyn_cast<RankedTensorType>(op.getResult(i).getType());
if (!result_type) return failure();
result_types.push_back(result_type);
}
for (size_t i = 0; i < inputs.size(); ++i) {
if (input_types[0].getShape() != input_types[i].getShape()) {
return op.emitOpError() << "input shape mismatch at position " << i;
}
if (init_value_types[i].getElementType() !=
input_types[i].getElementType()) {
return op.emitOpError()
<< "input and init_value element type mismatch at position "
<< i;
}
if (input_types[i].getElementType() != result_types[i].getElementType()) {
return op.emitOpError()
<< "result element type mismatch at position " << i;
}
for (size_t j = 0; j < input_types[i].getRank(); ++j) {
if (j == reduction_dim) {
auto reduction_output_size = xla::ApproxTopKReductionOutputSize(
input_types[i].getShape()[j], input_types[i].getRank(), top_k,
recall_target, aggregate_to_topk, reduction_input_size_override);
if (!reduction_output_size.ok()) return failure();
if (result_types[i].getShape()[j] != reduction_output_size->first)
return op.emitOpError()
<< "ApproxTopK aggregates to k="
<< reduction_output_size->first << ", but got "
<< result_types[i].getShape()[j];
continue;
}
if (input_types[i].getShape()[j] != result_types[i].getShape()[j]) {
return op.emitOpError() << "result shape mismatch at position " << i
<< ", index " << j;
}
}
}
auto called_computations = op.getCalledComputations();
if (called_computations.size() != 1) {
return op.emitOpError()
<< "ApproxTopK takes exactly 1 called_computation.";
}
mlir::func::FuncOp callee = ctx.converter->LookUpSymbol(
mlir::cast<FlatSymbolRefAttr>(op.getCalledComputations()[0]));
mlir::FunctionType callee_type = callee.getFunctionType();
SmallVector<Type, 4> expected_callee_input_types;
for (unsigned i = 0; i < num_inputs; ++i) {
auto scalar = RankedTensorType::get({}, input_types[i].getElementType());
expected_callee_input_types.push_back(scalar);
expected_callee_input_types.push_back(scalar);
}
FunctionType expected_callee_type = mlir::FunctionType::get(
op->getContext(), expected_callee_input_types,
RankedTensorType::get({}, IntegerType::get(op->getContext(), 1)));
if (callee_type != expected_callee_type) {
return op.emitOpError()
<< "called_computation type does not match the expected type. Got "
<< callee_type << " expected " << expected_callee_type;
}
if (failed(ctx.converter->RunOnFunction(callee))) return failure();
xla::XlaComputation& comparator =
ctx.converter->GetLoweredComputation(callee);
    // Valid dimension indices are [0, rank), so rank itself is out of range.
    if (reduction_dim < 0 || reduction_dim >= input_types[0].getRank())
      return op.emitOpError() << "reduction_dim out of range";
if (recall_target <= 0 || recall_target > 1.0)
return op.emitOpError() << "recall_target out of range";
if (reduction_input_size_override >= 0 &&
reduction_input_size_override <
input_types[0].getShape()[reduction_dim])
return op.emitOpError() << "reduction_input_size_override out of range";
xla::XlaOp cc_op;
if (is_fallback) {
cc_op = xla::ApproxTopKFallback(
ctx.builder, inputs, init_values, top_k, reduction_dim, comparator,
recall_target, aggregate_to_topk, reduction_input_size_override);
} else {
cc_op = xla::ApproxTopK(ctx.builder, inputs, init_values, top_k,
reduction_dim, comparator, recall_target,
aggregate_to_topk, reduction_input_size_override);
}
BuildGetTupleElementsForTupleResults(op, cc_op, ctx);
return success();
}
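  // Generic custom-call export: serialize the backend config (a dictionary
  // for typed FFI, otherwise a raw string) and pick the with-computation,
  // with-layout, or plain CustomCall builder.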
if (op.getCalledComputations().size() > 1)
return op.emitOpError()
<< "cannot export with more than one called computations";
if (!op.getCalledComputations().empty() && op.getOperandLayouts() &&
op.getResultLayouts()) {
return op.emitOpError() << "cannot export if both called computation and "
"layouts are specified";
}
auto xla_api_version = xla::ConvertCustomCallApiVersion(op.getApiVersion());
if (!xla_api_version.ok()) return failure();
std::string backend_config;
if (*xla_api_version == xla::CustomCallApiVersion::API_VERSION_TYPED_FFI) {
if (auto dict = mlir::dyn_cast_or_null<mlir::DictionaryAttr>(
op.getBackendConfig().value_or(mlir::Attribute()))) {
llvm::raw_string_ostream(backend_config) << dict;
}
} else {
if (auto str = mlir::dyn_cast_or_null<mlir::StringAttr>(
op.getBackendConfig().value_or(mlir::Attribute()))) {
llvm::raw_string_ostream(backend_config) << str.strref();
}
}
absl::StatusOr<xla::Literal> literal;
const xla::Literal* literal_ptr = nullptr;
auto literal_attr = op->getAttrOfType<DenseElementsAttr>(kMhloLiteral);
if (literal_attr) {
literal = CreateArrayLiteralFromAttr(literal_attr, {});
if (!literal.ok()) return failure();
literal_ptr = &*literal;
}
auto aliasInfo =
xla::ConvertOutputOperandAliasing(op.getOutputOperandAliases());
auto output_operand_aliasing = absl::MakeSpan(*aliasInfo);
auto custom_call_schedule =
xla::ConvertCustomCallSchedule(op.getCustomCallSchedule());
if (!custom_call_schedule.ok()) return failure();
std::string call_target_name(op.getCallTargetName());
xla::Shape result_shape;
if (op->getNumResults() == 1) {
result_shape = xla::TypeToShape(op.getResult(0).getType());
} else {
std::vector<xla::Shape> subshapes;
for (const auto& item : op.getResults().getType()) {
subshapes.push_back(xla::TypeToShape(item));
}
result_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
}
xla::XlaOp custom_call;
if (op.getCalledComputations().size() == 1) {
mlir::func::FuncOp callee = ctx.converter->LookUpSymbol(
mlir::cast<FlatSymbolRefAttr>(op.getCalledComputations()[0]));
if (failed(ctx.converter->RunOnFunction(callee))) return failure();
xla::XlaComputation& computation =
ctx.converter->GetLoweredComputation(callee);
custom_call = xla::CustomCallWithComputation(
ctx.builder, call_target_name, args, computation, result_shape,
backend_config, op.getHasSideEffect(), output_operand_aliasing,
literal_ptr, *custom_call_schedule, *xla_api_version);
} else if (op.getOperandLayouts() && op.getResultLayouts()) {
auto operand_shapes_with_layout = ConvertTypesToShapesWithLayout(
op.getOperandTypes(), op.getOperandLayouts().value());
SetLayout(result_shape, op.getResultLayouts().value());
custom_call = xla::CustomCallWithLayout(
ctx.builder, call_target_name, args, result_shape,
operand_shapes_with_layout, backend_config, op.getHasSideEffect(),
output_operand_aliasing, literal_ptr, *custom_call_schedule,
*xla_api_version);
} else {
custom_call = xla::CustomCall(
ctx.builder, call_target_name, args, result_shape, backend_config,
op.getHasSideEffect(), output_operand_aliasing, literal_ptr,
*custom_call_schedule, *xla_api_version);
}
if (op->getNumResults() == 1) {
value_map[op.getResult(0)] = custom_call;
} else {
BuildGetTupleElementsForTupleResults(op, custom_call, ctx);
}
return success();
}
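// Lowers mhlo.infeed. MHLO models infeed as (data..., token) results, while
// HLO returns tuple(data-tuple, token); get-tuple-element ops bridge the two.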
LogicalResult ExportXlaOp(InfeedOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
auto result_types = op.getResultTypes();
auto num_results = op.getNumResults();
xla::Shape token_shape = xla::TypeToShape(result_types[num_results - 1]);
std::vector<xla::Shape> subshapes;
for (const auto& item : llvm::enumerate(result_types)) {
if (item.index() == num_results - 1) break;
subshapes.push_back(xla::TypeToShape(item.value()));
}
xla::Shape data_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
auto xla_result = xla::InfeedWithToken(token, data_shape,
std::string(op.getInfeedConfig()));
ctx.builder->ClearSharding();
if (!subshapes.empty()) {
auto data_tuple_element = xla::GetTupleElement(xla_result, 0);
for (const auto& item : llvm::enumerate(op.getResults())) {
if (item.index() == num_results - 1) break;
value_map[item.value()] =
xla::GetTupleElement(data_tuple_element, item.index());
}
}
value_map[op.getResult(num_results - 1)] =
xla::GetTupleElement(xla_result, 1);
return success();
}
LogicalResult ExportXlaOp(IotaOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
value_map[op] = xla::Iota(ctx.builder, xla::TypeToShape(op.getType()),
op.getIotaDimension());
return success();
}
LogicalResult ExportXlaOp(MapOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComputation(),
&computation))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
value_map[op] = xla::Map(ctx.builder, operands, computation,
Convert_dimensions(op.getDimensions()));
return success();
}
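// Lowers mhlo.outfeed. Operands are packed into a tuple; when the current
// sharding does not supply one tuple sharding per operand, the tuple is built
// with sharding cleared to avoid attaching a mismatched tuple sharding.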
LogicalResult ExportXlaOp(OutfeedOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
const auto sharding = ctx.builder->sharding();
xla::XlaOp operand;
if (sharding.has_value() &&
sharding->tuple_shardings_size() != operands.size()) {
xla::XlaScopedShardingAssignment scoped_sharding(ctx.builder, std::nullopt);
operand = Tuple(ctx.builder, operands);
} else {
operand = Tuple(ctx.builder, operands);
}
std::vector<xla::Shape> subshapes;
for (auto operand : op.getInputs())
subshapes.push_back(xla::TypeToShape(operand.getType()));
xla::Shape shape_with_layout = xla::ShapeUtil::MakeTupleShape(subshapes);
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
value_map[op] = xla::OutfeedWithToken(operand, token, shape_with_layout,
std::string(op.getOutfeedConfig()));
return success();
}
LogicalResult ExportXlaOp(PartitionIdOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::Shape shape = xla::TypeToShape(op.getResult().getType());
value_map[op] =
xla::internal::XlaBuilderFriend::BuildPartitionId(ctx.builder, shape);
return success();
}
LogicalResult ExportXlaOp(PadOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::PaddingConfig padding_config;
auto edge_padding_low = ConvertDenseIntAttr(op.getEdgePaddingLow());
auto edge_padding_high = ConvertDenseIntAttr(op.getEdgePaddingHigh());
auto interior_padding = ConvertDenseIntAttr(op.getInteriorPadding());
for (int64_t i = 0, end = edge_padding_low.size(); i < end; ++i) {
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(edge_padding_low[i]);
dims->set_edge_padding_high(edge_padding_high[i]);
dims->set_interior_padding(interior_padding[i]);
}
xla::XlaOp operand, padding_value;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getPaddingValue(), value_map, &padding_value, op)))
return failure();
value_map[op] = xla::Pad(operand, padding_value, padding_config);
return success();
}
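// Lowers mhlo.recv as an HLO recv/recv-done pair and unpacks the received
// data and token into the op's results.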
LogicalResult ExportXlaOp(RecvOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
auto result_types = op.getResultTypes();
auto num_results = op.getNumResults();
xla::Shape token_shape = xla::TypeToShape(result_types[num_results - 1]);
std::vector<xla::Shape> subshapes;
for (const auto& item : llvm::enumerate(result_types)) {
if (item.index() == num_results - 1) break;
subshapes.push_back(xla::TypeToShape(item.value()));
}
xla::Shape data_shape;
if (subshapes.size() == 1)
data_shape = subshapes[0];
else
data_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
token = xla::internal::XlaBuilderFriend::BuildRecv(
ctx.builder, token, data_shape,
Convert_channel_handle(op.getChannelHandle()), op.getIsHostTransfer());
xla::XlaOp xla_result = xla::internal::XlaBuilderFriend::BuildRecvDone(
ctx.builder, token, data_shape,
Convert_channel_handle(op.getChannelHandle()), op.getIsHostTransfer());
auto data_tuple_element = xla::GetTupleElement(xla_result, 0);
if (subshapes.size() == 1) {
value_map[op.getResult(0)] = data_tuple_element;
} else {
for (const auto& item : llvm::enumerate(op.getResults())) {
if (item.index() == num_results - 1) break;
value_map[item.value()] =
xla::GetTupleElement(data_tuple_element, item.index());
}
}
value_map[op.getResult(num_results - 1)] =
xla::GetTupleElement(xla_result, 1);
return success();
}
LogicalResult ExportXlaOp(ReduceOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation body;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getBody(), &body))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands, init_values;
if (failed(GetTuple(op, op.getInputs(), ctx, operands)) ||
failed(GetTuple(op, op.getInitValues(), ctx, init_values))) {
return failure();
}
xla::XlaOp result =
xla::Reduce(ctx.builder, operands, init_values, body,
Convert_broadcast_dimensions(op.getDimensions()));
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = result;
} else {
BuildGetTupleElementsForTupleResults(op, result, ctx);
}
return success();
}
LogicalResult ExportXlaOp(ReduceWindowOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation body;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getBody(), &body))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands, init_values;
if (failed(GetTuple(op, op.getInputs(), ctx, operands)) ||
failed(GetTuple(op, op.getInitValues(), ctx, init_values))) {
return failure();
}
xla::XlaOp result = xla::ReduceWindowWithGeneralPadding(
operands, init_values, body,
ConvertDenseIntAttr(op.getWindowDimensions()),
ConvertDenseIntAttr(op.getWindowStrides()),
ConvertDenseIntAttr(op.getBaseDilations()),
ConvertDenseIntAttr(op.getWindowDilations()),
Convert_padding(op.getPadding()));
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = result;
} else {
BuildGetTupleElementsForTupleResults(op, result, ctx);
}
return success();
}
LogicalResult ExportXlaOp(ReshapeOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] =
xla::Reshape(operand, xla::TypeToShape(op.getType()).dimensions());
return success();
}
LogicalResult ExportXlaOp(ReturnOp op, OpLoweringContext ctx) {
  // mhlo.return is handled by LowerReturn when lowering the enclosing region.
  return failure();
}
LogicalResult ExportXlaOp(RngBitGeneratorOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto results = op.getResults();
auto xla_arg_1 = value_map[*op.getODSOperands(0).begin()];
auto xla_result = xla::RngBitGenerator(
static_cast<xla::RandomAlgorithm>(op.getRngAlgorithm()),
Unwrap(xla_arg_1), xla::TypeToShape(results[1].getType()));
BuildGetTupleElementsForTupleResults(op, xla_result, ctx);
return mlir::success();
}
LogicalResult ExportXlaOp(XlaRngGetAndUpdateStateOp op, OpLoweringContext ctx) {
(*ctx.values)[op.getResult()] =
xla::internal::XlaBuilderFriend::BuildRngGetAndUpdateState(
ctx.builder, static_cast<int64_t>(op.getDelta()),
xla::TypeToShape(op.getType()));
return mlir::success();
}
LogicalResult ExportXlaOp(BatchNormGradOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand, scale, mean, variance, grad_output;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getScale(), value_map, &scale, op))) return failure();
if (failed(GetXlaOp(op.getMean(), value_map, &mean, op))) return failure();
if (failed(GetXlaOp(op.getVariance(), value_map, &variance, op)))
return failure();
if (failed(GetXlaOp(op.getGradOutput(), value_map, &grad_output, op)))
return failure();
auto xla_result =
xla::BatchNormGrad(operand, scale, mean, variance, grad_output,
ConvertAPFloat(op.getEpsilon()), op.getFeatureIndex());
BuildGetTupleElementsForTupleResults(op, xla_result, ctx);
return mlir::success();
}
LogicalResult ExportXlaOp(BatchNormTrainingOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand, scale, offset;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getScale(), value_map, &scale, op))) return failure();
if (failed(GetXlaOp(op.getOffset(), value_map, &offset, op)))
return failure();
auto xla_result = xla::BatchNormTraining(operand, scale, offset,
ConvertAPFloat(op.getEpsilon()),
op.getFeatureIndex());
BuildGetTupleElementsForTupleResults(op, xla_result, ctx);
return mlir::success();
}
LogicalResult ExportXlaOp(RngOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp a, b;
if (failed(GetXlaOp(op.getA(), value_map, &a, op))) return failure();
if (failed(GetXlaOp(op.getB(), value_map, &b, op))) return failure();
if (op.getRngDistribution() == RngDistribution::UNIFORM) {
value_map[op] = xla::RngUniform(a, b, xla::TypeToShape(op.getType()));
return success();
} else if (op.getRngDistribution() == RngDistribution::NORMAL) {
value_map[op] = xla::RngNormal(a, b, xla::TypeToShape(op.getType()));
return success();
}
return failure();
}
LogicalResult ExportXlaOp(ScatterOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation update_computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getUpdateComputation(),
&update_computation))) {
return failure();
}
xla::ScatterDimensionNumbers dimension_numbers =
Convert_scatter_dimension_numbers(op.getScatterDimensionNumbers());
llvm::SmallVector<xla::XlaOp> operands;
llvm::SmallVector<xla::XlaOp> updates;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
if (failed(GetTuple(op, op.getUpdates(), ctx, updates))) return failure();
xla::XlaOp scatter_indices;
if (failed(GetXlaOp(op.getScatterIndices(), value_map, &scatter_indices, op)))
return failure();
auto scatter_op = xla::Scatter(
operands, scatter_indices, updates, update_computation, dimension_numbers,
op.getIndicesAreSorted(), op.getUniqueIndices());
if (op->getNumResults() == 1) {
value_map[op.getResult(0)] = scatter_op;
return success();
}
BuildGetTupleElementsForTupleResults(op, scatter_op, ctx);
return success();
}
LogicalResult ExportXlaOp(SelectAndScatterOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation select;
xla::XlaComputation scatter;
if (failed(
ctx.converter->LowerRegionAsComputation(&op.getSelect(), &select)) ||
failed(ctx.converter->LowerRegionAsComputation(&op.getScatter(),
&scatter))) {
return failure();
}
xla::XlaOp operand, source, init_value;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getSource(), value_map, &source, op)))
return failure();
if (failed(GetXlaOp(op.getInitValue(), value_map, &init_value, op)))
return failure();
value_map[op] = xla::SelectAndScatterWithGeneralPadding(
operand, select, ConvertDenseIntAttr(op.getWindowDimensions()),
ConvertDenseIntAttr(op.getWindowStrides()),
Convert_padding(op.getPadding()), source, init_value, scatter);
return success();
}
LogicalResult ExportXlaOp(SendOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
xla::XlaOp operand;
if (operands.size() == 1)
operand = operands[0];
else
operand = Tuple(ctx.builder, operands);
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
token = xla::internal::XlaBuilderFriend::BuildSend(
ctx.builder, operand, token,
Convert_channel_handle(op.getChannelHandle()), op.getIsHostTransfer());
value_map[op] = xla::internal::XlaBuilderFriend::BuildSendDone(
ctx.builder, token, Convert_channel_handle(op.getChannelHandle()),
op.getIsHostTransfer());
return success();
}
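// Lowers mhlo.set_dimension_size. If the requested size is a constant equal
// to the static dimension bound, the dynamic dimension is simply removed;
// otherwise an xla::SetDimensionSize is emitted.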
mlir::LogicalResult ExportXlaOp(mlir::mhlo::SetDimensionSizeOp op,
OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp array;
if (failed(GetXlaOp(op.getOperand(), value_map, &array, op)))
return mlir::failure();
auto dimension = Convertuint64_t(op.getDimension());
auto shape_or = ctx.builder->GetShapePtr(array);
if (!shape_or.ok()) {
return op.emitError(shape_or.status().ToString());
}
xla::XlaOp xla_result;
if (auto constant = llvm::dyn_cast_or_null<mlir::mhlo::ConstantOp>(
op.getSize().getDefiningOp());
constant != nullptr) {
auto value = constant.getValue();
auto values = value.getValues<mlir::IntegerAttr>();
if ((*values.begin()).getValue().getSExtValue() ==
shape_or.value()->dimensions(dimension)) {
xla_result = xla::RemoveDynamicDimension(array, dimension);
}
}
if (!xla_result.valid()) {
xla::XlaOp dynamic_size;
if (failed(GetXlaOp(op.getSize(), value_map, &dynamic_size, op)))
return mlir::failure();
xla_result = xla::SetDimensionSize(array, dynamic_size, dimension);
}
value_map[result] = xla_result;
return mlir::success();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::SineOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp arg;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &arg, op)))
return mlir::failure();
auto xla_result = xla::Sin(Unwrap(arg));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(SortOp op, OpLoweringContext ctx) {
xla::XlaComputation comparator;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComparator(),
&comparator)))
return failure();
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
auto sorted =
xla::Sort(operands, comparator, op.getDimension(), op.getIsStable());
auto& value_map = *ctx.values;
auto shape_or = sorted.builder()->GetShape(sorted);
if (!shape_or.ok()) {
return op.emitError(shape_or.status().ToString());
}
xla::Shape& shape = shape_or.value();
if (!shape.IsTuple()) {
value_map[op.getResult(0)] = sorted;
return success();
}
BuildGetTupleElementsForTupleResults(op, sorted, ctx);
return success();
}
LogicalResult ExportXlaOp(SubtractOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp lhs;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &lhs, op)))
return mlir::failure();
xla::XlaOp rhs;
if (failed(GetXlaOp(*op.getODSOperands(1).begin(), value_map, &rhs, op)))
return mlir::failure();
auto xla_result = xla::Sub(Unwrap(lhs), Unwrap(rhs));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(TraceOp op, OpLoweringContext ctx) {
  // mhlo.trace carries no HLO semantics; exporting it is a no-op.
  return success();
}
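// Lowers mhlo.while. Implicitly captured values are appended to the
// loop-carried operands so the condition and body receive them explicitly,
// and result shardings are extended to cover them.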
LogicalResult ExportXlaOp(WhileOp op, OpLoweringContext ctx) {
xla::XlaComputation condition;
xla::XlaComputation body;
llvm::SmallVector<std::optional<xla::OpSharding>> res_shardings =
GetResultShardings(ctx.builder->sharding(), op->getNumResults());
llvm::SetVector<mlir::Value> implicit_operand_set;
getUsedValuesDefinedAbove(op->getRegions(), implicit_operand_set);
llvm::SmallVector<mlir::Value> implicit_operands =
implicit_operand_set.takeVector();
llvm::SmallVector<xla::XlaOp> implicit_args;
if (failed(GetXlaOps(op, implicit_operands, ctx, implicit_args)))
return failure();
llvm::SmallVector<std::optional<xla::OpSharding>> implicit_shardings;
if (!implicit_args.empty() && !res_shardings.empty()) {
implicit_shardings = GetXlaOpShardings(implicit_args);
res_shardings.append(implicit_shardings.begin(), implicit_shardings.end());
if (std::optional<xla::OpSharding> new_sharding =
CreateTupleSharding(res_shardings)) {
ctx.builder->SetSharding(*new_sharding);
}
}
if (failed(ctx.converter->LowerRegionAsComputation(
&op.getBody(), &body, implicit_operands,
          /*implicit_results=*/implicit_operands,
          /*ensure_single_arg=*/true, res_shardings,
res_shardings)) ||
failed(ctx.converter->LowerRegionAsComputation(
&op.getCond(), &condition, implicit_operands,
          /*implicit_results=*/{},
          /*ensure_single_arg=*/true, res_shardings))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getOperands(), ctx, operands))) return failure();
operands.append(implicit_args.begin(), implicit_args.end());
xla::XlaOp operand = operands[0];
if (operands.size() > 1) operand = Tuple(ctx.builder, operands);
xla::XlaOp whileop = xla::While(condition, body, operand);
auto& value_map = *ctx.values;
auto shape_or = whileop.builder()->GetShape(whileop);
if (!shape_or.ok()) {
return op.emitError(shape_or.status().ToString());
}
xla::Shape& shape = shape_or.value();
if (!shape.IsTuple()) {
value_map[op.getResult(0)] = whileop;
return success();
}
BuildGetTupleElementsForTupleResults(
      op, whileop, ctx, /*num_implicit_results=*/implicit_args.size());
return success();
}
LogicalResult ExportXlaOp(OptimizationBarrierOp op, OpLoweringContext ctx) {
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getOperands(), ctx, operands))) return failure();
if (operands.empty()) return success();
auto& value_map = *ctx.values;
if (operands.size() == 1) {
value_map[op.getOperation()->getResult(0)] =
xla::OptimizationBarrier(operands[0]);
} else {
auto result = xla::OptimizationBarrier(Tuple(ctx.builder, operands));
BuildGetTupleElementsForTupleResults(op, result, ctx);
}
return success();
}
LogicalResult ExportXlaOp(FusionOp op, OpLoweringContext ctx) {
if (!op.getFusionKind()) {
op.emitOpError() << "requires fusion kind for HLO translation";
return failure();
}
xla::XlaComputation fused_computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getFusedComputation(),
&fused_computation)))
return failure();
auto& values = *ctx.values;
auto aliasInfo =
xla::ConvertOutputOperandAliasing(op.getOutputOperandAliases());
auto output_operand_aliasing = absl::MakeSpan(*aliasInfo);
llvm::SmallVector<xla::XlaOp, 4> operands;
for (auto operand : op.getInputs()) operands.push_back(values[operand]);
auto fusion_kind_string =
mlir::mhlo::stringifyFusionKind(op.getFusionKind().value());
xla::XlaOp fusion = xla::internal::XlaBuilderFriend::BuildFusion(
ctx.builder, operands,
absl::string_view(fusion_kind_string.data(), fusion_kind_string.size()),
fused_computation, output_operand_aliasing);
if (op.getNumResults() == 1) {
values[op.getResult(0)] = fusion;
} else {
BuildGetTupleElementsForTupleResults(op, fusion, ctx);
}
return success();
}
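// Lowers mhlo.bitcast. When requested by the conversion options, the source
// and result layouts are also recorded in the instruction's
// BitcastBackendConfig.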
LogicalResult ExportXlaOp(BitcastOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
xla::XlaOp bitcast = xla::internal::XlaBuilderFriend::BuildBitcast(
ctx.builder, operand, xla::TypeToShape(op.getType()));
value_map[op] = bitcast;
if (ctx.converter->GetOptions().propagate_bitcast_layouts_to_backend_config) {
xla::HloInstructionProto* bitcast_proto =
xla::internal::XlaBuilderFriend::GetInstruction(bitcast);
xla::HloInstructionProto* operand_proto =
xla::internal::XlaBuilderFriend::GetInstruction(operand);
xla::LayoutProto result_layout =
ExtractLayout(op, bitcast_proto->shape().dimensions_size(),
kResultLayout)
.ToProto();
xla::LayoutProto source_layout =
ExtractLayout(op, operand_proto->shape().dimensions_size(),
kSourceLayout)
.ToProto();
xla::gpu::BitcastBackendConfig bitcast_config;
*bitcast_config.mutable_source_layout() = source_layout;
*bitcast_config.mutable_result_layout() = result_layout;
*bitcast_proto->mutable_backend_config() =
bitcast_config.SerializeAsString();
}
return success();
}
LogicalResult ExportXlaOp(UniformQuantizeOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(UniformDequantizeOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(TopKOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
auto topk = xla::TopK(operand, op.getK(), op.getLargest());
BuildGetTupleElementsForTupleResults(op, topk, ctx);
return success();
}
LogicalResult ExportXlaOp(MinimumBroadcastShapesOp op, OpLoweringContext ctx) {
return failure();
}
}
}
}
#include "xla/hlo/translate/mhlo_to_hlo/operator_writers.inc"
namespace mlir {
namespace {
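// Applies an MLIR layout array attribute to `shape`, recursing into tuple
// shapes. A trailing token subshape is exempt from the layout count check.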
LogicalResult ConvertLayout(mlir::Operation* op, const mlir::ArrayAttr& layout,
xla::ShapeProto* shape) {
if (shape->element_type() == xla::TUPLE) {
auto subshapes = shape->mutable_tuple_shapes();
size_t subshapes_data_size = subshapes->size();
if (!subshapes->empty() &&
subshapes->Mutable(subshapes->size() - 1)->element_type() == xla::TOKEN)
subshapes_data_size = subshapes->size() - 1;
if (layout.size() != subshapes_data_size) {
op->emitOpError() << "Expected layout of size " << layout.size()
<< ", but found " << subshapes->size();
return failure();
}
for (int i = 0; i < subshapes_data_size; i++) {
mlir::Attribute child = layout[i];
if (mlir::isa<mlir::UnitAttr>(child)) {
continue;
}
mlir::ArrayAttr c = mlir::dyn_cast<mlir::ArrayAttr>(child);
if (!c) {
op->emitOpError() << "Type Error: Expected layout array attribute";
return failure();
}
if (failed(ConvertLayout(op, c, subshapes->Mutable(i)))) {
return failure();
}
}
} else {
int rank = shape->dimensions().size();
if (rank) {
if (layout.size() != rank) {
return failure();
}
std::vector<int64_t> array(rank);
for (int i = 0; i < rank; i++) {
mlir::IntegerAttr attr = mlir::dyn_cast<mlir::IntegerAttr>(layout[i]);
if (!attr) {
op->emitOpError() << "Type Error: Expected layout integer attribute";
return failure();
}
array[i] = attr.getInt();
}
*shape->mutable_layout() = xla::LayoutUtil::MakeLayout(array).ToProto();
}
}
return success();
}
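// Applies infeed layout attributes to `shape`. For tuple shapes the layout
// list describes the data subshapes; `layout_index` selects the entry that
// applies to the current leaf shape.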
LogicalResult ConvertInfeedtLayout(mlir::Operation* op,
const mlir::ArrayAttr& layout,
xla::ShapeProto* shape,
int64_t layout_index = 0) {
if (shape->element_type() != xla::TUPLE) {
mlir::ArrayAttr child_layout =
mlir::dyn_cast<mlir::ArrayAttr>(layout[layout_index]);
if (!child_layout) {
op->emitOpError() << "Type Error: Expected layout array attribute";
return failure();
}
int rank = shape->dimensions().size();
if (rank) {
if (child_layout.size() != rank) {
return failure();
}
std::vector<int64_t> array(rank);
for (int i = 0; i < rank; i++) {
mlir::IntegerAttr attr =
mlir::dyn_cast<mlir::IntegerAttr>(child_layout[i]);
if (!attr) {
op->emitOpError() << "Type Error: Expected layout integer attribute";
return failure();
}
array[i] = attr.getInt();
}
*shape->mutable_layout() = xla::LayoutUtil::MakeLayout(array).ToProto();
}
return success();
}
auto subshapes = shape->mutable_tuple_shapes();
auto datashape = subshapes->Mutable(0);
if (datashape->element_type() == xla::TUPLE) {
auto data_subshapes = datashape->mutable_tuple_shapes();
if (layout.size() != data_subshapes->size()) {
op->emitOpError() << "Expected " << data_subshapes->size()
<< " layout attribute(s) for infeed data, but found "
<< layout.size();
return failure();
}
for (int i = 0; i < data_subshapes->size(); i++) {
if (failed(
ConvertInfeedtLayout(op, layout, data_subshapes->Mutable(i), i)))
return failure();
}
} else {
if (layout.size() != subshapes->size()) {
op->emitOpError() << "Expected " << subshapes->size()
<< " layout attribute(s) for infeed data, but found "
<< layout.size();
return failure();
}
for (int i = 0; i < subshapes->size(); i++) {
if (failed(ConvertInfeedtLayout(op, layout, subshapes->Mutable(i), i)))
return failure();
}
}
return success();
}
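// Dispatches an MHLO op to its generated exporter. mhlo.add on i1 tensors is
// special-cased to XOR, since addition modulo 2 on booleans is exclusive-or.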
LogicalResult ExportXlaOperatorWrapped(mlir::Operation* inst,
OpLoweringContext ctx) {
auto op = dyn_cast<mlir::mhlo::AddOp>(inst);
if (op && mlir::cast<mlir::TensorType>(op.getResult().getType())
.getElementType()
.isSignlessInteger(1)) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp xla_arg_0;
if (failed(GetXlaOp(op.getLhs(), value_map, &xla_arg_0, op)))
return mlir::failure();
xla::XlaOp xla_arg_1;
if (failed(GetXlaOp(op.getRhs(), value_map, &xla_arg_1, op)))
return mlir::failure();
auto xla_result = xla::Xor(Unwrap(xla_arg_0), Unwrap(xla_arg_1));
value_map[result] = xla_result;
return mlir::success();
}
return ExportXlaOperator(inst, ctx);
}
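// When layout propagation is enabled, overwrites the XLA instruction's shape
// with the shape (including layout) extracted from the MLIR op.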
LogicalResult ConvertToHloModule::PropagateLayouts(
const MlirToHloConversionOptions& options, mlir::Operation* inst,
xla::XlaOp xla_op) {
if (options.propagate_layouts) {
auto* shape = xla::internal::XlaBuilderFriend::GetInstruction(xla_op)
->mutable_shape();
mlir::FailureOr<xla::Shape> mlir_shape_or = ExtractXlaShape(inst);
if (failed(mlir_shape_or)) return failure();
*shape = mlir_shape_or->ToProto();
}
return success();
}
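// Lowers tensor.cast as a no-op: the operand's XLA value is reused for the
// result, which must have a static or bounded shape.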
LogicalResult ConvertToHloModule::LowerCast(
mlir::Operation* inst, const MlirToHloConversionOptions& options,
ConvertToHloModule::ValueLoweringMap* value_lowering) {
auto cast_op = cast<mlir::tensor::CastOp>(inst);
Value operand = cast_op.getOperand();
auto ty = mlir::dyn_cast<ShapedType>(operand.getType());
if (!ty || !IsBoundedOrStatic(ty)) {
inst->emitOpError()
<< "requires static or bounded operand for HLO translation";
return failure();
}
xla::XlaOp xla_operand;
auto& value_map = *value_lowering;
if (failed(GetXlaOp(operand, value_map, &xla_operand, cast_op)))
return failure();
value_map[cast_op.getResult()] = xla_operand;
if (failed(PropagateLayouts(options, inst, xla_operand))) {
return failure();
}
return success();
}
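// Lowers mhlo.composite by building its decomposition function as a separate
// computation and emitting an xla::CompositeCall that references it.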
LogicalResult ConvertToHloModule::LowerCompositeCall(
mlir::Operation* inst, xla::XlaBuilder* module_builder,
xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value) {
auto& value_map = *value_lowering;
SmallVector<xla::XlaOp, 1> operands;
for (const Value& val : inst->getOperands()) {
xla::XlaOp operand;
if (failed(GetXlaOp(val, value_map, &operand, inst))) {
return failure();
}
operands.push_back(operand);
}
auto composite_op = cast<mhlo::CompositeOp>(inst);
xla::XlaComputation computation;
if (failed(LowerBasicBlockAsFunction(
&module_
.lookupSymbol<mlir::func::FuncOp>(composite_op.getDecomposition())
.getBody()
.front(),
module_builder_
.CreateSubBuilder(composite_op.getDecomposition().str())
.get(),
false,
false,
{},
{}, {},
{}, &computation,
{}))) {
return failure();
}
std::string composite_attributes;
llvm::raw_string_ostream(composite_attributes)
<< composite_op.getCompositeAttributes();
xla::XlaOp composite_call = xla::CompositeCall(
builder, computation, operands, composite_op.getName().str(),
composite_attributes, composite_op.getVersion());
unsigned num_results = composite_op.getNumResults();
if (num_results > 1) {
for (unsigned i = 0; i != num_results; ++i) {
value_map[composite_op.getResult(i)] =
xla::GetTupleElement(composite_call, i);
}
} else if (num_results == 1) {
value_map[composite_op.getResult(0)] = composite_call;
}
*return_value = composite_call;
return success();
}
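// Lowers a constant attribute to xla::ConstantLiteral, honoring any layout
// extracted from the op and the op's sharding attribute.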
LogicalResult ConvertToHloModule::LowerConstant(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
ElementsAttr const_attr) {
if (!mlir::isa<ShapedType>(inst->getResult(0).getType())) {
return inst->emitError(
"expected shaped type during constant mhlo -> hlo translation");
}
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(inst);
if (failed(shape_or)) return failure();
auto literal_or = CreateArrayLiteralFromAttr(const_attr, shape_or->layout());
if (!literal_or.ok()) return inst->emitError(literal_or.status().ToString());
xla::XlaScopedShardingAssignment scoped_sharding(
builder, CreateOpShardingFromAttribute(inst));
auto constant = xla::ConstantLiteral(builder, literal_or.value());
auto& value_map = *value_lowering;
value_map[inst->getResult(0)] = constant;
return success();
}
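// Propagates an explicit infeed layout attribute onto the HLO infeed
// instruction and onto the get-tuple-element ops produced for its results.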
LogicalResult ConvertToHloModule::LowerInfeed(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering) {
mlir::ArrayAttr layout = inst->getAttrOfType<mlir::ArrayAttr>(kLayout);
if (!layout) return success();
auto num_results = inst->getNumResults();
bool propagate_layout_to_data_tuple = true;
for (unsigned i = 0; i < num_results; i++) {
auto iter = value_lowering->find(inst->getResult(i));
if (iter == value_lowering->end()) {
inst->emitOpError() << "inst's result value at index " << i
<< " has no match in value_lowering";
return failure();
}
auto xla_gte_op = iter->second;
xla::HloInstructionProto* get_tuple_element_proto =
xla::internal::XlaBuilderFriend::GetInstruction(xla_gte_op);
assert(xla::StringToHloOpcode(get_tuple_element_proto->opcode()).value() ==
xla::HloOpcode::kGetTupleElement &&
"The token-result of mhlo.InfeedOp should be mapped to a "
"xla::HloOpcode::kGetTupleElement");
if (i == num_results - 1) {
xla::HloInstructionProto* xla_infeed_op_proto =
xla::internal::XlaBuilderFriend::GetInstructionByHandle(
xla_gte_op.builder(), get_tuple_element_proto->operand_ids(0));
assert(xla::StringToHloOpcode(xla_infeed_op_proto->opcode()).value() ==
xla::HloOpcode::kInfeed &&
"Expected xla::HloOpcode::kInfeed op");
auto* shape = xla_infeed_op_proto->mutable_shape();
if (failed(ConvertInfeedtLayout(inst, layout, shape))) return failure();
continue;
}
auto* shape = get_tuple_element_proto->mutable_shape();
if (failed(ConvertInfeedtLayout(inst, layout, shape, i))) return failure();
if (propagate_layout_to_data_tuple) {
xla::HloInstructionProto* data_tuple_proto =
xla::internal::XlaBuilderFriend::GetInstructionByHandle(
xla_gte_op.builder(), get_tuple_element_proto->operand_ids(0));
auto* data_tuple_shape = data_tuple_proto->mutable_shape();
assert(xla::StringToHloOpcode(data_tuple_proto->opcode()).value() ==
xla::HloOpcode::kGetTupleElement &&
"Expected a xla:tupleOp for all the data results.");
if (failed(ConvertInfeedtLayout(inst, layout, data_tuple_shape)))
return failure();
}
propagate_layout_to_data_tuple = false;
}
return success();
}
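// Lowers a return op. Multiple results (or a tuple-returning entry function)
// are packed into a tuple, with result shardings and, for entry functions,
// shape-representation reshapes applied; a single result is passed through.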
LogicalResult ConvertToHloModule::LowerReturn(
Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value, const MlirToHloConversionOptions& options) {
unsigned num_return_values = inst->getNumOperands() + implicit_results.size();
std::optional<xla::OpSharding> ret_tuple_sharding =
CreateTupleSharding(ret_shardings);
auto& value_map = *value_lowering;
if ((options_.return_tuple && is_entry_function) || num_return_values != 1) {
std::vector<xla::XlaOp> returns;
returns.reserve(num_return_values);
for (Value ret : inst->getOperands()) {
xla::XlaOp& operand = returns.emplace_back();
if (failed(GetXlaOp(ret, value_map, &operand, inst))) return failure();
}
for (Value ret : implicit_results) {
xla::XlaOp& operand = returns.emplace_back();
if (failed(GetXlaOp(ret, value_map, &operand, inst))) return failure();
}
if (is_entry_function && ret_tuple_sharding) {
assert(implicit_results.empty() &&
"entry functions shouldn't have implicit results");
for (OpOperand& ret : inst->getOpOperands()) {
unsigned index = ret.getOperandNumber();
xla::Shape return_shape = xla::TypeToShape(ret.get().getType());
absl::StatusOr<xla::XlaOp> reshape =
ReshapeWithCorrectRepresentationAndSharding(
builder, returns[index], return_shape,
options_.layout_preference_fn, options_.shape_representation_fn,
ret_shardings[index],
                /*fast_mem=*/false);
if (!reshape.ok())
return inst->emitError() << reshape.status().message();
returns[index] = reshape.value();
}
}
xla::XlaScopedShardingAssignment scoped_sharding(builder,
ret_tuple_sharding);
*return_value = xla::Tuple(builder, returns);
return success();
}
if (num_return_values == 1) {
Value ret = implicit_results.empty() ? inst->getOperand(0)
: implicit_results.front();
xla::XlaOp operand;
if (failed(GetXlaOp(ret, value_map, &operand, inst))) return failure();
if (ret_tuple_sharding) {
auto tuple = Tuple(builder, {operand});
builder->SetSharding(*ret_shardings[0]);
*return_value = GetTupleElement(tuple, 0);
builder->ClearSharding();
} else {
*return_value = operand;
}
}
return success();
}
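// Dispatches lowering for a single operation: first tries the generated MHLO
// exporters, then falls back to the special cases (function calls, casts,
// composites, constants, and returns).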
LogicalResult ConvertToHloModule::Lower(
mlir::Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value) {
if (inst->getDialect() !=
inst->getContext()->getLoadedDialect<mlir::mhlo::MhloDialect>() &&
!mlir::isa<mlir::func::ConstantOp, mlir::arith::ConstantOp,
mlir::func::CallOp, mlir::tensor::CastOp,
mlir::func::ReturnOp>(inst)) {
inst->emitOpError("unsupported op for export to XLA");
return failure();
}
*return_value = xla::XlaOp();
if (succeeded(ExportXlaOperatorWrapped(
inst,
{value_lowering, this, builder, &stack_frame_indexes_builder_}))) {
if (inst->getNumResults() == 1) {
auto iter = value_lowering->find(inst->getResult(0));
if (iter == value_lowering->end()) {
inst->emitOpError(
"inst has a result, but it's not found in value_lowering");
return failure();
}
if (failed(PropagateLayouts(options_, inst, iter->second))) {
return failure();
}
}
if (isa<mhlo::InfeedOp>(inst)) {
return LowerInfeed(inst, builder, value_lowering);
}
return success();
}
if (auto call_op = dyn_cast<mlir::func::CallOp>(inst)) {
return LowerFunctionCall(call_op, builder, value_lowering);
}
if (isa<mlir::tensor::CastOp>(inst)) {
return LowerCast(inst, options_, value_lowering);
}
if (auto composite_op = dyn_cast<mhlo::CompositeOp>(inst)) {
return LowerCompositeCall(inst, &module_builder_, builder, value_lowering,
return_value);
}
ElementsAttr const_attr;
if (matchPattern(inst, m_Constant(&const_attr))) {
return LowerConstant(inst, builder, value_lowering, const_attr);
}
if (isa<mhlo::ReturnOp, mlir::func::ReturnOp>(inst)) {
return LowerReturn(inst, is_entry_function, ret_shardings, implicit_results,
builder, value_lowering, return_value, options_);
}
inst->emitOpError() << "can't be translated to XLA HLO";
return failure();
}
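// Lowers a func.call op: the callee is lowered (and memoized) first, then a
// call instruction is emitted, unpacking multi-result calls via
// get-tuple-element.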
LogicalResult ConvertToHloModule::LowerFunctionCall(
mlir::func::CallOp call_op, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering) {
auto& value_map = *value_lowering;
mlir::func::FuncOp callee =
module_.lookupSymbol<mlir::func::FuncOp>(call_op.getCallee());
if (failed(RunOnFunction(callee))) return failure();
std::vector<xla::XlaOp> operands;
for (auto operand : call_op.getOperands()) {
xla::XlaOp xla_operand;
if (failed(GetXlaOp(operand, value_map, &xla_operand, call_op)))
return failure();
operands.push_back(xla_operand);
}
xla::FrontendAttributes fe_attrs = CreateXlaFrontendAttributesFromOp(call_op);
xla::XlaScopedFrontendAttributesAssignment assignment(builder, fe_attrs);
xla::XlaOp call_result =
xla::Call(builder, lowered_computation_[callee], operands);
unsigned num_results = call_op.getNumResults();
if (num_results > 1) {
for (unsigned i = 0; i != num_results; ++i) {
value_map[call_op.getResult(i)] = xla::GetTupleElement(call_result, i);
}
} else if (num_results == 1) {
value_map[call_op.getResult(0)] = call_result;
}
return success();
}
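// Lowers a single-block function to an XLA computation. For the entry
// function this also wires up argument replication, buffer donors, and
// input/output aliasing from the function's argument attributes.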
LogicalResult ConvertToHloModule::RunOnFunction(mlir::func::FuncOp f) {
if (lowered_computation_.count(f)) return success();
if (!llvm::hasSingleElement(f)) {
return f.emitError("only single block Function supported");
}
std::unique_ptr<xla::XlaBuilder> builder_up;
bool entry_function = f.getName() == kMain;
if (!entry_function)
builder_up = module_builder_.CreateSubBuilder(f.getName().str());
auto& builder = entry_function ? module_builder_ : *builder_up;
xla::XlaComputation computation;
std::vector<bool> entry_args_same_across_replicas;
llvm::SmallVector<std::optional<xla::OpSharding>, 4> arg_shardings;
llvm::SmallVector<std::optional<xla::OpSharding>, 4> ret_shardings;
llvm::SmallVector<std::optional<xla::FrontendAttributes>, 4> arg_fe_attrs;
if (entry_function) {
bool any_arg_replicated = false;
entry_args_same_across_replicas.reserve(f.getNumArguments());
for (int64_t i = 0; i < f.getNumArguments(); ++i) {
auto attr = f.getArgAttrOfType<mlir::BoolAttr>(i, kMhloReplication);
entry_args_same_across_replicas.push_back(attr != nullptr &&
attr.getValue());
any_arg_replicated |= entry_args_same_across_replicas.back();
auto buffer_donor =
f.getArgAttrOfType<mlir::BoolAttr>(i, kJaxBufferDonor);
if (buffer_donor) {
if (options_.use_tuple_args) {
          builder.AddBufferDonor(/*param_number=*/0, /*param_index=*/{i});
} else {
          builder.AddBufferDonor(/*param_number=*/i, /*param_index=*/{});
}
}
auto aliasing_output =
f.getArgAttrOfType<mlir::IntegerAttr>(i, kTfAliasingOutput);
if (!aliasing_output) continue;
xla::ShapeIndex output_index;
if ((options_.return_tuple && entry_function) || f.getNumResults() != 1) {
output_index = {aliasing_output.getInt()};
} else {
if (aliasing_output.getInt() != 0) {
return f.emitError(
"Aliasing output must be 0 if only one output exists");
}
output_index = {};
}
if (options_.use_tuple_args) {
        builder.SetUpAlias(output_index, /*param_number=*/0,
                           /*param_index=*/{i});
      } else {
        builder.SetUpAlias(output_index, /*param_number=*/i,
                           /*param_index=*/{});
}
}
if (!any_arg_replicated) entry_args_same_across_replicas.clear();
ExtractShardingsFromFunction(f, &arg_shardings, &ret_shardings);
ExtractFrontendAttributesFromFunction(f, &arg_fe_attrs);
}
if (failed(LowerBasicBlockAsFunction(&f.front(), &builder, entry_function,
                                       /*ensure_single_arg=*/false,
                                       entry_args_same_across_replicas,
arg_shardings, ret_shardings,
arg_fe_attrs, &computation))) {
return failure();
}
if (auto execution_thread =
f->getAttrOfType<mlir::StringAttr>(kExecutionThread)) {
computation.mutable_proto()->mutable_computations(0)->set_execution_thread(
execution_thread.str());
}
for (int i = 0; i < f.getNumArguments(); ++i) {
if (auto pr =
f.getArgAttrOfType<mlir::ArrayAttr>(i, kMhloParameterReplication)) {
for (auto b : pr.getValue())
for (auto& instr : *computation.mutable_proto()
->mutable_computations(0)
->mutable_instructions())
if (instr.parameter_number() == i)
instr.mutable_parameter_replication()
->add_replicated_at_leaf_buffers(
mlir::cast<mlir::BoolAttr>(b).getValue());
}
}
lowered_computation_[f] = std::move(computation);
return success();
}
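// Computes the entry computation's argument shapes (applying the configured
// layout preference and shape representation functions) and expands per-arg
// replication flags to per-leaf-buffer flags.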
LogicalResult ConvertToHloModule::SetEntryTupleShapesAndLeafReplication(
Block* block, const std::vector<bool>& entry_args_same_across_replicas,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes,
std::vector<bool>* leaf_replication) {
arg_shapes->reserve(block->getNumArguments());
leaf_replication->reserve(block->getNumArguments());
for (BlockArgument& arg : block->getArguments()) {
arg_shapes->push_back(xla::TypeToShape(arg.getType()));
xla::Shape& arg_shape = arg_shapes->back();
auto layout_preference_status =
options_.layout_preference_fn ? options_.layout_preference_fn(arg_shape)
: XlaLayoutPreference::kNoPreference;
if (!layout_preference_status.ok())
return block->getParentOp()->emitError()
<< layout_preference_status.status().message();
auto arg_shape_status = options_.shape_representation_fn
? options_.shape_representation_fn(
                                      arg_shape, /*use_fast_memory=*/false,
layout_preference_status.value())
: arg_shape;
if (!arg_shape_status.ok())
return block->getParentOp()->emitError()
<< arg_shape_status.status().message();
arg_shape = std::move(arg_shape_status.value());
if (entry_args_same_across_replicas.empty()) continue;
for (int i = 0, e = xla::ShapeUtil::GetLeafCount(arg_shape); i < e; ++i)
leaf_replication->push_back(
entry_args_same_across_replicas[arg.getArgNumber()]);
}
return success();
}
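// Builds a tuple sharding for the entry parameter tuple from the per-argument
// shardings, rewriting argument layouts for sharded shapes; arguments without
// a sharding fall back to REPLICATED.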
LogicalResult ConvertToHloModule::SetEntryTupleShardings(
Block* block, xla::XlaBuilder* builder,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes) {
if (!arg_shardings.empty() && SomeOptionalShardingsAreSet(arg_shardings)) {
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::TUPLE);
for (const auto& arg_sharding : llvm::enumerate(arg_shardings)) {
if (arg_sharding.value().has_value()) {
auto hlo_sharding = xla::HloSharding::FromProto(*arg_sharding.value());
if (!hlo_sharding.ok())
return block->getParentOp()->emitError()
<< hlo_sharding.status().message();
auto status = RewriteLayoutWithShardedShape(
            hlo_sharding.value(), /*use_fast_memory=*/false,
options_.layout_preference_fn, options_.shape_representation_fn,
&(*arg_shapes)[arg_sharding.index()]);
if (!status.ok())
return block->getParentOp()->emitError() << status.message();
*sharding.add_tuple_shardings() = *arg_sharding.value();
} else {
xla::OpSharding fallback_sharding;
fallback_sharding.set_type(xla::OpSharding::REPLICATED);
*sharding.add_tuple_shardings() = fallback_sharding;
}
}
builder->SetSharding(sharding);
}
return success();
}
namespace {
xla::OpMetadata GetOpNameMetadataFromLocation(Value value) {
xla::OpMetadata m;
m.set_op_name(mhlo::GetDebugNameFromLocation(value.getLoc()));
return m;
}
}
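// Lowers a basic block to an XLA computation. Depending on the options, block
// arguments become a single tuple parameter, a single parameter, or one
// parameter per argument; the block's ops are then lowered in order.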
LogicalResult ConvertToHloModule::LowerBasicBlockAsFunction(
Block* block, xla::XlaBuilder* builder, bool is_entry_function,
bool ensure_single_arg,
const std::vector<bool>& entry_args_same_across_replicas,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<std::optional<xla::FrontendAttributes>> fe_attrs,
xla::XlaComputation* result, llvm::ArrayRef<mlir::Value> implicit_operands,
llvm::ArrayRef<mlir::Value> implicit_results) {
ValueLoweringMap lowering;
if (is_entry_function && options_.use_tuple_args) {
llvm::SmallVector<xla::Shape, 4> arg_shapes;
std::vector<bool> leaf_replication;
if (failed(SetEntryTupleShapesAndLeafReplication(
block, entry_args_same_across_replicas, &arg_shapes,
&leaf_replication)))
return failure();
if (failed(
SetEntryTupleShardings(block, builder, arg_shardings, &arg_shapes)))
return failure();
xla::Shape input_shape = xla::ShapeUtil::MakeTupleShape(arg_shapes);
auto tuple =
xla::Parameter(builder, 0, input_shape, kArgTuple, leaf_replication);
builder->ClearSharding();
for (BlockArgument& arg : block->getArguments()) {
xla::XlaScopedShardingAssignment scoped_sharding(
builder, arg_shardings.empty() ? std::nullopt
: arg_shardings[arg.getArgNumber()]);
lowering[arg] = xla::GetTupleElement(tuple, arg.getArgNumber());
}
} else {
if (ensure_single_arg) {
llvm::SmallVector<xla::Shape, 4> arg_shapes;
auto args_size = block->getNumArguments() + implicit_operands.size();
arg_shapes.reserve(args_size);
for (BlockArgument& arg : block->getArguments())
arg_shapes.push_back(xla::TypeToShape(arg.getType()));
for (Value implicit_operand : implicit_operands)
arg_shapes.push_back(xla::TypeToShape(implicit_operand.getType()));
if (args_size > 1) {
xla::XlaScopedShardingAssignment scoped_sharding(
builder, arg_shardings.empty()
? std::nullopt
: CreateTupleSharding(arg_shardings));
auto tuple = xla::Parameter(
builder, 0, xla::ShapeUtil::MakeTupleShape(arg_shapes), kArgTuple);
for (BlockArgument& arg : block->getArguments()) {
auto num = arg.getArgNumber();
xla::XlaScopedShardingAssignment scoped_sharding(
builder,
arg_shardings.empty() ? std::nullopt : arg_shardings[num]);
lowering[arg] = xla::GetTupleElement(tuple, num);
}
for (auto [implicit_index, implicit_operand] :
llvm::enumerate(implicit_operands)) {
int64_t arg_index = block->getNumArguments() + implicit_index;
xla::XlaScopedShardingAssignment scoped_sharding(
builder,
arg_shardings.empty() ? std::nullopt : arg_shardings[arg_index]);
lowering[implicit_operand] = xla::GetTupleElement(tuple, arg_index);
}
} else if (args_size == 1) {
xla::XlaScopedShardingAssignment scoped_sharding(
builder,
arg_shardings.empty() ? std::nullopt : arg_shardings.front());
mlir::Value arg = implicit_operands.empty() ? block->getArgument(0)
: implicit_operands.front();
xla::XlaScopedOpMetadataAssignment op_metadata(
builder, GetOpNameMetadataFromLocation(arg));
lowering[arg] = xla::Parameter(builder, 0, arg_shapes[0], kArgPrefix);
} else {
xla::Parameter(builder, 0, xla::ShapeUtil::MakeTupleShape(arg_shapes),
kArgEmptyTuple);
}
} else {
for (BlockArgument& arg : block->getArguments()) {
auto num = arg.getArgNumber();
xla::Shape shape = xla::TypeToShape(arg.getType());
xla::XlaScopedShardingAssignment scoped_sharding(
builder, arg_shardings.empty() ? std::nullopt : arg_shardings[num]);
if (!fe_attrs.empty() && fe_attrs[num]) {
builder->SetFrontendAttributes(*fe_attrs[num]);
}
xla::XlaScopedOpMetadataAssignment op_metadata(
builder, GetOpNameMetadataFromLocation(arg));
if (entry_args_same_across_replicas.empty()) {
lowering[arg] = xla::Parameter(builder, num, shape,
absl::StrCat(kArgPrefix, num));
} else {
lowering[arg] = xla::Parameter(
builder, num, shape, absl::StrCat(kArgPrefix, num),
            // One replication flag per leaf buffer of the parameter shape
            // (std::vector<bool> takes the element count first).
            std::vector<bool>(xla::ShapeUtil::GetLeafCount(shape),
                              entry_args_same_across_replicas[num]));
}
builder->ClearFrontendAttributes();
}
}
}
xla::XlaOp return_value;
for (auto& inst : *block)
if (failed(Lower(&inst, is_entry_function, ret_shardings, implicit_results,
builder, &lowering, &return_value)))
return failure();
auto computation_or =
return_value.valid() ? builder->Build(return_value) : builder->Build();
if (!computation_or.ok()) {
block->back().emitError() << computation_or.status().message();
return failure();
}
*result = std::move(computation_or.value());
return success();
}
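// Lowers a single-block region (e.g. the body of a control-flow op) into a
// fresh subcomputation, threading through implicit operands and results.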
LogicalResult ConvertToHloModule::LowerRegionAsComputation(
mlir::Region* region, xla::XlaComputation* func,
llvm::ArrayRef<mlir::Value> implicit_operands,
llvm::ArrayRef<mlir::Value> implicit_results, bool ensure_single_arg,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings) {
std::unique_ptr<xla::XlaBuilder> builder = module_builder_.CreateSubBuilder(
absl::StrCat(kRegionPrefix, region_id_++));
return LowerBasicBlockAsFunction(
®ion->front(), builder.get(),
      /*is_entry_function=*/false,
      /*ensure_single_arg=*/ensure_single_arg,
      /*entry_args_same_across_replicas=*/{}, arg_shardings, ret_shardings,
      /*fe_attrs=*/{}, func, implicit_operands, implicit_results);
}
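// Runs the MHLO prepare-for-export pipeline; if the module contains
// shape-dialect ops, symbolic shape optimization and shape legalization
// passes are additionally scheduled.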
absl::Status PrepareForExport(mlir::ModuleOp module) {
bool hasShapeOps = false;
module.walk([&](Operation* op) {
hasShapeOps |= isa<shape::ShapeDialect>(op->getDialect());
return hasShapeOps ? WalkResult::interrupt() : WalkResult::advance();
});
mlir::PassManager pm(module.getContext());
pm.addNestedPass<mlir::func::FuncOp>(mhlo::createPrepareForExportPass());
if (hasShapeOps) {
pm.addNestedPass<mlir::func::FuncOp>(
mhlo::createSymbolicShapeOptimizationPass());
pm.addNestedPass<mlir::func::FuncOp>(mhlo::createShapeLegalizeToHloPass());
}
mlir::BaseScopedDiagnosticHandler handler(module.getContext());
(void)pm.run(module);
absl::Status s = handler.ConsumeStatus();
if (!s.ok()) {
s = absl::Status(
s.code(),
absl::StrCat("Unable to prepare for XLA export: ", s.message()));
}
return s;
}
}
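// Converts an MHLO/StableHLO module to an xla::HloProto, copying module-level
// attributes (cross-program prefetches, shardings, frontend attributes, entry
// computation layouts/tiles, and the stack frame index) into the proto.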
absl::Status ConvertMlirHloToHlo(mlir::ModuleOp module,
xla::HloProto* hlo_proto,
MlirToHloConversionOptions options) {
mlir::PassManager pm(module->getContext());
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
if (failed(pm.run(module))) {
return tsl::errors::Internal("Unable to convert StableHLO to MHLO");
}
TF_RETURN_IF_ERROR(PrepareForExport(module));
mlir::BaseScopedDiagnosticHandler diag_handler(module.getContext());
xla::XlaBuilder module_builder(kMain);
ConvertToHloModule converter(module, module_builder, options);
if (failed(converter.Run())) return diag_handler.ConsumeStatus();
xla::HloModuleProto hlo_module = converter.ConsumeMainProto();
StringRef module_name = module.getName() ? *module.getName() : kMain;
hlo_module.set_name(module_name.str());
if (auto cross_program_prefetches =
module->getAttrOfType<mlir::ArrayAttr>(kMhloCrossProgramPrefetches)) {
for (const auto& prefetch :
Convert_cross_program_prefetches(cross_program_prefetches)) {
*hlo_module.add_cross_program_prefetches() = std::move(prefetch);
}
}
if (auto is_dynamic = module->getAttrOfType<mlir::BoolAttr>(kMhloIsDynamic)) {
hlo_module.set_is_dynamic(is_dynamic.getValue());
}
if (auto frontend_attributes =
module->getAttrOfType<DictionaryAttr>(kMhloFrontendAttributes)) {
ConstructFrontendAttributesFromAttribute(
frontend_attributes, *hlo_module.mutable_frontend_attributes());
}
if (auto use_auto_spmd_partitioning =
module->getAttrOfType<mlir::BoolAttr>(kMhloUseAutoSpmdPartitioning)) {
hlo_module.set_use_auto_spmd_partitioning(
use_auto_spmd_partitioning.getValue());
}
if (auto spmd_output_sharding =
module->getAttrOfType<mlir::StringAttr>(kMhloSpmdOutputSharding)) {
*hlo_module.mutable_spmd_output_sharding() =
*xla::ConvertSharding(spmd_output_sharding.getValue());
}
if (auto input_output_alias =
module->getAttrOfType<mlir::ArrayAttr>(kMhloInputOutputAlias)) {
if (std::optional<xla::HloInputOutputAliasProto> input_output_alias_proto =
xla::ConvertInputOutputAlias(input_output_alias.getValue())) {
*hlo_module.mutable_input_output_alias() = *input_output_alias_proto;
}
}
if (auto spmd_parameters_sharding = module->getAttrOfType<mlir::ArrayAttr>(
kMhloSpmdParametersShardings)) {
for (const auto& sharding : spmd_parameters_sharding.getValue()) {
*hlo_module.add_spmd_parameters_shardings() = *xla::ConvertSharding(
mlir::cast<mlir::StringAttr>(sharding).getValue());
}
}
if (auto xla_entry_computation_parameter_layout =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationParameterLayouts)) {
auto status = mhlo::ExportModuleEntryComputationParameterLayouts(
xla_entry_computation_parameter_layout, hlo_module);
if (!status.ok()) return status;
}
if (auto xla_entry_computation_parameter_tiles =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationParameterTiles)) {
auto status = mhlo::ExportModuleEntryComputationParameterTiles(
xla_entry_computation_parameter_tiles, hlo_module);
if (!status.ok()) return status;
}
if (auto xla_entry_computation_result_layout =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationResultLayout)) {
auto status = mhlo::ExportModuleEntryComputationResultLayout(
xla_entry_computation_result_layout, hlo_module);
if (!status.ok()) return status;
}
if (auto xla_entry_computation_result_tiles =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationResultTiles)) {
auto status = mhlo::ExportModuleEntryComputationResultTiles(
xla_entry_computation_result_tiles, hlo_module);
if (!status.ok()) return status;
}
xla::StackFrameIndexProto stack_frame_index =
converter.BuildStackFramesIndexProto();
hlo_module.mutable_stack_frame_index()->Swap(&stack_frame_index);
hlo_proto->mutable_hlo_module()->Swap(&hlo_module);
return absl::OkStatus();
}
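// Converts an MHLO/StableHLO module into an xla::HloModule, deriving the
// module config from the proto and the current debug flags. Illustrative use
// (a sketch only; error handling elided):
//
//   TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::HloModule> hlo_module,
//                       ConvertMlirHloToHloModule(mlir_module, options));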
absl::StatusOr<std::unique_ptr<xla::HloModule>> ConvertMlirHloToHloModule(
mlir::ModuleOp module, MlirToHloConversionOptions options) {
xla::HloProto hlo_proto;
TF_RETURN_IF_ERROR(ConvertMlirHloToHlo(module, &hlo_proto, options));
const xla::HloModuleProto& module_proto = hlo_proto.hlo_module();
TF_ASSIGN_OR_RETURN(xla::HloModuleConfig config,
xla::HloModule::CreateModuleConfigFromProto(
module_proto, xla::GetDebugOptionsFromFlags()));
mhlo::ExportHloModuleConfig(config, module);
return xla::HloModule::CreateFromProto(module_proto, config);
}
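// Lowers the ops of `block` directly into an existing XlaBuilder, mapping
// block arguments to the caller-provided `xla_params` and collecting the
// operands of the terminator into `returns`.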
absl::Status BuildHloFromMlirHlo(mlir::Block& block, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
MlirToHloConversionOptions options) {
auto module = block.getParentOp()->getParentOfType<mlir::ModuleOp>();
TF_RETURN_IF_ERROR(PrepareForExport(module));
options.return_tuple = false;
options.use_tuple_args = false;
ConvertToHloModule converter(module, builder, options);
ConvertToHloModule::ValueLoweringMap lowering;
if (xla_params.size() != block.getArguments().size())
return tsl::errors::Internal("xla_params size (", xla_params.size(),
") != block arguments size (",
block.getArguments().size(), ")");
for (BlockArgument& arg : block.getArguments()) {
auto num = arg.getArgNumber();
lowering[arg] = xla_params[num];
}
mlir::BaseScopedDiagnosticHandler diag_handler(module.getContext());
for (auto& inst : block) {
if (isa<mhlo::ReturnOp, mlir::func::ReturnOp>(inst)) {
returns.resize(inst.getNumOperands());
for (OpOperand& ret : inst.getOpOperands()) {
unsigned index = ret.getOperandNumber();
xla::XlaOp operand;
if (failed(GetXlaOp(ret.get(), lowering, &operand, &inst)))
return diag_handler.ConsumeStatus();
returns[index] = operand;
}
} else {
xla::XlaOp return_value;
      if (failed(converter.Lower(&inst, /*is_entry_function=*/true,
                                 /*ret_shardings=*/{},
                                 /*implicit_results=*/{}, &builder, &lowering,
&return_value)))
return diag_handler.ConsumeStatus();
}
}
return absl::OkStatus();
}
absl::Status ConvertMlirHloToHlo(mlir::ModuleOp module,
::xla::HloProto* hlo_proto,
bool use_tuple_args, bool return_tuple,
MlirToHloConversionOptions options) {
options.use_tuple_args = use_tuple_args;
options.return_tuple = return_tuple;
return ConvertMlirHloToHlo(module, hlo_proto, options);
}
} | #include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "stablehlo/dialect/Register.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace mlir {
namespace {
using testing::_;
using testing::AllOf;
using testing::HasSubstr;
using tsl::testing::StatusIs;
TEST(ConvertMlirHloToHloModuleTest, PropagatesDiagnostics) {
const std::string mlir_source = R"mlir(
func.func @main(%arg0: tensor<?xf32>, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor<?xf32> {
%0 = shape.const_shape [14, 1] : tensor<2xindex>
%1 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor<?xf32>, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor<?xf32>
func.return %1 : tensor<?xf32>
}
)mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect, mlir::shape::ShapeDialect>();
mlir::stablehlo::registerAllDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> module;
{
mlir::BaseScopedDiagnosticHandler handler(&context);
module = mlir::parseSourceString<mlir::ModuleOp>(mlir_source, &context);
TF_ASSERT_OK(handler.ConsumeStatus());
}
ASSERT_THAT(ConvertMlirHloToHloModule(*module),
StatusIs(_, AllOf(HasSubstr("Unable to prepare for XLA export"),
HasSubstr("real_dynamic_slice"))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bf1418cd-bbf0-470c-baf8-5dfca9a1e659 | cpp | tensorflow/tensorflow | hlo_pass_pipeline | third_party/xla/xla/hlo/pass/hlo_pass_pipeline.cc | third_party/xla/xla/hlo/pass/hlo_pass_pipeline_test.cc | #include "xla/hlo/pass/hlo_pass_pipeline.h"
#include <functional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/hlo_proto_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace {
void RecordPassStartMetadata(HloModule& module, const std::string& pass_name,
const std::string& pipeline_name) {
module.metadata()->RecordPassStart();
TF_CHECK_OK(module.metadata()->set_current_pass_name(pass_name));
TF_CHECK_OK(module.metadata()->set_current_pass_pipeline_name(pipeline_name));
}
void RecordPassStartMetadata(HloModuleGroup& module_group,
const std::string& pass_name,
const std::string& pipeline_name) {
for (HloModule* module : module_group.modules()) {
RecordPassStartMetadata(*module, pass_name, pipeline_name);
}
}
absl::Status AttemptRecordPassEndMetadata(HloModule& module,
const std::string& pass_name,
bool module_changed) {
TF_RETURN_IF_ERROR(
module.metadata()->set_current_pass_module_id(module.unique_id()));
TF_RETURN_IF_ERROR(
module.metadata()->set_current_pass_module_changed(module_changed));
TF_RETURN_IF_ERROR(module.metadata()->RecordPassEnd());
return absl::OkStatus();
}
void RecordPassEndMetadata(HloModule& module, const std::string& pass_name,
bool module_changed) {
absl::Status status =
AttemptRecordPassEndMetadata(module, pass_name, module_changed);
if (!status.ok()) {
LOG(FATAL) << status;
}
}
absl::Status AttemptRecordPassEndMetadata(HloModuleGroup& module_group,
const std::string& pass_name,
bool module_changed) {
for (HloModule* module : module_group.modules()) {
for (HloModule* other_module : module_group.modules()) {
TF_RETURN_IF_ERROR(
module->metadata()->add_current_pass_module_group_module_id(
other_module->unique_id()));
}
TF_RETURN_IF_ERROR(
AttemptRecordPassEndMetadata(*module, pass_name, module_changed));
}
return absl::OkStatus();
}
void RecordPassEndMetadata(HloModuleGroup& module_group,
const std::string& pass_name, bool module_changed) {
absl::Status status =
AttemptRecordPassEndMetadata(module_group, pass_name, module_changed);
if (!status.ok()) {
LOG(FATAL) << status;
}
}
}
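// Runs every registered invariant checker on `hlo`, failing with a message
// that names the pass after which the check failed; checkers must not mutate
// the module.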
template <typename HloT>
absl::Status HloPassPipeline::RunInvariantCheckers(
HloT* hlo, absl::string_view after_pass_name,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto& invariant_checker : invariant_checkers_) {
VLOG(1) << " Invariant checker " << invariant_checker->name();
absl::StatusOr<bool> changed_status =
RunHelper(invariant_checker.get(), hlo, execution_threads);
VLOG(1) << " Invariant checker done " << invariant_checker->name();
if (!changed_status.ok()) {
VLOG(2) << "Failed invariant check:";
XLA_VLOG_LINES(2, hlo->ToString());
return tsl::errors::CreateWithUpdatedMessage(
changed_status.status(),
absl::StrCat(changed_status.status().message(), "\n\nFailed after ",
after_pass_name));
}
TF_RET_CHECK(!changed_status.value())
<< "invariant checkers must not change the graph";
}
return absl::OkStatus();
}
namespace {
std::string UniqueId(const HloModule& mod) {
return std::to_string(mod.unique_id());
}
std::string UniqueId(const HloModuleGroup& group) {
return absl::StrJoin(group.modules(), "-",
[](std::string* out, const HloModule* mod) {
out->append(std::to_string(mod->unique_id()));
});
}
}
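// Runs the enabled passes in order, recording pass metadata and compilation
// stats, optionally dumping the HLO between passes (per
// --xla_dump_hlo_pass_re), and re-running invariant checkers after any pass
// that changed the module.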
template <typename HloT>
absl::StatusOr<bool> HloPassPipeline::RunPassesInternal(
HloT* hlo, const DebugOptions& debug_options,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto passes = GetEnabledPasses(debug_options);
std::string dump_regex = debug_options.xla_dump_hlo_pass_re();
static constexpr absl::string_view kPipelineStart = "pipeline-start";
static constexpr absl::string_view kPipelineEnd = "pipeline-end";
std::string pipeline_name = std::string(name());
tsl::profiler::ScopedAnnotation annotation{[&] {
return absl::StrFormat("XlaPassPipeline:#name=%s,module=%s,program_id=%s#",
pipeline_name, hlo->name(), UniqueId(*hlo));
}};
TF_RETURN_IF_ERROR(
RunInvariantCheckers(hlo, kPipelineStart, execution_threads));
RecordPassStartMetadata(*hlo, std::string(kPipelineStart), pipeline_name);
MaybeDumpHloAndSaveFilenames(*hlo,
                               /*after_pass_name=*/kPipelineStart,
                               /*before_pass_name=*/passes.empty()
? kPipelineEnd
: passes.front()->name());
RecordPassEndMetadata(*hlo, std::string(kPipelineStart),
                        /*module_changed=*/false);
bool changed = false;
for (int i = 0; i < passes.size(); i++) {
HloPassInterface* pass = passes[i];
std::string pass_name = std::string(pass->name());
XLA_SCOPED_LOGGING_TIMER(absl::StrCat("HLO pass: ", pass_name));
tsl::profiler::ScopedAnnotation annotation{[&] {
return absl::StrFormat("XlaPass:#name=%s,module=%s,program_id=%s#",
pass_name, hlo->name(), UniqueId(*hlo));
}};
VLOG(1) << " HLO pass " << pass_name;
VLOG(2) << " Module hash " << absl::HashOf(*hlo);
if (!pass->IsPassPipeline()) {
compilation_stats_->StartPass(pass_name);
}
RecordPassStartMetadata(*hlo, pass_name, pipeline_name);
auto status_or_changed = RunHelper(pass, hlo, execution_threads);
if (auto status = status_or_changed.status(); !status.ok()) {
compilation_stats_->RecordPassError(
pass_name, absl::StatusCodeToString(status.code()));
}
TF_ASSIGN_OR_RETURN(bool pass_changed, status_or_changed);
if (!dump_regex.empty() && (pass_changed || dump_regex != ".*")) {
MaybeDumpHloAndSaveFilenames(*hlo,
                                   /*after_pass_name=*/pass_name,
                                   /*before_pass_name=*/i + 1 >= passes.size()
? kPipelineEnd
: passes[i + 1]->name());
}
RecordPassEndMetadata(*hlo, pass_name, pass_changed);
changed |= pass_changed;
if (pass_changed) {
VLOG(3) << " Pass caused changes " << pass_name;
auto status = RunInvariantCheckers(hlo, pass_name, execution_threads);
if (!status.ok()) {
compilation_stats_->RecordPassError(
pass_name, absl::StatusCodeToString(status.code()));
}
TF_RETURN_IF_ERROR(status);
}
if (!pass->IsPassPipeline()) {
compilation_stats_->EndPass(pass_name);
}
}
return changed;
}
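// Filters the pipeline's passes against --xla_disable_all_hlo_passes,
// --xla_disable_hlo_passes, and --xla_enable_hlo_passes_only (the latter two
// are mutually exclusive).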
std::vector<HloPassInterface*> HloPassPipeline::GetEnabledPasses(
const DebugOptions& debug_options) {
if (debug_options.xla_disable_all_hlo_passes()) {
VLOG(1) << "*All* passes disabled by --xla_disable_all_hlo_passes.";
return {};
}
absl::flat_hash_set<std::string> disabled_pass_names(
debug_options.xla_disable_hlo_passes().begin(),
debug_options.xla_disable_hlo_passes().end());
absl::flat_hash_set<std::string> enabled_pass_names(
debug_options.xla_enable_hlo_passes_only().begin(),
debug_options.xla_enable_hlo_passes_only().end());
if (!disabled_pass_names.empty()) {
VLOG(1) << "Passes disabled by --xla_disable_hlo_passes: "
<< absl::StrJoin(disabled_pass_names, ", ");
}
if (!enabled_pass_names.empty()) {
VLOG(1) << "Passes enabled by --xla_enable_hlo_passes_only: "
<< absl::StrJoin(enabled_pass_names, ", ");
}
CHECK(disabled_pass_names.empty() || enabled_pass_names.empty());
if (disabled_pass_names.contains(name())) {
VLOG(1) << "Disable the full pass: " << name();
return {};
}
if (enabled_pass_names.contains(name())) {
VLOG(1) << "Enable the full pass: " << name();
enabled_pass_names.clear();
}
std::vector<HloPassInterface*> enabled_passes;
if (!enabled_pass_names.empty()) {
for (auto& pass : passes_) {
if (enabled_pass_names.contains(pass->name())) {
enabled_passes.push_back(pass.get());
}
}
} else {
for (auto& pass : passes_) {
if (!disabled_pass_names.contains(pass->name())) {
enabled_passes.push_back(pass.get());
}
}
}
return enabled_passes;
}
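// Dumps the module between two passes if dumping is enabled and records the
// resulting dump filenames in the module metadata.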
void HloPassPipeline::MaybeDumpHloAndSaveFilenames(
HloModule& module, absl::string_view after_pass_name,
absl::string_view before_pass_name) {
for (const std::string& filename : DumpHloModuleBetweenPassesIfEnabled(
name(), before_pass_name, after_pass_name, module)) {
absl::Status status =
module.metadata()->add_current_pass_dump_filename(filename);
if (!status.ok()) {
LOG(FATAL) << status;
}
}
}
void HloPassPipeline::MaybeDumpHloAndSaveFilenames(
HloModuleGroup& module_group, absl::string_view after_pass_name,
absl::string_view before_pass_name) {
for (HloModule* module : module_group.modules()) {
MaybeDumpHloAndSaveFilenames(*module, after_pass_name, before_pass_name);
}
}
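// Entry points: run the pipeline on a single module, or on a module group
// (using the first module's debug options for the whole group).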
absl::StatusOr<bool> HloPassPipeline::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
run_called_ = true;
VLOG(1) << "Running HLO pass pipeline on module " << module->name() << ": "
<< name();
return RunPassesInternal(module, module->config().debug_options(),
execution_threads);
}
absl::StatusOr<bool> HloPassPipeline::RunOnModuleGroup(
HloModuleGroup* module_group,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
run_called_ = true;
VLOG(1) << "Running HLO pass pipeline on module group "
<< module_group->name() << ": " << name();
if (module_group->modules().empty()) {
VLOG(1) << "Module group is empty. Nothing to do.";
return false;
}
return RunPassesInternal(module_group,
module_group->module(0).config().debug_options(),
execution_threads);
}
} | #include "xla/hlo/pass/hlo_pass_pipeline.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/pass/hlo_pass_interface.h"
#include "xla/service/hlo_parser.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
using ::testing::StrEq;
class HloPassPipelineTest : public HloTestBase {
protected:
absl::StatusOr<HloModuleGroup> ParseModuleGroup(
absl::Span<const std::string> hlo_strings) {
HloModuleGroup group(TestName());
for (const std::string& hlo_string : hlo_strings) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
group.push_back(std::move(module));
}
return std::move(group);
}
};
class FooToBarModulePass : public HloModulePass {
absl::string_view name() const override { return "foo2bar"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
bool changed = false;
for (HloComputation* computation :
module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->name() == "foo") {
instruction->SetAndSanitizeName("bar");
changed = true;
}
}
}
return changed;
}
};
class ReverseStringModulePass : public HloModulePass {
absl::string_view name() const override { return "reverse"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
bool changed = false;
for (HloComputation* computation :
module->computations(execution_threads)) {
HloInstruction* root = computation->root_instruction();
std::string name(root->name());
std::reverse(name.begin(), name.end());
root->SetAndSanitizeName(name);
changed = true;
}
return changed;
}
};
class BazToQuxModuleGroupPass : public HloModuleGroupPass {
absl::string_view name() const override { return "baz2qux"; }
using HloPassInterface::RunOnModuleGroup;
absl::StatusOr<bool> RunOnModuleGroup(
HloModuleGroup* module_group,
const absl::flat_hash_set<absl::string_view>& execution_threads)
override {
bool changed = false;
for (HloModule* module : module_group->modules()) {
for (HloComputation* computation :
module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->name() == "baz") {
instruction->SetAndSanitizeName("qux");
changed = true;
}
}
}
}
return changed;
}
};
class BarBlowerUpper : public HloModulePass {
absl::string_view name() const override { return "bar-blower-upper"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
for (HloComputation* computation :
module->computations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->name() == "bar") {
return Internal("Module has instruction named bar");
}
}
}
return false;
}
};
TEST_F(HloPassPipelineTest, ModulePassChanged) {
const std::string module_str = R"(
HloModule ModulePassChanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloPassPipeline pipeline(TestName());
pipeline.AddPass<FooToBarModulePass>();
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "foo");
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(root->name(), "bar");
}
TEST_F(HloPassPipelineTest, ModulePassUnchanged) {
const std::string module_str = R"(
HloModule ModulePassUnchanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT blahblah = f32[] multiply(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloPassPipeline pipeline(TestName());
pipeline.AddPass<FooToBarModulePass>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(HloPassPipelineTest, ModulePassChangedForParallelThread) {
const std::string module_str = R"(
HloModule ModulePassChanged
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %Entry (p0: f32[10], p1: f32[10]) -> f32[10] {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
ROOT %baz = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloPassPipeline pipeline(TestName());
pipeline.AddPass<ReverseStringModulePass>();
HloInstruction* main_root = module->entry_computation()->root_instruction();
HloInstruction* parallel_thread_root =
main_root->async_wrapped_computation()->root_instruction();
EXPECT_EQ(main_root->name(), "baz");
EXPECT_EQ(parallel_thread_root->name(), "foo");
TF_ASSERT_OK_AND_ASSIGN(bool changed,
pipeline.Run(module.get(), {"parallel_thread"}));
EXPECT_TRUE(changed);
EXPECT_EQ(main_root->name(), "baz");
EXPECT_EQ(parallel_thread_root->name(), "oof");
}
TEST_F(HloPassPipelineTest, ModulePassChangedForAllExecutionThreads) {
const std::string module_str = R"(
HloModule ModulePassChanged
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %Entry (p0: f32[10], p1: f32[10]) -> f32[10] {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
ROOT %baz = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloPassPipeline pipeline(TestName());
pipeline.AddPass<ReverseStringModulePass>();
HloInstruction* main_root = module->entry_computation()->root_instruction();
HloInstruction* parallel_thread_root =
main_root->async_wrapped_computation()->root_instruction();
EXPECT_EQ(main_root->name(), "baz");
EXPECT_EQ(parallel_thread_root->name(), "foo");
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_EQ(main_root->name(), "zab");
EXPECT_EQ(parallel_thread_root->name(), "oof");
}
TEST_F(HloPassPipelineTest, MixedPipeline) {
const std::string module_0_str = R"(
HloModule MixedPipeline.1
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT baz = f32[] multiply(a, b)
}
)";
const std::string module_1_str = R"(
HloModule MixedPipeline.0
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup module_group,
ParseModuleGroup({module_0_str, module_1_str}));
HloPassPipeline pipeline(TestName());
pipeline.AddPass<BazToQuxModuleGroupPass>();
pipeline.AddPass<FooToBarModulePass>();
HloInstruction* root0 =
module_group.module(0).entry_computation()->root_instruction();
HloInstruction* root1 =
module_group.module(1).entry_computation()->root_instruction();
EXPECT_EQ(root0->name(), "baz");
EXPECT_EQ(root1->name(), "foo");
TF_ASSERT_OK_AND_ASSIGN(bool changed,
pipeline.RunOnModuleGroup(&module_group));
EXPECT_TRUE(changed);
EXPECT_EQ(root0->name(), "qux");
EXPECT_EQ(root1->name(), "bar");
}
TEST_F(HloPassPipelineTest, InvariantChecker) {
const std::string module_str = R"(
HloModule InvariantChecker
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(module_str));
{
HloPassPipeline pipeline(TestName());
pipeline.AddInvariantChecker<BarBlowerUpper>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_FALSE(changed);
}
{
HloPassPipeline pipeline(TestName());
pipeline.AddInvariantChecker<BarBlowerUpper>();
pipeline.AddPass<FooToBarModulePass>();
absl::Status status = pipeline.Run(module.get()).status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(status.message(),
::testing::HasSubstr("Module has instruction named bar"));
EXPECT_THAT(status.message(), ::testing::HasSubstr("Failed after foo2bar"));
}
{
HloPassPipeline pipeline(TestName());
pipeline.AddInvariantChecker<BarBlowerUpper>();
absl::Status status = pipeline.Run(module.get()).status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(status.message(),
::testing::HasSubstr("Module has instruction named bar"));
EXPECT_THAT(status.message(),
::testing::HasSubstr("Failed after pipeline-start"));
}
}
TEST_F(HloPassPipelineTest, ModuleGroupPassOnModule) {
const std::string module_str = R"(
HloModule ModuleGroupPassOnModule
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT foo = f32[] multiply(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloPassPipeline pipeline(TestName());
pipeline.AddPass<BazToQuxModuleGroupPass>();
absl::Status status = pipeline.Run(module.get()).status();
ASSERT_IS_NOT_OK(status);
EXPECT_THAT(
status.message(),
::testing::HasSubstr("Module group pass cannot be run on a module"));
}
TEST_F(HloPassPipelineTest, SetHloModuleMetadata) {
HloModuleGroup module_group(TestName());
module_group.push_back(CreateNewVerifiedModule());
module_group.push_back(CreateNewVerifiedModule());
HloPassPipeline pipeline(TestName());
pipeline.AddPass<BazToQuxModuleGroupPass>();
pipeline.AddPass<FooToBarModulePass>();
TF_ASSERT_OK(pipeline.RunOnModuleGroup(&module_group).status());
ASSERT_THAT(module_group.modules(), SizeIs(2));
std::vector<std::string> pass_names = {"pipeline-start", "baz2qux",
"foo2bar"};
std::string pipeline_name = std::string(pipeline.name());
for (const HloModule* module : module_group.modules()) {
const HloModuleMetadataProto& metadata = module->metadata().proto();
EXPECT_EQ(metadata.canonical_module_id(), module->unique_id());
EXPECT_EQ(metadata.module_group_name(), module_group.name());
ASSERT_THAT(metadata.pass_metadata(), SizeIs(3));
for (int pass = 0; pass < metadata.pass_metadata().size(); pass++) {
const HloPassMetadata& pass_metadata = metadata.pass_metadata(pass);
EXPECT_NE(pass_metadata.pass_id(), 0);
EXPECT_THAT(pass_metadata.pass_name(), StrEq(pass_names[pass]));
EXPECT_THAT(pass_metadata.pipeline_name(), StrEq(pipeline_name));
EXPECT_FALSE(pass_metadata.module_changed());
EXPECT_EQ(pass_metadata.module_id(), module->unique_id());
EXPECT_THAT(pass_metadata.module_group_module_ids(),
ElementsAre(module_group.module(0).unique_id(),
module_group.module(1).unique_id()));
EXPECT_GT(pass_metadata.start_timestamp_usec(), 0);
EXPECT_LE(pass_metadata.start_timestamp_usec(),
pass_metadata.end_timestamp_usec());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/pass/hlo_pass_pipeline.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/pass/hlo_pass_pipeline_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4db71c48-fba5-4815-b48c-8072b4eda2f0 | cpp | tensorflow/tensorflow | backend_config | third_party/xla/xla/hlo/ir/backend_config.cc | third_party/xla/xla/hlo/ir/backend_config_test.cc | #include "xla/hlo/ir/backend_config.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/human_readable_json.h"
#include "tsl/platform/protobuf.h"
namespace xla {
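// Returns a deep copy of `proto`, or nullptr if `proto` is null.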
std::unique_ptr<tsl::protobuf::Message> CloneBackendConfigProto(
const tsl::protobuf::Message* proto) {
if (proto == nullptr) {
return nullptr;
}
std::unique_ptr<tsl::protobuf::Message> result(proto->New());
result->CopyFrom(*proto);
return result;
}
absl::StatusOr<std::string> BackendConfigToRawString(
const tsl::protobuf::Message& proto) {
  return tsl::ProtoToHumanReadableJson(proto, /*ignore_accuracy_loss=*/true);
}
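// Requires mutex_ to be held. Lazily materializes the JSON string from the
// cached proto the first time it is needed.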
const std::string& BackendConfigWrapper::GetRawStringWithoutMutex() const {
if (proto_ && raw_string_.empty()) {
raw_string_ = BackendConfigToRawString(*proto_).value();
}
static const std::string* kEmptyString = new std::string();
return raw_string_.empty() ? *kEmptyString : raw_string_;
}
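// Copies the wrapped config into `output_proto`, parsing the raw JSON string
// on first use and caching the parsed proto for later calls.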
absl::Status BackendConfigWrapper::GetProto(
tsl::protobuf::Message* output_proto) const {
output_proto->Clear();
absl::WriterMutexLock lock{&mutex_};
if (proto_ != nullptr) {
if (proto_->GetDescriptor() != output_proto->GetDescriptor()) {
return Internal("Mismatched backend config descriptors.");
}
output_proto->CopyFrom(*proto_);
return absl::OkStatus();
}
if (raw_string_.empty()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(tsl::HumanReadableJsonToProto(raw_string_, output_proto));
proto_ = CloneBackendConfigProto(output_proto);
return absl::OkStatus();
}
BackendConfigWrapper& BackendConfigWrapper::operator=(
BackendConfigWrapper&& other) {
std::unique_ptr<tsl::protobuf::Message> temp_proto;
std::string temp_string;
{
absl::MutexLock other_lock{&other.mutex_};
temp_proto = std::move(other.proto_);
temp_string = std::move(other.raw_string_);
}
absl::MutexLock this_lock{&mutex_};
proto_ = std::move(temp_proto);
raw_string_ = std::move(temp_string);
return *this;
}
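// Equality compares protos when both sides have one, otherwise falls back to
// comparing raw strings. The two objects' mutexes are taken one at a time,
// never nested, to avoid lock-order inversions.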
bool BackendConfigWrapper::operator==(const BackendConfigWrapper& other) const {
tsl::protobuf::Message* this_proto = nullptr;
{
absl::MutexLock this_lock{&mutex_};
this_proto = proto_.get();
}
const std::string* other_raw_string = nullptr;
{
absl::MutexLock other_lock{&other.mutex_};
if (this_proto != nullptr && other.proto_ != nullptr) {
using ::tsl::protobuf::util::MessageDifferencer;
return MessageDifferencer::Equals(*this_proto, *other.proto_);
}
other_raw_string = &other.GetRawStringWithoutMutex();
}
return GetRawString() == *other_raw_string;
}
} | #include "xla/hlo/ir/backend_config.h"
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
const int kNumThreads = 100;
const int kNumRepetitions = 100;
constexpr absl::string_view kRawString =
R"({"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"32","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}},"force_earliest_schedule":false})";
template <typename Input, typename CheckFn>
void RunThreaded(Input input, CheckFn check_fn) {
for (int i = 0; i < kNumRepetitions; ++i) {
BackendConfigWrapper source(input);
absl::Notification all_threads_created;
std::vector<std::unique_ptr<std::thread>> threads;
for (int i = 0; i < kNumThreads; ++i) {
threads.emplace_back(std::make_unique<std::thread>([&] {
all_threads_created.WaitForNotification();
check_fn(source);
}));
}
all_threads_created.Notify();
for (int i = 0; i < kNumThreads; ++i) {
threads[i]->join();
}
}
}
TEST(BackendConfigWrapperTest, ConcurrentGetProto) {
RunThreaded(std::string{kRawString}, [](BackendConfigWrapper& source) {
gpu::GpuBackendConfig proto;
TF_EXPECT_OK(source.GetProto(&proto));
EXPECT_TRUE(proto.has_fusion_backend_config());
BackendConfigWrapper wrapped(proto);
EXPECT_TRUE(wrapped == source);
});
}
TEST(BackendConfigWrapperTest, ConcurrentGetRawString) {
BackendConfigWrapper source_json(std::string{kRawString});
gpu::GpuBackendConfig proto;
TF_EXPECT_OK(source_json.GetProto(&proto));
RunThreaded(proto, [](BackendConfigWrapper& source) {
std::string raw_string = source.GetRawString();
EXPECT_EQ(raw_string, kRawString);
BackendConfigWrapper wrapped(raw_string);
EXPECT_TRUE(wrapped == source);
});
}
TEST(BackendConfigWrapperTest, AssignmentToNonEmptyIsOK) {
BackendConfigWrapper a(std::string{kRawString});
BackendConfigWrapper b(std::string{kRawString});
a = std::move(b);
EXPECT_TRUE(a == BackendConfigWrapper(std::string{kRawString}));
}
TEST(BackendConfigWrapperTest, AssignmentDoesNotDeadlock) {
BackendConfigWrapper source;
BackendConfigWrapper& ref = source;
source = std::move(ref);
}
TEST(BackendConfigWrapperTest, SelfComparisonDoesNotDeadlock) {
BackendConfigWrapper source(std::string{kRawString});
EXPECT_TRUE(source == source);
}
TEST(BackendConfigWrapperTest, ComparisonDoesNotDeadlock) {
BackendConfigWrapper source_json(std::string{kRawString});
gpu::GpuBackendConfig proto;
TF_EXPECT_OK(source_json.GetProto(&proto));
RunThreaded(std::string{kRawString}, [&proto](BackendConfigWrapper& source) {
BackendConfigWrapper other_first(proto);
EXPECT_TRUE(other_first == source);
BackendConfigWrapper other_second(proto);
EXPECT_TRUE(source == other_second);
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/backend_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/backend_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af3f2af3-6f07-4e3c-b1f8-a9fbaaa3332c | cpp | tensorflow/tensorflow | hlo_opcode | third_party/xla/xla/hlo/ir/hlo_opcode.cc | third_party/xla/xla/service/hlo_opcode_test.cc | #include "xla/hlo/ir/hlo_opcode.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/util.h"
namespace xla {
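// The opcode/string conversions below are generated from HLO_OPCODE_LIST, an
// X-macro over all opcodes, so they stay in sync with the opcode definitions.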
absl::string_view HloOpcodeString(HloOpcode opcode) {
switch (opcode) {
#define CASE_OPCODE_STRING(enum_name, opcode_name, ...) \
case HloOpcode::enum_name: \
return opcode_name;
HLO_OPCODE_LIST(CASE_OPCODE_STRING)
#undef CASE_OPCODE_STRING
}
}
absl::StatusOr<HloOpcode> StringToHloOpcode(absl::string_view opcode_name) {
static auto* opcode_map = new absl::flat_hash_map<std::string, HloOpcode>({
#define STRING_TO_OPCODE_ENTRY(enum_name, opcode_name, ...) \
{opcode_name, HloOpcode::enum_name},
HLO_OPCODE_LIST(STRING_TO_OPCODE_ENTRY)
#undef STRING_TO_OPCODE_ENTRY
});
auto it = opcode_map->find(opcode_name);
if (it == opcode_map->end()) {
return InvalidArgument("Unknown opcode: %s", opcode_name);
}
return it->second;
}
bool HloOpcodeIsComparison(HloOpcode opcode) {
return opcode == HloOpcode::kCompare;
}
bool HloOpcodeIsVariadic(HloOpcode opcode) {
switch (opcode) {
#define CASE_IS_VARIADIC(enum_name, opcode_name, arity, ...) \
case HloOpcode::enum_name: \
return arity == kHloOpcodeIsVariadic;
HLO_OPCODE_LIST(CASE_IS_VARIADIC)
#undef CASE_IS_VARIADIC
}
}
std::optional<int> HloOpcodeArity(HloOpcode opcode) {
switch (opcode) {
#define CASE_ARITY(enum_name, opcode_name, arity, ...) \
case HloOpcode::enum_name: \
return arity == kHloOpcodeIsVariadic ? std::nullopt \
: std::make_optional(arity);
HLO_OPCODE_LIST(CASE_ARITY)
#undef CASE_ARITY
}
}
bool HloOpcodeIsAsync(HloOpcode opcode) {
return opcode == HloOpcode::kAsyncStart ||
opcode == HloOpcode::kAsyncUpdate || opcode == HloOpcode::kAsyncDone;
}
} | #include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/types.h"
namespace xla {
namespace {
TEST(HloOpcodeTest, StringifyMultiply) {
ASSERT_EQ("multiply", HloOpcodeString(HloOpcode::kMultiply));
}
TEST(HloOpcodeTest, OpcodeProperties) {
#define SOME_LIST(X) \
X(One) \
X(Two) \
X(Three)
EXPECT_EQ(3, HLO_XLIST_LENGTH(SOME_LIST));
#undef SOME_LIST
for (int i = 0; i < HloOpcodeCount(); ++i) {
auto opcode = static_cast<HloOpcode>(i);
EXPECT_EQ(opcode, StringToHloOpcode(HloOpcodeString(opcode)).value());
switch (opcode) {
case HloOpcode::kCompare:
EXPECT_TRUE(HloOpcodeIsComparison(opcode));
break;
default:
EXPECT_FALSE(HloOpcodeIsComparison(opcode));
}
switch (opcode) {
case HloOpcode::kAfterAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kAsyncStart:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCall:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kDynamicReshape:
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kRng:
case HloOpcode::kScatter:
case HloOpcode::kSort:
case HloOpcode::kTuple:
case HloOpcode::kReduceWindow:
EXPECT_TRUE(HloOpcodeIsVariadic(opcode));
break;
default:
EXPECT_FALSE(HloOpcodeIsVariadic(opcode));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_opcode.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_opcode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0bc63f44-f8b9-4765-a91f-c9b302a20911 | cpp | tensorflow/tensorflow | hlo_schedule | third_party/xla/xla/hlo/ir/hlo_schedule.cc | third_party/xla/xla/service/hlo_schedule_test.cc | #include "xla/hlo/ir/hlo_schedule.h"
#include <cstdint>
#include <ostream>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/map_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/util.h"
namespace xla {
absl::StatusOr<HloSchedule> HloSchedule::CreateFromProto(
const HloModule* module, const HloScheduleProto& proto) {
absl::flat_hash_map<int64_t, const HloComputation*> id_to_computation;
for (const HloComputation* computation : module->computations()) {
id_to_computation[computation->unique_id()] = computation;
}
HloSchedule schedule(module);
for (const auto& id_sequence : proto.sequences()) {
int64_t computation_id = id_sequence.first;
auto comp_it = id_to_computation.find(computation_id);
if (comp_it == id_to_computation.end()) {
continue;
}
const HloComputation* computation = comp_it->second;
absl::flat_hash_map<int64_t, HloInstruction*> id_to_instruction;
for (HloInstruction* instruction : computation->instructions()) {
id_to_instruction[instruction->unique_id()] = instruction;
}
HloInstructionSequence& sequence =
schedule.GetOrCreateSequence(computation);
for (const int64_t instruction_id : id_sequence.second.instruction_ids()) {
auto instr_it = id_to_instruction.find(instruction_id);
TF_RET_CHECK(instr_it != id_to_instruction.end())
<< "No instruction exists in HLO computation " << computation->name()
<< " with id " << instruction_id;
sequence.push_back(instr_it->second);
}
}
TF_RETURN_IF_ERROR(schedule.Verify());
return std::move(schedule);
}
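// CreateFromProto skips sequences whose computation id no longer exists in
// the module (the `continue` above), which in effect tolerates protos
// serialized before computations were removed. A missing *instruction*
// inside a surviving computation is still an error, since the resulting
// schedule would be silently incomplete; the final Verify() then checks the
// reconstructed schedule as a whole.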
absl::StatusOr<HloScheduleProto> HloSchedule::ToProto() const {
TF_RETURN_IF_ERROR(Verify());
HloScheduleProto proto;
for (const auto& id_sequence : sequences_) {
int64_t computation_id = id_sequence.first;
const HloInstructionSequence& sequence = id_sequence.second;
HloScheduleProto::InstructionSequence& proto_sequence =
(*proto.mutable_sequences())[computation_id];
proto_sequence.mutable_instruction_ids()->Reserve(sequence.size());
for (const int64_t id : sequence.ids()) {
proto_sequence.add_instruction_ids(id);
}
}
return std::move(proto);
}
void HloSchedule::set_sequence(const HloComputation* computation,
absl::Span<HloInstruction* const> sequence) {
set_sequence(computation, HloInstructionSequence(sequence));
}
void HloSchedule::set_sequence(const HloComputation* computation,
HloInstructionSequence sequence) {
CHECK(computation->parent() == module_);
sequences_[computation->unique_id()] = std::move(sequence);
execution_threads_[computation->unique_id()] =
std::string(computation->execution_thread());
}
HloInstructionSequence& HloSchedule::GetOrCreateSequence(
const HloComputation* computation) {
auto it = sequences_.find(computation->unique_id());
if (it == sequences_.end()) {
CHECK(computation->parent() == module_);
execution_threads_[computation->unique_id()] =
std::string(computation->execution_thread());
return sequences_[computation->unique_id()];
} else {
return it->second;
}
}
const HloInstructionSequence& HloSchedule::sequence(
const HloComputation* computation) const {
return sequences_.at(computation->unique_id());
}
absl::Status HloSchedule::UpdateComputationSchedule(
const HloComputation* computation) {
absl::flat_hash_map<int, HloInstruction*> id_to_instruction;
for (HloInstruction* instruction : computation->instructions()) {
InsertOrDie(&id_to_instruction, instruction->unique_id(), instruction);
}
absl::flat_hash_set<int> ids_in_schedule;
for (int id : sequences_.at(computation->unique_id()).ids()) {
InsertOrDie(&ids_in_schedule, id);
}
absl::flat_hash_map<const HloInstruction*, std::vector<HloInstruction*>>
new_instruction_uses;
absl::flat_hash_map<const HloInstruction*, int> unscheduled_operand_count;
std::queue<HloInstruction*> worklist;
for (HloInstruction* instruction : computation->instructions()) {
if (!ids_in_schedule.contains(instruction->unique_id())) {
if (instruction->operands().empty()) {
worklist.push(instruction);
} else {
for (const HloInstruction* operand : instruction->operands()) {
new_instruction_uses[operand].push_back(instruction);
}
unscheduled_operand_count[instruction] = instruction->operand_count();
}
}
}
HloInstructionSequence new_sequence;
auto schedule_worklist = [&]() {
while (!worklist.empty()) {
HloInstruction* instruction = worklist.front();
worklist.pop();
new_sequence.push_back(instruction);
std::vector<HloInstruction*>* new_users =
tsl::gtl::FindOrNull(new_instruction_uses, instruction);
if (new_users != nullptr) {
for (HloInstruction* new_user : *new_users) {
unscheduled_operand_count.at(new_user)--;
CHECK_GE(unscheduled_operand_count.at(new_user), 0);
if (unscheduled_operand_count.at(new_user) == 0) {
worklist.push(new_user);
}
}
}
}
};
schedule_worklist();
for (int id : sequences_.at(computation->unique_id()).ids()) {
auto it = id_to_instruction.find(id);
if (it == id_to_instruction.end()) {
continue;
}
worklist.push(it->second);
schedule_worklist();
}
set_sequence(computation, std::move(new_sequence));
return absl::OkStatus();
}
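// UpdateComputationSchedule is a Kahn-style topological repair. Instructions
// added since the last schedule are tracked by their number of still
// unscheduled operands, and an instruction becomes schedulable once that
// count reaches zero. The loop then replays the old sequence in order and,
// after emitting each previously scheduled instruction, drains whatever new
// instructions just became ready; new work is thus placed as early as its
// data dependencies allow, while the relative order of the old instructions
// is preserved.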
absl::Status HloSchedule::Update(
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloComputation*> nonfusion_computations =
module_->MakeNonfusionComputations(execution_threads);
for (const HloComputation* computation : nonfusion_computations) {
if (!is_computation_scheduled(computation)) {
GetOrCreateSequence(computation);
TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
}
}
auto sum_of_sequences_for_threads = [&]() -> int64_t {
if (execution_threads.empty()) {
return sequences_.size();
}
int64_t sequences_num_for_threads = 0;
for (const auto& [thread_name, sequence_num] :
num_sequences_by_execution_thread()) {
sequences_num_for_threads +=
execution_threads.contains(thread_name) ? sequence_num : 0;
}
return sequences_num_for_threads;
};
int64_t sequence_sum = sum_of_sequences_for_threads();
if (sequence_sum > nonfusion_computations.size()) {
absl::flat_hash_set<int64_t> nonfusion_computations_ids;
for (const HloComputation* computation : nonfusion_computations) {
nonfusion_computations_ids.insert(computation->unique_id());
}
for (auto it = sequences_.begin(); it != sequences_.end();) {
std::string sequence_thread_name = tsl::gtl::FindWithDefault(
execution_threads_, it->first, HloInstruction::kMainExecutionThread);
bool is_thread_included =
execution_threads.empty() ||
execution_threads.contains(sequence_thread_name);
if (!nonfusion_computations_ids.contains(it->first) &&
is_thread_included) {
execution_threads_.erase(it->first);
sequences_.erase(it++);
} else {
++it;
}
}
}
sequence_sum = sum_of_sequences_for_threads();
CHECK_EQ(sequence_sum, nonfusion_computations.size());
for (const HloComputation* computation : nonfusion_computations) {
TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
}
TF_RETURN_IF_ERROR(Verify());
return absl::OkStatus();
}
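// Update() is thread-scoped: when `execution_threads` is non-empty, only
// sequences belonging to those threads are added, pruned, or refreshed, and
// sequences for other threads (for example an async computation pinned to a
// "parallel_thread") are left untouched even if stale. Callers that mutate
// computations on several threads should pass all affected threads, or an
// empty set to update everything.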
absl::flat_hash_map<std::string, int64_t>
HloSchedule::num_sequences_by_execution_thread() const {
absl::flat_hash_map<std::string, int64_t> sequence_num_by_execution_threads;
for (const auto& id_sequence_item : sequences_) {
++sequence_num_by_execution_threads[tsl::gtl::FindWithDefault(
execution_threads_, id_sequence_item.first,
HloInstruction::kMainExecutionThread)];
}
return sequence_num_by_execution_threads;
}
absl::Status HloSchedule::Verify() const {
VLOG(2) << "VerifySchedule()";
XLA_VLOG_LINES(2, ToString());
absl::flat_hash_map<std::string, int64_t> sequence_num_by_execution_threads =
num_sequences_by_execution_thread();
for (const auto& [thread_name, sequence_size] :
sequence_num_by_execution_threads) {
std::vector<HloComputation*> nonfusion_computations =
module_->MakeNonfusionComputations({thread_name});
TF_RET_CHECK(nonfusion_computations.size() == sequence_size)
<< "For thread " << thread_name << ", schedule has " << sequence_size
<< " sequences, but module has " << nonfusion_computations.size()
<< " non-fusion computations for thread " << thread_name;
for (const HloComputation* computation : nonfusion_computations) {
TF_RET_CHECK(sequences_.contains(computation->unique_id()))
<< "Computation " << computation->name()
<< " missing from HLO schedule.";
}
for (const HloComputation* computation : nonfusion_computations) {
absl::flat_hash_map<const HloInstruction*, int> instruction_position;
int pos = 0;
for (const HloInstruction* instruction :
sequence(computation).instructions()) {
TF_RET_CHECK(instruction_position.insert({instruction, pos}).second)
<< "Instruction " << instruction->name()
<< " appears more than once in the schedule";
pos++;
}
TF_RET_CHECK(instruction_position.size() ==
computation->instruction_count())
<< "Schedule for computation " << computation->name() << " has "
<< instruction_position.size() << " instructions, expected "
<< computation->instruction_count();
for (const HloInstruction* instruction : computation->instructions()) {
TF_RET_CHECK(instruction_position.contains(instruction))
<< "Instruction " << instruction->name() << " is not in schedule";
}
for (const HloInstruction* instruction : computation->instructions()) {
for (const HloInstruction* operand : instruction->operands()) {
TF_RET_CHECK(instruction_position.at(operand) <
instruction_position.at(instruction))
<< "Instruction " << instruction->name()
<< " is not scheduled after its operand " << operand->name();
}
for (const HloInstruction* pred : instruction->control_predecessors()) {
TF_RET_CHECK(instruction_position.at(pred) <
instruction_position.at(instruction))
<< "Instruction " << instruction->name()
<< " is not scheduled after its control predecessor "
<< pred->name();
}
}
}
}
return absl::OkStatus();
}
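// Verify() checks four invariants per execution thread: every non-fusion
// computation has exactly one sequence, no instruction appears twice, the
// sequence covers every instruction in its computation, and every
// instruction comes after all of its operands and control predecessors. It
// validates only that the order is correct, not that it is a good order.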
namespace {
const HloComputation* IdToComputation(const HloModule* module, int64_t id) {
for (const HloComputation* computation : module->computations()) {
if (computation->unique_id() == id) {
return computation;
}
}
return nullptr;
}
}
std::string HloSchedule::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloSchedule");
std::vector<int64_t> sorted_ids;
for (const auto& id_sequence : sequences_) {
sorted_ids.push_back(id_sequence.first);
}
absl::c_sort(sorted_ids);
for (const int64_t id : sorted_ids) {
const HloComputation* computation = IdToComputation(module_, id);
const HloInstructionSequence& sequence = sequences_.at(id);
if (computation == nullptr) {
pieces.push_back(absl::StrFormat(
"computation with id %d (no longer in HLO module):", id));
      for (int instruction_id : sequence.ids()) {
        pieces.push_back(absl::StrCat("  ", instruction_id));
}
} else {
pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
for (const HloInstruction* instruction : sequence.instructions()) {
pieces.push_back(absl::StrCat(" ", instruction->name()));
}
}
}
return absl::StrJoin(pieces, "\n");
}
std::ostream& operator<<(std::ostream& out, const HloSchedule& schedule) {
return out << schedule.ToString();
}
} | #include "xla/hlo/ir/hlo_schedule.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloScheduleTest : public HloTestBase {};
TEST_F(HloScheduleTest, UpdateScheduleUnchangedModule) {
const std::string module_str = R"(
HloModule UpdateScheduleUnchanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
const auto& entry_schedule =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(entry_schedule.size(), 6);
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(entry_schedule,
schedule.sequence(module->entry_computation()).instructions());
}
TEST_F(HloScheduleTest, UpdateScheduleWithNewInstructions) {
const std::string module_str = R"(
HloModule UpdateScheduleWithNewInstructions
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
const Shape shape = entry->root_instruction()->shape();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
HloInstruction* sub = entry->AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, constant, entry->root_instruction()));
entry->set_root_instruction(sub);
auto in_schedule = [&](const HloInstruction* hlo) {
return absl::c_linear_search(schedule.sequence(entry).instructions(), hlo);
};
EXPECT_EQ(schedule.sequence(entry).size(), 6);
EXPECT_FALSE(in_schedule(constant));
EXPECT_FALSE(in_schedule(sub));
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 8);
EXPECT_TRUE(in_schedule(constant));
EXPECT_TRUE(in_schedule(sub));
}
TEST_F(HloScheduleTest, UpdateScheduleWithAddedAndDeletedInstruction) {
const std::string module_str = R"(
HloModule UpdateScheduleWithAddedAndDeletedInstruction
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
HloInstruction* new_root = entry->AddInstruction(
HloInstruction::CreateBinary(constant->shape(), HloOpcode::kSubtract,
constant, entry->parameter_instruction(0)));
entry->set_root_instruction(new_root);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(entry).size(), 6);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 4);
}
TEST_F(HloScheduleTest, UpdateScheduleWithCompletelyReplacedModule) {
const std::string module_str = R"(
HloModule UpdateScheduleWithCompletelyReplacedModule
ENTRY main {
a = f32[] constant(42.0)
b = f32[] constant(123.0)
ROOT sum = f32[] add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* new_root = entry->AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
entry->set_root_instruction(new_root);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(entry).size(), 3);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 2);
}
TEST_F(HloScheduleTest, UpdateScheduleWithMultipleComputations) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %WhileLoop () -> s32[] {
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
const HloInstruction* xla_while =
module->entry_computation()->root_instruction()->operand(0);
HloComputation* body = xla_while->while_body();
HloComputation* cond = xla_while->while_condition();
cond->set_root_instruction(cond->AddInstruction(
HloInstruction::CreateUnary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kNot, cond->root_instruction())));
body->set_root_instruction(body->parameter_instruction(0));
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(body).size(), 7);
EXPECT_EQ(schedule.sequence(cond).size(), 4);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(body).size(), 1);
EXPECT_EQ(schedule.sequence(cond).size(), 5);
}
TEST_F(HloScheduleTest, UpdateScheduleComputationRemoved) {
const std::string module_str = R"(
HloModule UpdateScheduleComputationRemoved
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %WhileLoop () -> s32[] {
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
HloInstruction* xla_while =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloInstruction* init = xla_while->mutable_operand(0);
TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
HloDCE dce;
ASSERT_EQ(module->computation_count(), 3);
TF_ASSERT_OK(dce.Run(module.get()).status());
ASSERT_EQ(module->computation_count(), 1);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
}
TEST_F(HloScheduleTest, UpdateScheduleComputationRemovedWithMultiThreads) {
const std::string module_str = R"(
HloModule UpdateScheduleComputationRemovedWithMultiThreads
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %WhileLoop () -> (s32[], f32[10]) {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
%async-done = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
%main_res = s32[] get-tuple-element((s32[], token[]) %while), index=0
ROOT %res = tuple(%main_res, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(),
[](const BufferValue& buffer) {
                       return ShapeUtil::ByteSizeOf(buffer.shape(),
                                                    sizeof(void*));
},
{}, {HloInstruction::kMainExecutionThread}));
HloInstruction* xla_while = module->entry_computation()
->root_instruction()
->mutable_operand(0)
->mutable_operand(0);
HloInstruction* init = xla_while->mutable_operand(0);
TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
HloDCE dce;
ASSERT_EQ(module->computation_count(), 4);
TF_ASSERT_OK(dce.Run(module.get()).status());
ASSERT_EQ(module->computation_count(), 2);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update({HloInstruction::kMainExecutionThread}));
TF_ASSERT_OK(schedule.Verify());
ASSERT_EQ(module->MakeNonfusionComputations({"parallel_thread"}).size(), 1);
ASSERT_FALSE(schedule.is_computation_scheduled(
module->MakeNonfusionComputations({"parallel_thread"}).front()));
}
TEST_F(HloScheduleTest, UpdateScheduleAddComputation) {
const std::string module_str = R"(
HloModule UpdateScheduleAddComputation
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %WhileLoop () -> (s32[], f32[10]) {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
%async-done = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
%main_res = s32[] get-tuple-element((s32[], token[]) %while), index=0
ROOT %res = tuple(%main_res, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(),
[](const BufferValue& buffer) {
                       return ShapeUtil::ByteSizeOf(buffer.shape(),
                                                    sizeof(void*));
},
{}, {HloInstruction::kMainExecutionThread}));
HloComputation* entry_computation = module->entry_computation();
HloComputation::Builder comp_builder("fusion_computation");
HloInstruction* entry_comp_parameter_0 =
entry_computation->parameter_instruction(0);
HloInstruction* entry_comp_parameter_1 =
entry_computation->parameter_instruction(1);
std::vector<HloInstruction*> instructions_in_new_computation;
HloInstruction* added_instruction =
entry_computation->AddInstruction(HloInstruction::CreateBinary(
entry_comp_parameter_0->shape(), HloOpcode::kMultiply,
entry_comp_parameter_0, entry_comp_parameter_1));
instructions_in_new_computation.push_back(added_instruction);
HloInstruction* call =
entry_computation->CreateCallInstruction(instructions_in_new_computation);
Shape completion_sflag_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * async_done,
entry_computation->CreateAsyncInstructions(
call, {completion_sflag_shape}, entry_computation->execution_thread(),
true, true));
HloInstruction* result_2 =
entry_computation->root_instruction()->mutable_operand(1);
HloInstruction* modified_result_2 =
entry_computation->AddInstruction(HloInstruction::CreateBinary(
result_2->shape(), HloOpcode::kAdd, async_done, result_2));
TF_ASSERT_OK(result_2->ReplaceAllUsesWith(modified_result_2));
auto added_computation_name =
async_done->operand(0)->called_computations()[0]->name();
ASSERT_FALSE(schedule.is_computation_scheduled(
module->GetComputationWithName(added_computation_name)));
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update({HloInstruction::kMainExecutionThread}));
TF_ASSERT_OK(schedule.Verify());
ASSERT_TRUE(schedule.is_computation_scheduled(
module->GetComputationWithName(added_computation_name)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_schedule.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_schedule_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
576c35e6-cf82-4d5a-afeb-c1c2dc9dbaaf | cpp | tensorflow/tensorflow | hlo_module_metadata | third_party/xla/xla/hlo/ir/hlo_module_metadata.cc | third_party/xla/xla/service/hlo_module_metadata_test.cc | #include "xla/hlo/ir/hlo_module_metadata.h"
#include <algorithm>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
namespace xla {
absl::StatusOr<HloPassMetadata*>
HloModuleMetadata::GetCurrentHloPassMetadata() {
if (running_passes_.empty()) {
return NotFound(
"HloPassMetadata for currently running pass not found, either because "
"the pass did not call RecordPassStart or because a pass is "
"creating/switching modules without using "
"HloModuleGroup::ReplaceModule.");
}
return running_passes_.back();
}
absl::Status HloModuleMetadata::MutateCurrentHloPassMetadata(
absl::FunctionRef<void(HloPassMetadata*)> mutator) {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
mutator(pass_metadata);
return absl::OkStatus();
}
void HloModuleMetadata::RecordPassStart() {
HloPassMetadata* pass_metadata = module_metadata_.add_pass_metadata();
pass_metadata->set_pass_id(next_pass_id_++);
pass_metadata->set_start_timestamp_usec(env_->NowMicros());
running_passes_.push_back(pass_metadata);
}
absl::Status HloModuleMetadata::RecordPassEnd() {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
pass_metadata->set_end_timestamp_usec(env_->NowMicros());
running_passes_.pop_back();
return absl::OkStatus();
}
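// RecordPassStart/RecordPassEnd maintain `running_passes_` as a stack, so
// nested passes (for example a pipeline driving sub-passes) each get their
// own HloPassMetadata with independent start/end timestamps. A pass driver
// might use it roughly like this sketch:
//   metadata->RecordPassStart();
//   TF_RETURN_IF_ERROR(metadata->set_current_pass_name(pass->name()));
//   // ... run the pass ...
//   TF_RETURN_IF_ERROR(metadata->RecordPassEnd());
// Calling RecordPassEnd without a matching start returns a NotFound error.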
void HloModuleMetadata::set_prepartitioning_metadata(
const HloModuleMetadata& prepartitioning_metadata) {
module_metadata_.set_original_module_id(
prepartitioning_metadata.proto().canonical_module_id());
prepartitioning_metadata_ = prepartitioning_metadata.proto();
prepartitioning_metadata_->clear_pass_metadata();
absl::flat_hash_set<HloPassMetadata*> running_passes(
prepartitioning_metadata.running_passes_.begin(),
prepartitioning_metadata.running_passes_.end());
for (const HloPassMetadata& pass_metadata :
prepartitioning_metadata.proto().pass_metadata()) {
if (running_passes.contains(&pass_metadata)) {
HloPassMetadata* added_pass_metadata =
module_metadata_.add_pass_metadata();
*added_pass_metadata = pass_metadata;
running_passes_.push_back(added_pass_metadata);
next_pass_id_ =
std::max(next_pass_id_,
static_cast<int64_t>(added_pass_metadata->pass_id()) + 1);
} else {
*prepartitioning_metadata_->add_pass_metadata() = pass_metadata;
}
}
}
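// On repartitioning, passes that already finished stay attached to the
// prepartitioning snapshot, while passes still running are copied into the
// new module's metadata and re-registered in running_passes_, so their
// eventual RecordPassEnd lands on the new module. next_pass_id_ is bumped
// past any copied ids to keep pass ids unique within the new metadata.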
absl::Status HloModuleMetadata::set_custom_metadata(
const ::tsl::protobuf::Message& message) {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
  if (!pass_metadata->mutable_custom_metadata()->PackFrom(message)) {
    LOG(WARNING) << "Failed to pack custom metadata for pass id "
                 << pass_metadata->pass_id();
    return Internal("failed to pack custom metadata");
  }
  return absl::OkStatus();
}
} | #include "xla/hlo/ir/hlo_module_metadata.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::Property;
using ::testing::StrEq;
class TestEnv : public tsl::EnvWrapper {
public:
TestEnv() : EnvWrapper(Env::Default()) {}
uint64_t NowMicros() const override { return current_micros_; }
void SetCurrentMicros(uint64_t micros) { current_micros_ = micros; }
private:
uint64_t current_micros_ = 1;
};
TEST(HloModuleMetadata, RecordsPassStart) {
TestEnv env;
HloModuleMetadata module_metadata(&env);
env.SetCurrentMicros(1234);
module_metadata.RecordPassStart();
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::start_timestamp_usec, 1234)));
}
TEST(HloModuleMetadata, RecordsPassEnd) {
TestEnv env;
HloModuleMetadata module_metadata(&env);
module_metadata.RecordPassStart();
env.SetCurrentMicros(4321);
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::end_timestamp_usec, 4321)));
}
TEST(HloModuleMetadata, RecordsPassEndInNestedMetadata) {
TestEnv env;
HloModuleMetadata module_metadata(&env);
module_metadata.RecordPassStart();
module_metadata.RecordPassStart();
env.SetCurrentMicros(111);
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_THAT(module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::end_timestamp_usec, 0),
Property(&HloPassMetadata::end_timestamp_usec, 111)));
env.SetCurrentMicros(222);
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_THAT(module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::end_timestamp_usec, 222),
Property(&HloPassMetadata::end_timestamp_usec, 111)));
}
TEST(HloModuleMetadata, RecordPassEndReturnsNotFound) {
HloModuleMetadata module_metadata(tsl::Env::Default());
EXPECT_EQ(module_metadata.RecordPassEnd().code(), tsl::error::NOT_FOUND);
module_metadata.RecordPassStart();
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_EQ(module_metadata.RecordPassEnd().code(), tsl::error::NOT_FOUND);
}
TEST(HloModuleMetadata, SetsHloPassMetadataFields) {
HloModuleMetadata module_metadata(tsl::Env::Default());
module_metadata.RecordPassStart();
EXPECT_IS_OK(module_metadata.set_current_pass_name("fake name"));
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name, StrEq("fake name"))));
}
TEST(HloModuleMetadata, SetsHloPassMetadataFieldsInNestedMetadata) {
HloModuleMetadata module_metadata(tsl::Env::Default());
module_metadata.RecordPassStart();
module_metadata.RecordPassStart();
EXPECT_IS_OK(module_metadata.set_current_pass_name("fake name"));
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name, StrEq("")),
Property(&HloPassMetadata::pass_name, StrEq("fake name"))));
}
TEST(HloModuleMetadata, SetterReturnsNotFound) {
HloModuleMetadata module_metadata(tsl::Env::Default());
EXPECT_EQ(module_metadata.set_current_pass_name("fake name").code(),
tsl::error::NOT_FOUND);
}
TEST(HloModuleMetadata, CopiesRunningPrepartitioningPasses) {
HloModuleMetadata old_module_metadata(tsl::Env::Default());
old_module_metadata.RecordPassStart();
EXPECT_IS_OK(old_module_metadata.set_current_pass_name("outer pass"));
old_module_metadata.RecordPassStart();
EXPECT_IS_OK(old_module_metadata.set_current_pass_name("finished pass"));
EXPECT_IS_OK(old_module_metadata.RecordPassEnd());
old_module_metadata.RecordPassStart();
EXPECT_IS_OK(old_module_metadata.set_current_pass_name("inner pass"));
HloModuleMetadata new_module_metadata(tsl::Env::Default());
new_module_metadata.set_prepartitioning_metadata(old_module_metadata);
EXPECT_THAT(
new_module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name, StrEq("outer pass")),
Property(&HloPassMetadata::pass_name, StrEq("inner pass"))));
EXPECT_THAT(new_module_metadata.prepartitioning_metadata()->pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name,
StrEq("finished pass"))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_module_metadata.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_metadata_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23a79605-f8d1-470f-b2fc-6fc4ba7ce262 | cpp | tensorflow/tensorflow | hlo_dfs_reachability | third_party/xla/xla/hlo/ir/hlo_dfs_reachability.cc | third_party/xla/xla/service/hlo_dfs_reachability_test.cc | #include "xla/hlo/ir/hlo_dfs_reachability.h"
#include <cstddef>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
bool HloDfsReachability::IsPresent(const HloInstruction* instruction) const {
return instruction_to_idx_.contains(instruction);
}
bool HloDfsReachability::IsReachable(const HloInstruction* from,
const HloInstruction* to) const {
if (from == to) {
return true;
}
if (to->operand_count() == 0 && from->control_predecessors().empty()) {
return false;
}
size_t target_node_idx = instruction_to_idx_.at(from);
size_t dfs_root_idx = instruction_to_idx_.at(to);
if (dfs_root_idx < target_node_idx) {
return false;
}
llvm::SmallVector<const HloInstruction*> stack{to};
llvm::BitVector visited_idxs(1 + (dfs_root_idx - target_node_idx));
visited_idxs.set(dfs_root_idx - target_node_idx);
auto check_and_enqueue = [&](const HloInstruction* instr) {
if (instr == from) {
return true;
}
size_t instr_idx = instruction_to_idx_.at(instr);
if (instr_idx < target_node_idx) {
return false;
}
size_t visited_idx = instr_idx - target_node_idx;
if (visited_idxs.test(visited_idx)) {
return false;
}
visited_idxs.set(visited_idx);
stack.push_back(instr);
return false;
};
while (!stack.empty()) {
const HloInstruction* instr = stack.pop_back_val();
if (absl::c_any_of(instr->operands(), check_and_enqueue) ||
absl::c_any_of(instr->control_predecessors(), check_and_enqueue)) {
return true;
}
}
return false;
}
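// IsReachable runs a DFS from `to` backwards through operands and control
// predecessors, and relies on instruction_to_idx_ holding post-order
// positions: an instruction's index is always greater than that of anything
// it transitively depends on. This invariant yields two cheap prunes (`from`
// is reachable only if its index is below the DFS root's, and any node whose
// index falls below `from`'s can be skipped) and lets the visited set be a
// dense BitVector over the [from, to] index window rather than a hash set.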
bool HloDfsReachability::IsConnected(const HloInstruction* a,
const HloInstruction* b) const {
return IsReachable(a, b) || IsReachable(b, a);
}
std::unique_ptr<HloDfsReachability> HloDfsReachability::Build(
const HloComputation* computation) {
auto res = std::make_unique<HloDfsReachability>();
HloComputation::ChannelDependencies empty_channel_dependencies;
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder(empty_channel_dependencies);
res->instruction_to_idx_.reserve(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
res->instruction_to_idx_[instructions[i]] = i;
}
return res;
}
} | #include "xla/hlo/ir/hlo_dfs_reachability.h"
#include <cstddef>
#include <memory>
#include <string>
#include <string_view>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class HloDfsReachabilityTest : public HloTestBase {};
TEST_F(HloDfsReachabilityTest, NonTrivialReachability) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, constant2));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kMultiply, add, exp));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(mul));
TF_CHECK_OK(add->AddControlDependencyTo(exp));
auto reachability = HloDfsReachability::Build(computation);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_TRUE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_TRUE(reachability->IsReachable(constant1, copy));
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_TRUE(reachability->IsReachable(constant2, negate));
EXPECT_TRUE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_TRUE(reachability->IsReachable(constant2, copy));
EXPECT_FALSE(reachability->IsReachable(exp, constant1));
EXPECT_FALSE(reachability->IsReachable(exp, constant2));
EXPECT_FALSE(reachability->IsReachable(exp, add));
EXPECT_FALSE(reachability->IsReachable(exp, negate));
EXPECT_TRUE(reachability->IsReachable(exp, exp));
EXPECT_TRUE(reachability->IsReachable(exp, mul));
EXPECT_TRUE(reachability->IsReachable(exp, copy));
EXPECT_FALSE(reachability->IsReachable(mul, constant1));
EXPECT_FALSE(reachability->IsReachable(mul, constant2));
EXPECT_FALSE(reachability->IsReachable(mul, add));
EXPECT_FALSE(reachability->IsReachable(mul, negate));
EXPECT_FALSE(reachability->IsReachable(mul, exp));
EXPECT_TRUE(reachability->IsReachable(mul, mul));
EXPECT_FALSE(reachability->IsReachable(mul, copy));
EXPECT_TRUE(reachability->IsConnected(constant1, copy));
EXPECT_TRUE(reachability->IsConnected(copy, constant1));
EXPECT_FALSE(reachability->IsConnected(negate, add));
EXPECT_FALSE(reachability->IsConnected(add, negate));
}
TEST_F(HloDfsReachabilityTest, ChannelReachability) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
HloComputation::Builder builder("ChannelReachability");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
auto send =
builder.AddInstruction(HloInstruction::CreateSend(param, token0, 1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
auto recv =
builder.AddInstruction(HloInstruction::CreateRecv(shape, token1, 1));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto module = CreateNewVerifiedModule();
module->mutable_config().set_use_spmd_partitioning(false);
module->mutable_config().set_static_device_assignment(DeviceAssignment(1, 2));
auto computation = module->AddEntryComputation(builder.Build(recv_done));
auto reachability = HloDfsReachability::Build(computation);
EXPECT_FALSE(reachability->IsReachable(param, recv_done));
EXPECT_FALSE(reachability->IsReachable(send, recv));
EXPECT_FALSE(reachability->IsReachable(send_done, recv));
}
class HloDfsReachabilityBenchmark {
public:
HloDfsReachabilityBenchmark(int size, std::string_view name) : name_(name) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(name);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
HloInstruction* prev = constant;
for (int i = 1; i < size; ++i) {
prev = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, prev));
}
HloModuleConfig hlo_config;
module_ = std::make_unique<HloModule>(name_, hlo_config);
computation_ =
module_->AddEntryComputation(builder.Build(prev));
}
std::unique_ptr<HloDfsReachability> Build() {
return HloDfsReachability::Build(computation_);
}
const HloComputation* computation() { return computation_; }
private:
std::unique_ptr<HloModule> module_;
HloComputation* computation_;
const std::string name_;
};
void BM_HloDfsReachabilityBuild(benchmark::State& state) {
int num_nodes = state.range(0);
HloDfsReachabilityBenchmark bm(num_nodes, state.name());
while (state.KeepRunningBatch(num_nodes)) {
benchmark::DoNotOptimize(bm.Build());
}
}
void BM_HloDfsReachabilityCheck(benchmark::State& state) {
size_t size = state.range(0);
HloDfsReachabilityBenchmark bm(size, state.name());
auto reachability = bm.Build();
auto instrs = bm.computation()->MakeInstructionPostOrder();
size_t i = 0;
for (auto s : state) {
size_t from = i % size;
size_t to = (++i + size / 2) % size;
reachability->IsReachable(instrs[from], instrs[to]);
}
}
#define BM_ARGS Arg(1)->Arg(64)->Arg(128)->Arg(256)->Range(512, 256 * 1024)
BENCHMARK(BM_HloDfsReachabilityBuild)->BM_ARGS;
BENCHMARK(BM_HloDfsReachabilityCheck)->BM_ARGS;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_dfs_reachability.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_dfs_reachability_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f6581bd4-a731-48f5-97bf-4572aa575fb6 | cpp | tensorflow/tensorflow | collective_device_list | third_party/xla/xla/hlo/ir/collective_device_list.cc | third_party/xla/xla/hlo/ir/collective_device_list_test.cc | #include "xla/hlo/ir/collective_device_list.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
namespace xla {
std::string ReplicaGroupsToString(
absl::Span<const ReplicaGroup> replica_groups) {
std::vector<std::string> replica_group_str;
replica_group_str.reserve(replica_groups.size());
for (const ReplicaGroup& group : replica_groups) {
replica_group_str.push_back(
absl::StrCat("{", absl::StrJoin(group.replica_ids(), ","), "}"));
}
return absl::StrCat("{", absl::StrJoin(replica_group_str, ","), "}");
}
int64_t IotaReplicaGroupList::num_replica_groups() const {
DCHECK_GE(num_replica_groups_, 0);
return num_replica_groups_;
}
int64_t IotaReplicaGroupList::num_devices_per_group() const {
DCHECK_GE(num_devices_per_group_, 0);
return num_devices_per_group_;
}
std::string IotaReplicaGroupList::ToString() const {
return iota_tile_assignment_.ToString();
}
IotaReplicaGroupListProto IotaReplicaGroupList::ToProto() const {
IotaReplicaGroupListProto proto;
proto.set_num_replica_groups(num_replica_groups_);
proto.set_num_devices_per_group(num_devices_per_group_);
proto.mutable_iota_reshape_dims()->Assign(
iota_tile_assignment_.reshape_dims().begin(),
iota_tile_assignment_.reshape_dims().end());
proto.mutable_iota_transpose_perm()->Assign(
iota_tile_assignment_.transpose_perm().begin(),
iota_tile_assignment_.transpose_perm().end());
return proto;
}
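// An IotaReplicaGroupList is a compressed encoding for the common case where
// the replica groups are a permutation of iota(num_groups * group_size): the
// device ids are iota values reshaped by `iota_reshape_dims`, transposed by
// `iota_transpose_perm`, and then chunked into groups. The simplest form,
// IotaReplicaGroupList(2, 10), prints as "[2,10]<=[20]" and expands to the
// groups {0,...,9} and {10,...,19}; a form such as
// IotaReplicaGroupList(2, 10, {4, 5}, {1, 0}) yields a non-contiguous
// assignment of ids to groups instead.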
IotaReplicaGroupList IotaReplicaGroupList::FromProto(
const IotaReplicaGroupListProto& proto) {
return IotaReplicaGroupList(
proto.num_replica_groups(), proto.num_devices_per_group(),
std::vector<int64_t>(proto.iota_reshape_dims().begin(),
proto.iota_reshape_dims().end()),
std::vector<int>(proto.iota_transpose_perm().begin(),
proto.iota_transpose_perm().end()));
}
CollectiveDeviceList::CollectiveDeviceList(
tsl::protobuf::RepeatedPtrField<ReplicaGroup>::const_iterator start,
tsl::protobuf::RepeatedPtrField<ReplicaGroup>::const_iterator end) {
replica_groups_shared_ =
std::make_shared<std::vector<ReplicaGroup>>(start, end);
replica_groups_ = replica_groups_shared_.get();
}
CollectiveDeviceList::CollectiveDeviceList(
absl::Span<const ReplicaGroup> replica_groups) {
replica_groups_shared_ = std::make_shared<std::vector<ReplicaGroup>>(
replica_groups.begin(), replica_groups.end());
replica_groups_ = replica_groups_shared_.get();
}
CollectiveDeviceList::CollectiveDeviceList(
absl::Span<const std::vector<int64_t>> replica_groups) {
auto rg_list = std::make_shared<std::vector<ReplicaGroup>>();
rg_list->reserve(replica_groups.size());
  for (const auto& g : replica_groups) {
auto& group = rg_list->emplace_back();
*group.mutable_replica_ids() = {g.begin(), g.end()};
}
replica_groups_shared_ = std::move(rg_list);
replica_groups_ = replica_groups_shared_.get();
}
CollectiveDeviceList::CollectiveDeviceList() {
replica_groups_shared_ = std::make_shared<std::vector<ReplicaGroup>>();
replica_groups_ = replica_groups_shared_.get();
}
void CollectiveDeviceList::MaybeMaterializeFullReplicaGroupList() const {
if (replica_groups_ != nullptr) {
VLOG(10) << "Replica group list already materialized.";
return;
}
DCHECK(iota_replica_group_list_.has_value());
VLOG(10) << "Materializing full replica group list";
auto rg_list = std::make_shared<std::vector<ReplicaGroup>>();
const int64_t num_replica_groups =
iota_replica_group_list_->num_replica_groups();
rg_list->reserve(num_replica_groups);
auto array = iota_replica_group_list_->ToArray();
DCHECK_EQ(array.num_dimensions(), 2);
const int64_t num_devices_per_group =
iota_replica_group_list_->num_devices_per_group();
DCHECK_EQ(array.end() - array.begin(),
num_devices_per_group * num_replica_groups);
for (auto it = array.begin(), end = array.end(); it != end;
it += num_devices_per_group) {
*rg_list->emplace_back().mutable_replica_ids() = {
it, it + num_devices_per_group};
}
replica_groups_shared_ = std::move(rg_list);
replica_groups_ = replica_groups_shared_.get();
}
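// Materialization is lazy: an iota-backed list keeps its compressed form
// until a caller first asks for the explicit std::vector<ReplicaGroup>, at
// which point the iota tile assignment is expanded exactly once and the
// result is cached in replica_groups_shared_ for all later calls.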
const std::vector<ReplicaGroup>& CollectiveDeviceList::replica_groups() const {
MaybeMaterializeFullReplicaGroupList();
return *replica_groups_;
}
std::string CollectiveDeviceList::ToString() const {
if (iota_replica_group_list_.has_value()) {
return iota_replica_group_list_->ToString();
}
return ReplicaGroupsToString(replica_groups());
}
CollectiveDeviceListProto CollectiveDeviceList::ToProto() const {
CollectiveDeviceListProto proto;
if (iota_replica_group_list_.has_value()) {
*(proto.mutable_iota_replica_group_list()) =
iota_replica_group_list_->ToProto();
return proto;
}
proto.mutable_replica_groups()->Assign(replica_groups().begin(),
replica_groups().end());
return proto;
}
CollectiveDeviceList CollectiveDeviceList::FromProto(
const CollectiveDeviceListProto& proto) {
if (proto.has_iota_replica_group_list()) {
return CollectiveDeviceList(
IotaReplicaGroupList::FromProto(proto.iota_replica_group_list()));
}
if (proto.replica_groups_size() > 0) {
return CollectiveDeviceList(proto.replica_groups().begin(),
proto.replica_groups().end());
}
return CollectiveDeviceList();
}
CollectiveDeviceList CollectiveDeviceList::FromProto(
const HloInstructionProto& proto) {
if (proto.replica_groups_size() > 0) {
VLOG(10) << "Creating collective device list from proto using legacy "
"replica groups field.";
return CollectiveDeviceList(proto.replica_groups().begin(),
proto.replica_groups().end());
}
if (!proto.has_collective_device_list()) {
return CollectiveDeviceList();
}
return FromProto(proto.collective_device_list());
}
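// For backward compatibility, the HloInstructionProto overload prefers the
// legacy repeated `replica_groups` field whenever it is populated, and only
// falls back to the newer `collective_device_list` field (which may carry
// the compressed iota form) when the legacy field is empty.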
} | #include "xla/hlo/ir/collective_device_list.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/hlo.pb.h"
#include "xla/xla_data.pb.h"
namespace xla {
CollectiveDeviceListProto CreateDeviceListProto(
const std::vector<std::vector<int64_t>>& replica_groups) {
CollectiveDeviceListProto proto;
for (const auto& group : replica_groups) {
auto* replica_group = proto.add_replica_groups();
for (const auto& replica : group) {
replica_group->add_replica_ids(replica);
}
}
return proto;
}
TEST(CollectiveDeviceListTest, DefaultListToString) {
CollectiveDeviceList list({{1, 2}, {3, 4}});
ASSERT_EQ(list.ToString(), "{{1,2},{3,4}}");
}
TEST(CollectiveDeviceListTest, DefaultListToString2) {
CollectiveDeviceList list({{1, 2, 3, 4, 5, 6, 7}});
EXPECT_EQ(list.ToString(), "{{1,2,3,4,5,6,7}}");
}
TEST(CollectiveDeviceListTest, DefaultListToProto) {
CollectiveDeviceList list({{1, 2}, {3, 4}});
CollectiveDeviceListProto proto = list.ToProto();
EXPECT_THAT(proto.replica_groups().size(), 2);
EXPECT_THAT(proto.replica_groups(0).replica_ids(),
testing::ElementsAre(1, 2));
EXPECT_THAT(proto.replica_groups(1).replica_ids(),
testing::ElementsAre(3, 4));
EXPECT_FALSE(proto.has_iota_replica_group_list());
}
TEST(CollectiveDeviceListTest, DefaultListToProto2) {
CollectiveDeviceList list({{1, 2, 3, 4, 5, 6, 7}});
CollectiveDeviceListProto proto = list.ToProto();
EXPECT_THAT(proto.replica_groups().size(), 1);
EXPECT_THAT(proto.replica_groups(0).replica_ids(),
testing::ElementsAre(1, 2, 3, 4, 5, 6, 7));
EXPECT_FALSE(proto.has_iota_replica_group_list());
}
TEST(CollectiveDeviceListTest, DefaultListFromProto) {
HloInstructionProto initial_proto;
*(initial_proto.mutable_collective_device_list()) =
CreateDeviceListProto({{1, 2}, {3, 4}});
CollectiveDeviceList list = CollectiveDeviceList::FromProto(initial_proto);
EXPECT_EQ(list.replica_groups().size(), 2);
EXPECT_THAT(list.replica_groups()[0].replica_ids(),
testing::ElementsAre(1, 2));
EXPECT_THAT(list.replica_groups()[1].replica_ids(),
testing::ElementsAre(3, 4));
EXPECT_FALSE(list.iota_replica_group_list().has_value());
}
TEST(CollectiveDeviceListTest, DefaultListFromProto2) {
HloInstructionProto initial_proto;
*(initial_proto.mutable_collective_device_list()) =
CreateDeviceListProto({{1, 2, 3, 4, 5, 6, 7}});
CollectiveDeviceList list = CollectiveDeviceList::FromProto(initial_proto);
EXPECT_EQ(list.replica_groups().size(), 1);
EXPECT_THAT(list.replica_groups()[0].replica_ids(),
testing::ElementsAre(1, 2, 3, 4, 5, 6, 7));
EXPECT_FALSE(list.iota_replica_group_list().has_value());
}
TEST(CollectiveDeviceListTest, IotaListToString) {
CollectiveDeviceList list(IotaReplicaGroupList(2, 10));
EXPECT_EQ(list.ToString(), "[2,10]<=[20]");
}
TEST(CollectiveDeviceListTest, IotaListToString2) {
CollectiveDeviceList list(IotaReplicaGroupList(2, 10, {4, 5}, {1, 0}));
EXPECT_EQ(list.ToString(), "[2,10]<=[4,5]T(1,0)");
}
TEST(CollectiveDeviceListTest, IotaListToProto) {
CollectiveDeviceList list(IotaReplicaGroupList(2, 10));
CollectiveDeviceListProto proto = list.ToProto();
EXPECT_EQ(proto.iota_replica_group_list().num_replica_groups(), 2);
EXPECT_EQ(proto.iota_replica_group_list().num_devices_per_group(), 10);
EXPECT_THAT(proto.iota_replica_group_list().iota_reshape_dims(),
testing::ElementsAre(20));
EXPECT_THAT(proto.iota_replica_group_list().iota_transpose_perm(),
testing::ElementsAre(0));
EXPECT_THAT(proto.replica_groups_size(), 0);
}
TEST(CollectiveDeviceListTest, IotaListToProto2) {
CollectiveDeviceList list(IotaReplicaGroupList(2, 10, {4, 5}, {1, 0}));
CollectiveDeviceListProto proto = list.ToProto();
EXPECT_EQ(proto.iota_replica_group_list().num_replica_groups(), 2);
EXPECT_EQ(proto.iota_replica_group_list().num_devices_per_group(), 10);
EXPECT_THAT(proto.iota_replica_group_list().iota_reshape_dims(),
testing::ElementsAre(4, 5));
EXPECT_THAT(proto.iota_replica_group_list().iota_transpose_perm(),
testing::ElementsAre(1, 0));
EXPECT_THAT(proto.replica_groups_size(), 0);
}
TEST(CollectiveDeviceListTest, IotaListFromProto) {
HloInstructionProto initial_proto;
CollectiveDeviceListProto device_group;
IotaReplicaGroupListProto* iota_replica_group_list =
device_group.mutable_iota_replica_group_list();
iota_replica_group_list->set_num_replica_groups(2);
iota_replica_group_list->set_num_devices_per_group(10);
iota_replica_group_list->add_iota_reshape_dims(20);
iota_replica_group_list->add_iota_transpose_perm(0);
*(initial_proto.mutable_collective_device_list()) = device_group;
CollectiveDeviceList list = CollectiveDeviceList::FromProto(initial_proto);
EXPECT_TRUE(list.iota_replica_group_list().has_value());
EXPECT_EQ(list.iota_replica_group_list()->num_replica_groups(), 2);
EXPECT_EQ(list.iota_replica_group_list()->num_devices_per_group(), 10);
EXPECT_THAT(list.iota_replica_group_list()->reshape_dims(),
testing::ElementsAre(20));
EXPECT_THAT(list.iota_replica_group_list()->transpose_perm(),
testing::ElementsAre(0));
}
TEST(CollectiveDeviceListTest, IotaListFromProto2) {
HloInstructionProto initial_proto;
CollectiveDeviceListProto device_group;
IotaReplicaGroupListProto* iota_replica_group_list =
device_group.mutable_iota_replica_group_list();
iota_replica_group_list->set_num_replica_groups(2);
iota_replica_group_list->set_num_devices_per_group(10);
iota_replica_group_list->add_iota_reshape_dims(4);
iota_replica_group_list->add_iota_reshape_dims(5);
iota_replica_group_list->add_iota_transpose_perm(1);
iota_replica_group_list->add_iota_transpose_perm(0);
*(initial_proto.mutable_collective_device_list()) = device_group;
CollectiveDeviceList list = CollectiveDeviceList::FromProto(initial_proto);
EXPECT_TRUE(list.iota_replica_group_list().has_value());
EXPECT_EQ(list.iota_replica_group_list()->num_replica_groups(), 2);
EXPECT_EQ(list.iota_replica_group_list()->num_devices_per_group(), 10);
EXPECT_THAT(list.iota_replica_group_list()->reshape_dims(),
testing::ElementsAre(4, 5));
EXPECT_THAT(list.iota_replica_group_list()->transpose_perm(),
testing::ElementsAre(1, 0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/collective_device_list.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/collective_device_list_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ec28ff5-400f-4eb8-b45c-475999016815 | cpp | tensorflow/tensorflow | hlo_module_group | third_party/xla/xla/hlo/ir/hlo_module_group.cc | third_party/xla/xla/service/hlo_module_group_test.cc | #include "xla/hlo/ir/hlo_module_group.h"
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace xla {
HloModuleGroup::HloModuleGroup(std::unique_ptr<HloModule> module)
: name_(module->name()) {
push_back(std::move(module));
}
HloModuleGroup::HloModuleGroup(absl::string_view name,
absl::Span<std::unique_ptr<HloModule>> modules)
: name_(name) {
for (auto& module : modules) {
push_back(std::move(module));
}
}
HloModuleGroup::HloModuleGroup(
absl::string_view name, std::vector<std::unique_ptr<HloModule>>&& modules)
: name_(name) {
for (auto& module : modules) {
push_back(std::move(module));
}
}
std::vector<std::unique_ptr<HloModule>> HloModuleGroup::ConsumeModules() {
std::vector<std::unique_ptr<HloModule>> ret_modules = std::move(modules_);
modules_.clear();
module_ptrs_.clear();
return ret_modules;
}
std::string HloModuleGroup::ToString() const {
std::ostringstream s;
s << "HloModuleGroup " << name() << "\n\n";
for (const HloModule* module : modules()) {
s << module->ToString() << "\n";
}
return s.str();
}
HloModuleGroupProto HloModuleGroup::ToProto() const {
HloModuleGroupProto proto;
proto.set_name(name());
for (const HloModule* module : modules()) {
*proto.add_hlo_modules() = module->ToProto();
}
return proto;
}
absl::StatusOr<HloModuleGroup> HloModuleGroup::CreateFromProto(
const HloModuleGroupProto& proto,
absl::Span<const HloModuleConfig> module_configs) {
TF_RET_CHECK(!proto.name().empty()) << "Module group name cannot be empty";
TF_RET_CHECK(proto.hlo_modules_size() > 0)
<< "Module group must have at least one HLO module";
TF_RET_CHECK(proto.hlo_modules_size() == module_configs.size());
std::vector<std::unique_ptr<HloModule>> modules;
for (int i = 0; i < proto.hlo_modules_size(); ++i) {
const HloModuleProto& module_proto = proto.hlo_modules(i);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProto(module_proto, module_configs[i]));
modules.push_back(std::move(module));
}
return HloModuleGroup(proto.name(), absl::MakeSpan(modules));
}
void HloModuleGroup::push_back(std::unique_ptr<HloModule> module) {
module->metadata()->set_module_group_name(name());
modules_.push_back(std::move(module));
module_ptrs_.push_back(modules_.back().get());
}
void HloModuleGroup::ReplaceModule(int index,
std::unique_ptr<HloModule> module) {
modules_.at(index)->MoveMetadataToModule(module.get());
modules_.at(index) = std::move(module);
module_ptrs_.at(index) = modules_.at(index).get();
}
std::ostream& operator<<(std::ostream& out, const HloModuleGroup& group) {
out << group.ToString();
return out;
}
} | #include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_group_metadata.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ::testing::Property;
using ::testing::StrEq;
class HloModuleGroupTest : public HloTestBase {
protected:
HloModuleGroupTest() = default;
};
TEST_F(HloModuleGroupTest, SingleModule) {
const std::string text = R"(
HloModule simple_module
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(text));
HloModuleGroup group(std::move(module));
EXPECT_EQ(group.modules().size(), 1);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup group_copy,
HloModuleGroup::CreateFromProto(
group.ToProto(), {group.module(0).config()}));
EXPECT_EQ(group_copy.modules().size(), 1);
EXPECT_THAT(
group_copy.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
std::vector<std::unique_ptr<HloModule>> modules = group.ConsumeModules();
EXPECT_EQ(modules.size(), 1);
EXPECT_EQ(group.modules().size(), 0);
}
TEST_F(HloModuleGroupTest, MultipleModules) {
const std::string text_0 = R"(
HloModule module0
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
const std::string text_1 = R"(
HloModule module1
ENTRY %entry (a: f32[]) -> f32[] {
ROOT %a = f32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_0,
ParseAndReturnVerifiedModule(text_0));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_1,
ParseAndReturnVerifiedModule(text_1));
std::vector<std::unique_ptr<HloModule>> modules;
modules.push_back(std::move(module_0));
modules.push_back(std::move(module_1));
HloModuleGroup group(TestName(), absl::MakeSpan(modules));
EXPECT_EQ(group.modules().size(), 2);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
EXPECT_THAT(group.module(1).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter()));
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup group_copy,
HloModuleGroup::CreateFromProto(
group.ToProto(), {group.module(0).config(),
group.module(1).config()}));
EXPECT_EQ(group_copy.modules().size(), 2);
}
TEST_F(HloModuleGroupTest, BuildModuleGroupByPushBack) {
const std::string text_0 = R"(
HloModule module0
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
const std::string text_1 = R"(
HloModule module1
ENTRY %entry (a: f32[]) -> f32[] {
ROOT %a = f32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_0,
ParseAndReturnVerifiedModule(text_0));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_1,
ParseAndReturnVerifiedModule(text_1));
HloModuleGroup group(TestName());
group.push_back(std::move(module_0));
group.push_back(std::move(module_1));
EXPECT_EQ(group.modules().size(), 2);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
EXPECT_THAT(group.module(1).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter()));
}
TEST_F(HloModuleGroupTest, ModuleGroupCompanionOrder) {
constexpr char text[] = R"(
HloModule module_%d
while_cond {
param = s32[] parameter(0)
ROOT p = pred[] constant(true)
}
while_body {
param = s32[] parameter(0)
token.s = token[] after-all()
token.r = token[] after-all()
send = (s32[], u32[], token[]) send(param, token.s), channel_id=%d
send-done = token[] send-done(send), channel_id=%d
recv = (s32[], u32[], token[]) recv(token.r), channel_id=%d
recv-done = (s32[], token[]) recv-done(recv), channel_id=%d
ROOT data = s32[] get-tuple-element(recv-done), index=0
}
ENTRY entry {
while_init = s32[] constant(1)
ROOT while = s32[] while(while_init), condition=while_cond, body=while_body
}
)";
const int64_t kTrialCount = 5;
const int64_t kDeviceCount = 10;
std::vector<int64_t> companion_order;
for (int64_t t = 0; t < kTrialCount; ++t) {
HloModuleGroup group(TestName());
for (int64_t i = 0; i < kDeviceCount; ++i) {
const int64_t send_channel = i;
const int64_t recv_channel = i == 0 ? kDeviceCount - 1 : i - 1;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(absl::StrFormat(
text, i, send_channel, send_channel,
recv_channel, recv_channel)));
group.push_back(std::move(module));
}
ASSERT_EQ(group.modules().size(), kDeviceCount);
TF_ASSERT_OK_AND_ASSIGN(auto metadata,
HloModuleGroupMetadata::Build(group.modules()));
ASSERT_EQ(metadata->companion_sets().size(), 1);
std::vector<int64_t> module_ids;
const auto& companion_sets = *metadata->companion_sets()[0];
module_ids.reserve(companion_sets.size());
for (HloInstruction* companion : companion_sets) {
module_ids.push_back(metadata->GetModuleId(companion->GetModule()));
}
if (t == 0) {
companion_order = module_ids;
} else {
EXPECT_TRUE(absl::c_equal(companion_order, module_ids));
}
}
}
TEST_F(HloModuleGroupTest, ReplaceModuleMetadata) {
auto old_module = CreateNewVerifiedModule();
int old_module_id = old_module->unique_id();
old_module->metadata()->RecordPassStart();
TF_EXPECT_OK(old_module->metadata()->set_current_pass_name("fake pass"));
HloModuleGroup group(std::move(old_module));
EXPECT_EQ(group.module(0).metadata()->proto().module_group_name(),
group.name());
auto new_module = CreateNewVerifiedModule();
group.ReplaceModule(0, std::move(new_module));
EXPECT_NE(group.module(0).unique_id(), old_module_id);
const HloModuleMetadataProto& module_metadata =
group.module(0).metadata()->proto();
EXPECT_EQ(module_metadata.canonical_module_id(), old_module_id);
const HloPassMetadata& pass_metadata =
*module_metadata.pass_metadata().rbegin();
EXPECT_THAT(pass_metadata,
Property(&HloPassMetadata::pass_name, StrEq("fake pass")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_module_group.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_group_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ec92ce8c-32f6-44bf-8aec-55c051a57685 | cpp | tensorflow/tensorflow | hlo_reachability | third_party/xla/xla/hlo/ir/hlo_reachability.cc | third_party/xla/xla/service/hlo_reachability_test.cc | #include "xla/hlo/ir/hlo_reachability.h"
#include <memory>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
HloReachabilityMap::HloReachabilityMap(
absl::Span<const HloInstruction* const> instructions)
: bit_sets_(instructions.size(), BitSet(instructions.size())) {
indices_.reserve(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
bit_sets_[i].Set(i);
indices_[GetKey(instructions[i])] = i;
}
}
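// Editor's note (derived from the code below): bit_sets_[i] is the set of
// instructions that can reach instruction i; every instruction reaches
// itself (set in the constructor above). SetReachabilityToUnion recomputes
// one such row as the union of the rows of `inputs` and reports whether the
// row actually changed.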
bool HloReachabilityMap::SetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
Index index = GetIndex(instruction);
BitSet& bit_set = bit_sets_[index];
tmp_bit_set_ = bit_set;
SetReachabilityToUnionHelper(inputs, index);
return bit_set != tmp_bit_set_;
}
void HloReachabilityMap::FastSetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
SetReachabilityToUnionHelper(inputs, GetIndex(instruction));
}
void HloReachabilityMap::FastSetReachabilityToUnion(
absl::Span<const Index> input_indices, Index index) {
SetReachabilityToUnionHelper(input_indices, index);
}
void HloReachabilityMap::SetReachabilityToUnionHelper(
absl::Span<const HloInstruction* const> inputs, Index index) {
absl::InlinedVector<Index, 16> input_indices;
input_indices.reserve(inputs.size());
for (const HloInstruction* input : inputs) {
input_indices.push_back(GetIndex(input));
}
SetReachabilityToUnionHelper(input_indices, index);
}
void HloReachabilityMap::SetReachabilityToUnionHelper(
absl::Span<const Index> input_indices, Index index) {
BitSet& bit_set = bit_sets_[index];
if (!absl::c_linear_search(input_indices, index)) {
bit_set.SetToZero();
}
bit_set.Set(index);
for (Index input_index : input_indices) {
if (input_index != index) {
bit_set |= bit_sets_[input_index];
}
}
}
void HloReachabilityMap::Replace(const HloInstruction* original,
const HloInstruction* replacement) {
if (GetKey(original) != GetKey(replacement)) {
indices_[GetKey(replacement)] = GetIndex(original);
indices_.erase(GetKey(original));
}
}
std::unique_ptr<HloReachabilityMap> HloReachabilityMap::BuildWithRestrictions(
const HloComputation* computation,
absl::FunctionRef<void(const HloInstruction*,
std::vector<HloInstruction*>*)>
add_dependencies) {
const auto& all = computation->MakeInstructionPostOrder();
auto result = std::make_unique<HloReachabilityMap>(all);
std::vector<HloInstruction*> inputs;
for (const HloInstruction* hlo : all) {
inputs.clear();
add_dependencies(hlo, &inputs);
result->FastSetReachabilityToUnion(inputs, hlo);
}
return result;
}
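// Builds full reachability for `computation` in one post-order pass: each
// instruction's bit set is the union of the bit sets of its operands, its
// control predecessors, and any same-channel collectives.
// Typical usage (sketch, mirroring the unit tests later in this record):
//   auto map = HloReachabilityMap::Build(computation);
//   if (map->IsReachable(a, b)) { ... }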
std::unique_ptr<HloReachabilityMap> HloReachabilityMap::Build(
const HloComputation* computation) {
HloComputation::ChannelDependencies channel_dependencies =
computation->ComputeChannelDependencies();
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder(channel_dependencies);
auto result = std::make_unique<HloReachabilityMap>(instructions);
auto get_bit_set = [&](const HloInstruction* instruction) -> BitSet& {
return result->bit_sets_[result->GetIndex(instruction)];
};
for (const HloInstruction* instruction : instructions) {
BitSet& bit_set = get_bit_set(instruction);
auto add_dependencies = [&](const HloInstruction* instruction) {
for (const HloInstruction* operand : instruction->operands()) {
bit_set |= get_bit_set(operand);
}
for (const HloInstruction* predecessor :
instruction->control_predecessors()) {
bit_set |= get_bit_set(predecessor);
}
};
add_dependencies(instruction);
auto it = channel_dependencies.find(instruction);
if (it != channel_dependencies.end()) {
absl::c_for_each(it->second, add_dependencies);
}
}
return result;
}
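// Incrementally repairs the map after an edge change: re-derives
// `instruction`'s row from its operands and control predecessors and, as
// long as rows keep changing, propagates the update through users and
// control successors.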
void HloReachabilityMap::UpdateReachabilityThroughInstruction(
const HloInstruction* instruction) {
std::queue<const HloInstruction*> worklist;
worklist.push(instruction);
std::vector<HloInstruction*> inputs;
while (!worklist.empty()) {
const HloInstruction* item = worklist.front();
worklist.pop();
inputs.assign(item->operands().begin(), item->operands().end());
inputs.insert(inputs.end(), item->control_predecessors().begin(),
item->control_predecessors().end());
if (SetReachabilityToUnion(inputs, item)) {
for (const HloInstruction* user : item->users()) {
worklist.push(user);
}
for (const HloInstruction* succ : item->control_successors()) {
worklist.push(succ);
}
}
}
}
} | #include "xla/hlo/ir/hlo_reachability.h"
#include <memory>
#include <set>
#include <string>
#include <string_view>
#include "absl/random/random.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class HloReachabilityTest : public HloTestBase {};
TEST_F(HloReachabilityTest, Reachability) {
auto builder = HloComputation::Builder(TestName());
auto a = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto b = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto c = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto d = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto e = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloReachabilityMap reachability({a, b, c, d, e});
reachability.SetReachable(a, a);
EXPECT_TRUE(reachability.SetReachabilityToUnion({a}, b));
EXPECT_TRUE(reachability.SetReachabilityToUnion({a}, c));
EXPECT_TRUE(reachability.SetReachabilityToUnion({b, c}, d));
EXPECT_TRUE(reachability.SetReachabilityToUnion({c}, e));
EXPECT_TRUE(reachability.IsReachable(a, a));
EXPECT_TRUE(reachability.IsReachable(a, b));
EXPECT_TRUE(reachability.IsReachable(a, c));
EXPECT_TRUE(reachability.IsReachable(a, d));
EXPECT_TRUE(reachability.IsReachable(a, e));
EXPECT_FALSE(reachability.IsReachable(b, a));
EXPECT_TRUE(reachability.IsReachable(b, b));
EXPECT_FALSE(reachability.IsReachable(b, c));
EXPECT_TRUE(reachability.IsReachable(b, d));
EXPECT_FALSE(reachability.IsReachable(b, e));
EXPECT_FALSE(reachability.IsReachable(e, a));
EXPECT_FALSE(reachability.IsReachable(e, b));
EXPECT_FALSE(reachability.IsReachable(e, c));
EXPECT_FALSE(reachability.IsReachable(e, d));
EXPECT_TRUE(reachability.IsReachable(e, e));
EXPECT_FALSE(reachability.SetReachabilityToUnion({a}, b));
EXPECT_FALSE(reachability.SetReachabilityToUnion({b, c}, d));
}
TEST_F(HloReachabilityTest, NonTrivialReachability) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, constant2));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kMultiply, add, exp));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(mul));
TF_CHECK_OK(add->AddControlDependencyTo(exp));
auto reachability = HloReachabilityMap::Build(computation);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_TRUE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_TRUE(reachability->IsReachable(constant1, copy));
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_TRUE(reachability->IsReachable(constant2, negate));
EXPECT_TRUE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_TRUE(reachability->IsReachable(constant2, copy));
EXPECT_FALSE(reachability->IsReachable(exp, constant1));
EXPECT_FALSE(reachability->IsReachable(exp, constant2));
EXPECT_FALSE(reachability->IsReachable(exp, add));
EXPECT_FALSE(reachability->IsReachable(exp, negate));
EXPECT_TRUE(reachability->IsReachable(exp, exp));
EXPECT_TRUE(reachability->IsReachable(exp, mul));
EXPECT_TRUE(reachability->IsReachable(exp, copy));
EXPECT_FALSE(reachability->IsReachable(mul, constant1));
EXPECT_FALSE(reachability->IsReachable(mul, constant2));
EXPECT_FALSE(reachability->IsReachable(mul, add));
EXPECT_FALSE(reachability->IsReachable(mul, negate));
EXPECT_FALSE(reachability->IsReachable(mul, exp));
EXPECT_TRUE(reachability->IsReachable(mul, mul));
EXPECT_FALSE(reachability->IsReachable(mul, copy));
EXPECT_TRUE(reachability->IsConnected(constant1, copy));
EXPECT_TRUE(reachability->IsConnected(copy, constant1));
EXPECT_FALSE(reachability->IsConnected(negate, add));
EXPECT_FALSE(reachability->IsConnected(add, negate));
ASSERT_IS_OK(add->RemoveControlDependencyTo(exp));
reachability->UpdateReachabilityThroughInstruction(exp);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_FALSE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_FALSE(reachability->IsReachable(constant1, copy));
ASSERT_IS_OK(constant2->ReplaceUseWith(negate, constant1));
reachability->UpdateReachabilityThroughInstruction(negate);
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_FALSE(reachability->IsReachable(constant2, negate));
EXPECT_FALSE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_FALSE(reachability->IsReachable(constant2, copy));
}
TEST_F(HloReachabilityTest, ChannelReachability) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
HloComputation::Builder builder("ChannelReachability");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
auto send =
builder.AddInstruction(HloInstruction::CreateSend(param, token0, 1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
auto recv =
builder.AddInstruction(HloInstruction::CreateRecv(shape, token1, 1));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto module = CreateNewVerifiedModule();
module->mutable_config().set_use_spmd_partitioning(false);
module->mutable_config().set_static_device_assignment(DeviceAssignment(1, 2));
auto computation = module->AddEntryComputation(builder.Build(recv_done));
auto reachability = HloReachabilityMap::Build(computation);
EXPECT_FALSE(reachability->IsReachable(param, recv_done));
EXPECT_FALSE(reachability->IsReachable(send, recv));
EXPECT_FALSE(reachability->IsReachable(send_done, recv));
}
TEST_F(HloReachabilityTest, ReplaceInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY entry {
p0 = f32[28,28]{1,0} parameter(0)
ROOT add = f32[28,28]{1,0} add(p0, p0)
})")
.value();
auto computation = module->entry_computation();
auto reachability = HloReachabilityMap::Build(computation);
auto* add = module->entry_computation()->root_instruction();
auto* p0 = add->operand(0);
EXPECT_TRUE(reachability->IsReachable(p0, add));
reachability->Replace(add, add);
EXPECT_TRUE(reachability->IsReachable(p0, add));
auto* fusion = computation->AddInstruction(HloInstruction::CreateFusion(
add->shape(), HloInstruction::FusionKind::kLoop, add));
EXPECT_FALSE(reachability->IsPresent(fusion));
EXPECT_TRUE(reachability->IsReachable(p0, add));
reachability->Replace(add, fusion);
EXPECT_FALSE(reachability->IsPresent(add));
EXPECT_TRUE(reachability->IsReachable(p0, fusion));
}
}
class HloReachabilityMapBitSetBenchmark {
public:
explicit HloReachabilityMapBitSetBenchmark(int size) : a_(size), b_(size) {
absl::BitGen gen;
for (int i = 0; i < size; ++i) {
if (absl::Bernoulli(gen, 0.5)) a_.Set(i);
if (absl::Bernoulli(gen, 0.5)) b_.Set(i);
}
}
void Union() { a_ |= b_; }
private:
HloReachabilityMap::BitSet a_;
HloReachabilityMap::BitSet b_;
};
namespace {
void BM_HloReachabilityBitSetUnion(benchmark::State& state) {
HloReachabilityMapBitSetBenchmark bm(state.range(0));
for (auto s : state) {
bm.Union();
}
}
#define BM_ARGS Arg(1)->Arg(64)->Arg(128)->Arg(256)->Range(512, 256 * 1024)
BENCHMARK(BM_HloReachabilityBitSetUnion)->BM_ARGS;
class HloReachabilityBenchmark {
public:
HloReachabilityBenchmark(int size, std::string_view name) : name_(name) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(name);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
HloInstruction* prev = constant;
for (int i = 1; i < size; ++i) {
prev = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, prev));
}
HloModuleConfig hlo_config;
module_ = std::make_unique<HloModule>(name_, hlo_config);
computation_ =
module_->AddEntryComputation(builder.Build(prev));
}
std::unique_ptr<HloReachabilityMap> Build() {
return HloReachabilityMap::Build(computation_);
}
private:
std::unique_ptr<HloModule> module_;
HloComputation* computation_;
const std::string name_;
};
void BM_HloReachabilityBuild(benchmark::State& state) {
HloReachabilityBenchmark bm(state.range(0), state.name());
for (auto s : state) {
benchmark::DoNotOptimize(bm.Build());
}
}
BENCHMARK(BM_HloReachabilityBuild)->BM_ARGS;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_reachability.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_reachability_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
30284e41-2319-4f52-8b80-30cb28be38ab | cpp | tensorflow/tensorflow | dynamic_parameter_binding | third_party/xla/xla/hlo/ir/dynamic_parameter_binding.cc | third_party/xla/xla/service/dynamic_parameter_binding_test.cc | #include "xla/hlo/ir/dynamic_parameter_binding.h"
#include <optional>
#include <ostream>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
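// Records that `dynamic_parameter` carries the runtime size of
// `dynamic_dimension`. The TF_RET_CHECK below rejects a duplicate binding
// for the same dynamic dimension, since emplace() reports whether the
// insertion took place.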
absl::Status DynamicParameterBinding::Bind(
const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension) {
auto result = bindings_.emplace(dynamic_dimension, dynamic_parameter);
TF_RET_CHECK(result.second);
return absl::OkStatus();
}
std::optional<DynamicParameterBinding::DynamicSizeParameter>
DynamicParameterBinding::GetBinding(
const DynamicDimension& dynamic_dimension) const {
auto param_iter = bindings_.find(dynamic_dimension);
if (param_iter == bindings_.end()) {
return std::nullopt;
}
return param_iter->second;
}
std::string DynamicParameterBinding::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("DynamicParameterBinding: ");
for (const auto& binding : bindings_) {
const DynamicDimension& dynamic_dimension = binding.first;
const DynamicSizeParameter& dynamic_param = binding.second;
pieces.push_back(absl::StrFormat(
" -- Input param number %lld at %s has dim %lld as dynamic"
" dimension, which is represented by param number %lld at "
"%s",
dynamic_dimension.parameter_num,
dynamic_dimension.parameter_index.ToString(),
dynamic_dimension.dimension, dynamic_param.parameter_num,
dynamic_param.parameter_index.ToString()));
}
return absl::StrJoin(pieces, "\n");
}
absl::Status DynamicParameterBinding::ForEachBinding(BindingFn fn) const {
for (const auto& binding : bindings_) {
TF_RETURN_IF_ERROR(fn(binding.second, binding.first));
}
return absl::OkStatus();
}
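// Sanity-checks every binding against `computation`: parameter numbers must
// be in range, the shape indices must be valid for the corresponding
// parameter shapes, and the bound dimension must be smaller than the rank of
// the dynamic subshape.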
absl::Status DynamicParameterBinding::Verify(
const HloComputation& computation) const {
return ForEachBinding([&](const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension)
-> absl::Status {
TF_RET_CHECK(dynamic_parameter.parameter_num >= 0 &&
dynamic_parameter.parameter_num <
computation.num_parameters());
TF_RET_CHECK(dynamic_dimension.parameter_num <
computation.num_parameters());
TF_RET_CHECK(ShapeUtil::IndexIsValid(
computation.parameter_instruction(dynamic_parameter.parameter_num)
->shape(),
dynamic_parameter.parameter_index));
TF_RET_CHECK(ShapeUtil::IndexIsValid(
computation.parameter_instruction(dynamic_dimension.parameter_num)
->shape(),
dynamic_dimension.parameter_index));
TF_RET_CHECK(
dynamic_dimension.dimension <
ShapeUtil::GetSubshape(
computation.parameter_instruction(dynamic_dimension.parameter_num)
->shape(),
dynamic_dimension.parameter_index)
.rank());
return absl::OkStatus();
});
}
std::ostream& operator<<(std::ostream& out,
const DynamicParameterBinding& binding) {
out << binding.ToString();
return out;
}
} | #include "xla/hlo/ir/dynamic_parameter_binding.h"
#include <memory>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using DynamicParameterBindingTest = HloTestBase;
TEST_F(DynamicParameterBindingTest, SimpleBinding) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[10] parameter(1)
ROOT root = (f32[], f32[10]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {}},
DynamicParameterBinding::DynamicDimension{1, {}, 0}));
auto test = [&](const DynamicParameterBinding& binding) {
std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
binding.GetBinding(
DynamicParameterBinding::DynamicDimension{1,
{},
0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
TEST_F(DynamicParameterBindingTest, TupleBinding) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[10]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[10] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[10]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 0}));
auto test = [&](const DynamicParameterBinding& binding) {
std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
binding.GetBinding(
DynamicParameterBinding::DynamicDimension{0,
{1},
0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({0}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
TEST_F(DynamicParameterBindingTest, TupleBindingWithMultiDimension) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[10, 10]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[10, 10] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[10, 10]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 0}));
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 1}));
auto test = [&](const DynamicParameterBinding& binding) {
std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
binding.GetBinding(
DynamicParameterBinding::DynamicDimension{0,
{1},
0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({0}));
std::optional<DynamicParameterBinding::DynamicSizeParameter> param2 =
binding.GetBinding(
DynamicParameterBinding::DynamicDimension{0,
{1},
                                                      1});
EXPECT_TRUE(param2);
EXPECT_EQ(param2->parameter_num, 0);
EXPECT_EQ(param2->parameter_index, ShapeIndex({0}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/dynamic_parameter_binding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_parameter_binding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
382f3e30-b23c-401e-9945-f8020eb0786a | cpp | tensorflow/tensorflow | hlo_computation | third_party/xla/xla/hlo/ir/hlo_computation.cc | third_party/xla/xla/service/hlo_computation_test.cc | #include "xla/hlo/ir/hlo_computation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <queue>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/map_util.h"
#include "xla/printer.h"
#include "xla/service/mapped_ptr_container_sorter.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/iterator_range.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
using absl::StrCat;
enum class VisitState { kNew = 0, kVisiting = 1, kVisited = 2 };
static std::ostream& operator<<(std::ostream& os, const VisitState& state) {
switch (state) {
case VisitState::kNew:
os << "new";
break;
case VisitState::kVisiting:
os << "visiting";
break;
case VisitState::kVisited:
os << "visited";
break;
}
return os;
}
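// Densely packed map from instruction handle to VisitState for the
// post-order routines below. Each state occupies two bits, so one 64-bit
// word holds the states of 32 instructions.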
class HloComputation::VisitMap {
public:
VisitMap() = default;
explicit VisitMap(int capacity) : size_(capacity) {
int num_words = (capacity + 31) / 32;
bits_.resize(num_words);
bit_ptr_ = bits_.empty() ? nullptr : bits_.data();
}
using Handle = uint32_t;
VisitState GetState(Handle h) const {
DCHECK_LT(h, size_);
uint32_t word = (h / 32);
uint32_t shift = (h % 32) << 1;
return static_cast<VisitState>((bit_ptr_[word] >> shift) & 0x3);
}
void SetState(Handle h, VisitState new_state) {
DCHECK_LT(h, size_);
uint32_t word = (h / 32);
uint32_t shift = (h % 32) << 1;
uint64_t mask = ~(3ull << shift);
uint64_t val = static_cast<uint64_t>(new_state);
bit_ptr_[word] = (bit_ptr_[word] & mask) | (val << shift);
}
private:
absl::InlinedVector<uint64_t, 1> bits_;
uint64_t* bit_ptr_ = nullptr;
int size_ = 0;
};
std::unique_ptr<HloComputation> HloComputation::Builder::Build(
HloInstruction* root_instruction) {
int parameter_count = 0;
for (auto& instruction : instructions_) {
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_count++;
}
}
HloInstruction* root =
root_instruction ? root_instruction : last_added_instruction();
CHECK_NE(nullptr, root);
return absl::WrapUnique(
new HloComputation(name_, parameter_count, &instructions_, root));
}
HloComputation::HloComputation(
const std::string& name, int parameter_count,
std::vector<std::unique_ptr<HloInstruction>>* instructions,
HloInstruction* root_instruction)
: unique_id_(-1),
root_instruction_(root_instruction),
instruction_count_(0),
name_(NameUniquer::GetSanitizedName(name)) {
param_instructions_.resize(parameter_count, nullptr);
bool root_found = false;
for (auto& instruction : *instructions) {
if (instruction->opcode() == HloOpcode::kParameter) {
int64_t param_no = instruction->parameter_number();
CHECK(param_no >= 0 && param_no < parameter_count)
<< "\nERROR: invalid parameter number. Expected [0, "
<< parameter_count << "), got " << param_no;
CHECK(param_instructions_[param_no] == nullptr)
<< "\nERROR: parameter number " << param_no
<< " already allocated in this computation";
param_instructions_[param_no] = instruction.get();
}
root_found |= instruction.get() == root_instruction_;
AddInstructionInternal(std::move(instruction));
}
CHECK(root_found)
<< "\nERROR: root instruction is not present in computation.";
root_instruction_->MarkAsRoot();
}
HloComputation::~HloComputation() {
if (FusionInstruction() != nullptr) {
CHECK(FusionInstruction()->fused_instructions_computation() == this);
FusionInstruction()->ClearCalledComputations();
}
if (IsAsyncComputation()) {
CHECK(async_start_->async_wrapped_computation() == this);
async_start_->ClearCalledComputations();
}
Cleanup();
for (const auto& i : instructions_) {
delete i.inst();
}
}
void HloComputation::SetInstruction(HloInstruction* instruction,
InstructionType type) {
static_assert(alignof(HloInstruction) == kInstructionTypeMask + 1,
"HloInstruction should be aligned as a QWORD");
DCHECK(type != InstructionType::kUnset)
<< "Set instruction must be called with a valid type, not kUnset.";
DCHECK(instruction_type() == InstructionType::kUnset ||
instruction_type() == type)
<< "Unexpected instruction type. Current type is "
<< static_cast<int>(instruction_type()) << " and it cannot be reset to "
<< static_cast<int>(type);
if (instruction == nullptr) {
type = instruction_type();
}
instruction_and_type_ =
reinterpret_cast<uintptr_t>(instruction) | static_cast<uintptr_t>(type);
}
HloInstruction* HloComputation::AddInstruction(
std::unique_ptr<HloInstruction> instruction, absl::string_view new_name) {
CHECK(instruction->opcode() != HloOpcode::kParameter)
<< "Parameter instructions cannot be added to a computation after "
<< "it has been built";
if (!new_name.empty()) {
instruction->SetAndSanitizeName(new_name);
}
return AddInstructionInternal(std::move(instruction));
}
HloInstruction* HloComputation::AddInstruction(
std::unique_ptr<HloInstruction> instruction, const OpMetadata* metadata) {
if (metadata != nullptr) {
instruction->set_metadata(*metadata);
}
return AddInstruction(std::move(instruction));
}
HloInstruction* HloComputation::AddInstruction(
std::unique_ptr<HloInstruction> instruction, const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes) {
if (metadata != nullptr) {
instruction->set_metadata(*metadata);
}
if (frontend_attributes != nullptr) {
instruction->set_frontend_attributes(*frontend_attributes);
}
return AddInstruction(std::move(instruction));
}
HloInstruction* HloComputation::AddInstructionInternal(
std::unique_ptr<HloInstruction> instruction) {
if (parent() != nullptr) {
instruction->UniquifyName(&parent()->instruction_name_uniquer());
instruction->SetUniqueId(parent()->NewUniqueInstructionId());
}
instruction->set_parent(this);
HloInstruction* pinst = instruction.release();
HloInstructionInfo info;
info.opcode_ = pinst->opcode();
info.inst_ = pinst;
VLOG(2) << "Adding instruction " << pinst << " " << pinst->name()
<< " from computation " << name() << " opcode " << info.opcode();
uint32_t index = instructions_.size();
instruction_count_++;
pinst->index_in_parent_ = index;
instructions_.push_back(info);
return pinst;
}
HloInstruction* HloComputation::AddParameter(
std::unique_ptr<HloInstruction> instruction) {
CHECK(instruction->opcode() == HloOpcode::kParameter);
CHECK(!IsFusionComputation() ||
FusionInstruction()->operand_count() == param_instructions_.size());
instruction->set_parent(this);
param_instructions_.push_back(instruction.get());
AddInstructionInternal(std::move(instruction));
return instructions_.back().get();
}
HloInstruction* HloComputation::AddEntryComputationParameter(
std::unique_ptr<HloInstruction> instruction) {
CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
CHECK_EQ(instruction->parameter_number(), num_parameters());
CHECK(parent()->entry_computation() == this);
HloModuleConfig config = parent()->config();
config.mutable_entry_computation_layout()->add_parameter_layout(
ShapeLayout(instruction->shape()));
parent()->set_config(config);
instruction->set_parent(this);
param_instructions_.push_back(instruction.get());
AddInstructionInternal(std::move(instruction));
return instructions_.back().get();
}
absl::Status HloComputation::ReplaceEntryComputationParameter(
int64_t param_no, HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> instruction) {
CHECK_GE(param_no, 0);
CHECK_LT(param_no, param_instructions_.size());
CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
CHECK(parent()->entry_computation() == this);
HloModuleConfig config = parent()->config();
*config.mutable_entry_computation_layout()->mutable_parameter_layout(
param_no) = ShapeLayout(instruction->shape());
parent()->set_config(config);
instruction->set_parent(this);
param_instructions_[param_no] = instruction.get();
AddInstructionInternal(std::move(instruction));
return ForceRemoveInstruction(old_instruction);
}
absl::Status HloComputation::RemoveParameter(int64_t param_no) {
CHECK_GE(param_no, 0);
CHECK_LT(param_no, param_instructions_.size());
HloInstruction* param_instruction = param_instructions_[param_no];
auto param_instruction_iterator = param_instructions_.begin() + param_no;
param_instructions_.erase(param_instruction_iterator);
TF_RETURN_IF_ERROR(ForceRemoveInstruction(param_instruction));
while (param_no < param_instructions_.size()) {
param_instruction = param_instructions_[param_no];
HloInstruction* new_instr =
AddInstructionInternal(HloInstruction::CreateParameter(
param_no, param_instruction->shape(), StrCat("param_", param_no)));
TF_RETURN_IF_ERROR(param_instruction->ReplaceAllUsesWith(new_instr));
param_instructions_[param_no] = new_instr;
TF_RETURN_IF_ERROR(ForceRemoveInstruction(param_instruction));
param_no++;
}
return absl::OkStatus();
}
HloInstruction* HloComputation::ReplaceParameter(
int64_t param_no, std::unique_ptr<HloInstruction> instruction) {
CHECK_GE(param_no, 0);
CHECK_LT(param_no, param_instructions_.size());
CHECK(instruction->opcode() == HloOpcode::kParameter);
CHECK(!IsFusionComputation() ||
FusionInstruction()->operand_count() == param_instructions_.size());
instruction->set_parent(this);
HloInstruction* new_instruction =
AddInstructionInternal(std::move(instruction));
HloInstruction* old_instruction = param_instructions_[param_no];
TF_CHECK_OK(
old_instruction->ReplaceAllUsesWithDifferentShape(new_instruction));
param_instructions_[param_no] = new_instruction;
TF_CHECK_OK(ForceRemoveInstruction(old_instruction));
return new_instruction;
}
absl::Status HloComputation::RemoveUnusedParametersFromFusedComputation() {
return RemoveUnusedParametersImpl(false);
}
absl::Status HloComputation::RemoveUnusedParametersFromAnyComputation() {
return RemoveUnusedParametersImpl(true);
}
absl::Status HloComputation::RemoveUnusedParametersImpl(bool allow_non_fusion) {
CHECK(allow_non_fusion || IsFusionComputation());
int64_t removed = 0;
for (int64_t i = 0; i < param_instructions_.size(); ++i) {
HloInstruction* param_instruction = param_instructions_[i];
if (param_instruction->IsDead()) {
TF_RETURN_IF_ERROR(
RemoveInstructionImpl(param_instruction, allow_non_fusion));
++removed;
continue;
}
if (removed > 0) {
const int64_t param_no = i - removed;
HloInstruction* new_instr = AddInstructionInternal(
HloInstruction::CreateParameter(param_no, param_instruction->shape(),
StrCat("param_", param_no)));
TF_RETURN_IF_ERROR(param_instruction->ReplaceAllUsesWith(new_instr));
param_instructions_[param_no] = new_instr;
TF_RETURN_IF_ERROR(
RemoveInstructionImpl(param_instruction, allow_non_fusion));
}
}
param_instructions_.resize(param_instructions_.size() - removed);
return absl::OkStatus();
}
bool HloComputation::IsSafelyRemovable(const HloInstruction* instruction,
bool ignore_control_dependency) {
if (!ignore_control_dependency && instruction->HasControlDependencies()) {
return false;
}
if (instruction->opcode() == HloOpcode::kParameter &&
!IsFusionComputation()) {
return false;
}
return true;
}
bool HloComputation::HasSideEffect() const {
for (auto* instruction : instructions()) {
if (instruction->HasSideEffect()) {
return true;
}
}
return false;
}
bool HloComputation::IsMarkedAsDead(const HloInstruction* inst) {
return inst->IsMarkedAsDead();
}
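// Removes `instruction` and any operands that become dead as a result,
// driving a worklist through the operand graph. Dead parameters are
// collected separately and removed last, in descending parameter-number
// order, so RemoveParameter's renumbering of later parameters stays valid.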
absl::Status HloComputation::RemoveInstructionAndUnusedOperands(
HloInstruction* instruction,
std::optional<absl::FunctionRef<void(HloInstruction*)>> cleanup,
bool ignore_control_dependencies) {
TF_RET_CHECK(root_instruction() != instruction);
TF_RET_CHECK(instruction->IsDead());
TF_RET_CHECK(IsSafelyRemovable(instruction, ignore_control_dependencies))
<< "Cannot remove instruction: " << instruction->ToString();
absl::flat_hash_set<HloInstruction*> removed;
std::queue<HloInstruction*> worklist;
worklist.push(instruction);
std::vector<HloInstruction*> parameters_to_be_removed;
while (!worklist.empty()) {
HloInstruction* item = worklist.front();
worklist.pop();
if (removed.contains(item) || !item->IsDead() ||
!IsSafelyRemovable(item, ignore_control_dependencies) ||
(item->HasSideEffect() && item != instruction)) {
continue;
}
if (ignore_control_dependencies) {
TF_RETURN_IF_ERROR(item->SafelyDropAllControlDependencies());
} else if (item->HasControlDependencies()) {
continue;
}
for (int i = 0; i < item->operand_count(); ++i) {
worklist.push(item->mutable_operand(i));
}
if (cleanup != std::nullopt) {
(*cleanup)(item);
}
if (item->opcode() == HloOpcode::kParameter) {
parameters_to_be_removed.push_back(item);
} else {
TF_RETURN_IF_ERROR(RemoveInstruction(item));
}
removed.insert(item);
}
std::sort(parameters_to_be_removed.begin(), parameters_to_be_removed.end(),
[](HloInstruction* a, HloInstruction* b) {
return a->parameter_number() > b->parameter_number();
});
for (HloInstruction* param : parameters_to_be_removed) {
int64_t parameter_number = param->parameter_number();
TF_RETURN_IF_ERROR(RemoveParameter(parameter_number));
if (FusionInstruction() != nullptr) {
auto operand = FusionInstruction()->mutable_operand(parameter_number);
FusionInstruction()->RemoveOperandAt(parameter_number);
FusionInstruction()->DetachFrom(operand);
if (operand->IsDead() && operand->parent()->IsSafelyRemovable(
operand, ignore_control_dependencies)) {
TF_RETURN_IF_ERROR(
operand->parent()->RemoveInstructionAndUnusedOperands(
operand, cleanup, ignore_control_dependencies));
}
}
}
return absl::OkStatus();
}
absl::Status HloComputation::RemoveInstruction(HloInstruction* instruction) {
return RemoveInstructionImpl(instruction, false);
}
absl::Status HloComputation::ForceRemoveInstruction(
HloInstruction* instruction) {
return RemoveInstructionImpl(instruction, true);
}
absl::Status HloComputation::RemoveInstructionImpl(HloInstruction* instruction,
bool ignore_safety_check) {
VLOG(2) << "Removing instruction " << instruction << " "
<< instruction->name() << " from computation " << name();
TF_RET_CHECK(ignore_safety_check || IsSafelyRemovable(instruction))
<< "cannot remove instruction: " << instruction->ToString();
TF_RET_CHECK(instruction->IsDead()) << "instruction " << instruction->name()
<< " is live and cannot be removed";
TF_RET_CHECK(instruction->control_predecessors().empty())
<< "instruction " << instruction->name()
<< " has control predecessors and cannot be removed";
TF_RET_CHECK(instruction->control_successors().empty())
<< "instruction " << instruction->name()
<< " has control successors and cannot be removed";
HloInstructionInfo* info = &instructions_[instruction->index_in_parent_];
DCHECK_EQ(info->inst(), instruction);
info->inst()->set_parent(nullptr);
to_be_deleted_.push_back(info->inst());
to_be_deleted_.back()->DetachFromOperandsAndUsers();
to_be_deleted_.back()->RemoveAllOperands();
to_be_deleted_.back()->ClearCalledComputations();
to_be_deleted_.back()->MarkAsDead();
  info->inst_ = nullptr;
instruction->index_in_parent_ = ~0u;
instruction_count_--;
DCHECK_EQ(instructions_.size() - to_be_deleted_.size(), instruction_count())
<< "instructions_.size(): " << instructions_.size()
<< ", to_be_deleted_.size(): " << to_be_deleted_.size();
return absl::OkStatus();
}
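// RemoveInstructionImpl above only detaches an instruction and parks it in
// to_be_deleted_; Cleanup() performs the actual deletion and compacts
// instructions_ by sliding live entries over the vacated slots.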
void HloComputation::Cleanup() {
if (to_be_deleted_.empty()) return;
DCHECK_GT(instruction_count(), 0);
auto is_marked_for_removal = [](const HloInstructionInfo& info) {
return info.inst() == nullptr;
};
auto marked_it = absl::c_find_if(instructions_, is_marked_for_removal);
DCHECK(marked_it < instructions_.end());
for (auto it = marked_it + 1; it < instructions_.end(); ++it) {
if (is_marked_for_removal(*it)) continue;
HloInstruction* unmarked_instruction = it->inst();
unmarked_instruction->index_in_parent_ =
std::distance(instructions_.begin(), marked_it);
*marked_it++ = std::move(*it);
}
DCHECK(marked_it < instructions_.end());
DCHECK_EQ(std::distance(marked_it, instructions_.end()),
to_be_deleted_.size());
DCHECK_EQ(instructions_.size() - to_be_deleted_.size(), instruction_count())
<< "instructions_.size(): " << instructions_.size()
<< ", to_be_deleted_.size(): " << to_be_deleted_.size();
for (HloInstruction* marked_instruction : to_be_deleted_) {
delete marked_instruction;
}
to_be_deleted_.clear();
instructions_.resize(instruction_count());
}
void HloComputation::set_root_instruction(HloInstruction* new_root_instruction,
bool accept_different_shape) {
if (!IsFusionComputation() && !accept_different_shape) {
CHECK(ShapeUtil::Compatible(new_root_instruction->shape(),
root_instruction_->shape()))
<< new_root_instruction->shape() << " is incompatible with "
<< root_instruction_->shape();
}
bool root_found = false;
for (auto& instruction : instructions_) {
if (new_root_instruction == instruction.get()) {
root_found = true;
break;
}
}
DCHECK(root_found);
if (parent() && parent()->has_entry_computation() &&
parent()->entry_computation() == this) {
if (!Shape::Equal().IgnoreLayout()(new_root_instruction->shape(),
root_instruction_->shape())) {
parent()->input_output_alias_config() =
HloInputOutputAliasConfig(new_root_instruction->shape());
}
}
root_instruction_->MarkAsNonRoot();
new_root_instruction->MarkAsRoot();
root_instruction_ = new_root_instruction;
}
void HloComputation::ComputeInstructionPostOrder(
HloInstruction* root, const ChannelDependencies& channel_dependencies,
VisitMap& visited, std::vector<HloInstruction*>& post_order,
std::vector<HloInstruction*>* dfs_stack_scratch) const {
ForEachInstructionPostOrderImpl(
[&post_order](HloInstruction* hlo) { post_order.push_back(hlo); }, root,
channel_dependencies, visited, dfs_stack_scratch);
}
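// Iterative DFS: an instruction is pushed in state kNew, marked kVisiting
// while its operands and control predecessors are explored, and passed to
// `func` when it is popped again (at which point it becomes kVisited).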
void HloComputation::ForEachInstructionPostOrderImpl(
absl::FunctionRef<void(HloInstruction*)> func, HloInstruction* root,
const ChannelDependencies& channel_dependencies, VisitMap& visited,
std::vector<HloInstruction*>* dfs_stack_scratch) const {
bool has_channel_dependencies = !channel_dependencies.empty();
auto* dfs_stack = dfs_stack_scratch;
dfs_stack->clear();
auto dfs_stack_push = [&](HloInstruction* instr) {
VisitState state = visited.GetState(instr->index_in_parent_);
if (state != VisitState::kVisited) dfs_stack->push_back(instr);
};
dfs_stack_push(root);
while (!dfs_stack->empty()) {
HloInstruction* current = dfs_stack->back();
DCHECK_EQ(current->parent(), this)
<< "Instruction " << current->name()
<< " is not in the current computation (" << name() << ").";
VisitMap::Handle h = current->index_in_parent_;
VisitState state = visited.GetState(h);
if (state == VisitState::kNew) {
visited.SetState(h, VisitState::kVisiting);
} else {
dfs_stack->pop_back();
if (state != VisitState::kVisited) {
visited.SetState(h, VisitState::kVisited);
func(current);
}
continue;
}
if (has_channel_dependencies && current != root) {
auto it = channel_dependencies.find(current);
if (it != channel_dependencies.end()) {
absl::c_for_each(it->second, dfs_stack_push);
}
}
const HloInstruction::InstructionVector& operands = current->operands();
absl::c_for_each(tsl::gtl::make_range(operands.rbegin(), operands.rend()),
dfs_stack_push);
absl::c_for_each(current->control_predecessors(), dfs_stack_push);
}
}
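// Groups collectives that share a channel_id so the post-order traversal
// keeps each channel group together. Returns an empty map when a
// single-computation device assignment or SPMD partitioning makes the
// grouping unnecessary (see the early return below).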
HloComputation::ChannelDependencies HloComputation::ComputeChannelDependencies()
const {
if (parent() && parent()->config().has_static_device_assignment() &&
(parent()->config().static_device_assignment().computation_count() == 1 ||
parent()->config().use_spmd_partitioning())) {
return {};
}
using Instructions = absl::InlinedVector<HloInstruction*, 1>;
absl::flat_hash_map<int64_t, Instructions> channel_groups;
ChannelDependencies dependencies;
for (const auto& inst : instructions_with_info()) {
switch (inst.opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllGather:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kReduceScatter: {
HloInstruction* instruction = inst.inst();
std::optional<int64_t> channel_id = instruction->channel_id();
if (channel_id) {
Instructions& group = channel_groups[*channel_id];
for (const HloInstruction* group_inst : group) {
dependencies[group_inst].push_back(instruction);
}
dependencies[instruction] = group;
group.push_back(instruction);
}
break;
}
default:
break;
}
}
return dependencies;
}
std::vector<HloInstruction*> HloComputation::MakeInstructionPostOrderFrom(
HloInstruction& postorder_root) const {
std::vector<HloInstruction*> post_order;
VisitMap visited(instructions_.size());
std::vector<HloInstruction*> dfs_stack_scratch;
ComputeInstructionPostOrder(&postorder_root, ComputeChannelDependencies(),
visited, post_order, &dfs_stack_scratch);
return post_order;
}
std::vector<HloInstruction*> HloComputation::MakeInstructionPostOrder() const {
return MakeInstructionPostOrder(ComputeChannelDependencies());
}
std::vector<HloInstruction*> HloComputation::MakeInstructionPostOrder(
const ChannelDependencies& channel_dependencies) const {
std::vector<HloInstruction*> post_order;
post_order.reserve(instruction_count());
VisitMap visited(instructions_.size());
std::vector<HloInstruction*> dfs_stack_scratch;
dfs_stack_scratch.reserve(instruction_count());
for (const auto& instruction : instructions()) {
if (instruction->users().empty()) {
ComputeInstructionPostOrder(instruction, channel_dependencies, visited,
post_order, &dfs_stack_scratch);
}
}
CHECK_EQ(instruction_count(), post_order.size())
<< "number of instructions does not match post order size";
return post_order;
}
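// Reverse-topological sort with two frontiers: non-reshape instructions are
// always drained before reshapes, so after the final reversal each reshape
// appears as early as its dependencies allow.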
std::vector<HloInstruction*>
HloComputation::MakeInstructionPostOrderWithReshapeFirst() const {
std::vector<HloInstruction*> frontier_std;
std::vector<HloInstruction*> frontier_reshapes;
std::vector<HloInstruction*> sorted;
absl::flat_hash_map<int, uint32_t> visitations;
sorted.reserve(instruction_count());
visitations.reserve(instruction_count());
auto pop_frontier_element = [&frontier_std, &frontier_reshapes]() mutable {
if (!frontier_std.empty()) {
HloInstruction* const to_return = frontier_std.back();
frontier_std.pop_back();
return to_return;
}
if (!frontier_reshapes.empty()) {
HloInstruction* const to_return = frontier_reshapes.back();
frontier_reshapes.pop_back();
return to_return;
}
return static_cast<HloInstruction*>(nullptr);
};
auto add_to_frontier = [&frontier_std, &frontier_reshapes](
HloInstruction* const instruction_to_add) mutable {
if (instruction_to_add->opcode() == HloOpcode::kReshape) {
frontier_reshapes.push_back(instruction_to_add);
} else {
frontier_std.push_back(instruction_to_add);
}
};
bool found_root_instruction = false;
for (HloInstruction* const inst : instructions()) {
if (inst->user_count() == 0) {
if (inst == root_instruction()) {
found_root_instruction = true;
}
add_to_frontier(inst);
}
}
CHECK(found_root_instruction);
while (HloInstruction* const inst = pop_frontier_element()) {
sorted.push_back(inst);
for (HloInstruction* const child : inst->operands()) {
visitations[child->unique_id()]++;
if (child->user_count() == visitations[child->unique_id()]) {
add_to_frontier(child);
}
}
}
std::reverse(sorted.begin(), sorted.end());
CHECK_EQ(sorted.size(), instruction_count());
return sorted;
}
void HloComputation::ForEachInstructionPostOrder(
absl::FunctionRef<void(HloInstruction*)> func) const {
VisitMap visited(instructions_.size());
std::vector<HloInstruction*> dfs_stack_scratch;
dfs_stack_scratch.reserve(instruction_count());
auto channel_dependencies = ComputeChannelDependencies();
for (const auto& instruction : instructions()) {
if (instruction->users().empty()) {
ForEachInstructionPostOrderImpl(func, instruction, channel_dependencies,
visited, &dfs_stack_scratch);
}
}
}
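// Post order over all computations transitively called from this one,
// computed with an explicit stack of (computation, instruction iterator)
// pairs rather than recursion.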
std::vector<HloComputation*> HloComputation::MakeEmbeddedComputationsList()
const {
absl::flat_hash_set<HloComputation*> visited;
std::vector<HloComputation*> post_order;
using ComputationIter =
std::pair<HloComputation*, InstructionList::const_iterator>;
std::stack<ComputationIter, absl::InlinedVector<ComputationIter, 8>> st;
for (const HloInstructionInfo& instruction : instructions_with_info()) {
using PtrVec = PtrVec<HloComputation*>;
auto process_called_computations = [&](const PtrVec& called_computations) {
if (called_computations.empty()) return;
std::reverse_iterator<PtrVec::const_iterator> i(
called_computations.end());
std::reverse_iterator<PtrVec::const_iterator> rend(
called_computations.begin());
for (; i != rend; ++i) {
HloComputation* called_computation = *i;
if (visited.insert(called_computation).second) {
st.emplace(called_computation,
called_computation->instructions_.cbegin());
}
}
};
process_called_computations(instruction->called_computations());
while (!st.empty()) {
auto& cur = st.top();
HloComputation* computation = cur.first;
if (cur.second == computation->instructions_.cend()) {
st.pop();
post_order.push_back(computation);
} else {
if (cur.second->inst() == nullptr) {
++cur.second;
} else {
HloOpcode opcode = cur.second->opcode();
HloInstruction* next_instruction = cur.second->get();
++cur.second;
if (HloInstruction::MightHaveCalledComputations(opcode)) {
process_called_computations(
next_instruction->called_computations());
} else {
DCHECK(next_instruction->called_computations().empty());
}
}
}
}
}
return post_order;
}
void HloComputation::Print(Printer* printer,
const HloPrintOptions& options) const {
Print(printer, options, {});
}
void HloComputation::Print(
Printer* printer, const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const {
if (!instruction_order.empty()) {
CHECK_EQ(instruction_order.size(), instruction_count());
}
const std::string tab(2 * options.indent_amount(), ' ');
printer->Append(tab);
if (!options.is_in_nested_computation()) {
if (options.print_percent()) {
printer->Append("%");
}
if (options.print_ids()) {
printer->Append(name());
printer->Append(" ");
}
}
if (options.print_program_shape()) {
ShapeUtil::PrintHumanString(printer,
ComputeProgramShape(options.print_ids()));
printer->Append(" ");
}
printer->Append("{\n");
{
HloPrintOptions new_options =
HloPrintOptions(options)
.set_indent_amount(options.indent_amount() + 1)
.set_is_in_nested_computation(true);
CanonicalNameMap name_map;
name_map.Reserve(instruction_count());
auto print_one = [&](const HloInstruction* instruction) {
DCHECK_EQ(this, instruction->parent());
printer->Append(tab);
printer->Append(" ");
if (instruction == root_instruction_) {
printer->Append("ROOT ");
}
instruction->PrintWithCanonicalNameMap(printer, new_options, &name_map);
printer->Append("\n");
};
if (instruction_order.empty()) {
ForEachInstructionPostOrder(print_one);
} else {
for (const HloInstruction* const instruction : instruction_order) {
print_one(instruction);
}
}
}
printer->Append(tab);
printer->Append("}");
if (options.print_ids() && !IsMainThread()) {
printer->Append(", execution_thread=\"");
printer->Append(execution_thread());
printer->Append("\"");
}
if (options.print_name_after_closing_brace() && instruction_count() > 5) {
printer->Append("
printer->Append(name());
}
}
std::string HloComputation::ToString() const {
return ToString(HloPrintOptions::Default());
}
std::string HloComputation::ToString(const HloPrintOptions& options) const {
return ToString(options, MakeInstructionPostOrder());
}
std::string HloComputation::ToString(
const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const {
StringPrinter printer;
Print(&printer, options, instruction_order);
return std::move(printer).ToString();
}
absl::Cord HloComputation::ToCord(
const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const {
CordPrinter printer;
Print(&printer, options, instruction_order);
return std::move(printer).ToCord();
}
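// Serializes this computation. Instructions are emitted in post order so that
// every operand's proto precedes its users'.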
HloComputationProto HloComputation::ToProto() const {
HloComputationProto proto;
CHECK(unique_id_ != -1)
<< "This computation does not have a valid id. Please make sure the "
"computation is inside a module before dumping it.";
proto.set_id(unique_id_);
proto.set_name(name_);
for (const HloInstruction* instruction : MakeInstructionPostOrder()) {
HloInstructionProto instruction_proto = instruction->ToProto();
proto.add_instructions()->Swap(&instruction_proto);
}
proto.set_root_id(root_instruction()->unique_id());
*proto.mutable_program_shape() = ComputeProgramShape().ToProto();
proto.set_is_fusion_computation(IsFusionComputation());
proto.set_execution_thread(IsMainThread() ? ""
: std::string(execution_thread()));
return proto;
}
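// Rebuilds a computation from its proto. Instructions are re-sorted by their
// proto ids, and parameter numbers are validated to form the dense range
// [0, parameter_count).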
absl::StatusOr<std::unique_ptr<HloComputation>>
HloComputation::CreateFromProto(
const HloComputationProto& proto,
const absl::flat_hash_map<int64_t, HloComputation*>& computation_map,
bool prohibit_empty_literal) {
absl::flat_hash_map<int64_t, HloInstruction*> instruction_map;
absl::flat_hash_map<HloInstruction*, int64_t> to_proto_id;
std::vector<std::unique_ptr<HloInstruction>> instructions;
int64_t parameter_count = 0;
for (const HloInstructionProto& instruction_proto : proto.instructions()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloInstruction> instruction,
HloInstruction::CreateFromProto(
instruction_proto, instruction_map, computation_map,
prohibit_empty_literal));
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_count++;
}
TF_RET_CHECK(!ContainsKey(instruction_map, instruction_proto.id()));
instruction_map[instruction_proto.id()] = instruction.get();
to_proto_id[instruction.get()] = instruction_proto.id();
instructions.push_back(std::move(instruction));
}
TF_RET_CHECK(proto.root_id() != -1);
TF_RET_CHECK(ContainsKey(instruction_map, proto.root_id()));
HloInstruction* root = instruction_map.at(proto.root_id());
absl::c_sort(instructions, [&](const std::unique_ptr<HloInstruction>& a,
const std::unique_ptr<HloInstruction>& b) {
return to_proto_id[a.get()] < to_proto_id[b.get()];
});
TF_RETURN_IF_ERROR([&]() -> absl::Status {
std::vector<bool> parameters_seen(parameter_count);
int parameters_seen_count = 0;
for (auto& instruction : instructions) {
if (instruction->opcode() == HloOpcode::kParameter) {
int64_t param_no = instruction->parameter_number();
TF_RET_CHECK(param_no >= 0 && param_no < parameter_count)
<< "Invalid parameter number. Expected [0, " << parameter_count
<< "), got " << param_no;
TF_RET_CHECK(!parameters_seen[param_no])
<< "Parameter number " << param_no
<< " already allocated in this computation";
parameters_seen[param_no] = true;
parameters_seen_count++;
}
}
TF_RET_CHECK(parameters_seen_count == parameter_count)
<< "Not all parameters in range [0, " << parameter_count
<< ") were referenced";
return absl::OkStatus();
}());
auto computation = absl::WrapUnique(
new HloComputation(proto.name(), parameter_count, &instructions, root));
computation->unique_id_ = proto.id();
if (proto.is_fusion_computation()) {
computation->instruction_and_type_ =
static_cast<uintptr_t>(InstructionType::kFusion);
}
if (!proto.execution_thread().empty()) {
computation->SetExecutionThread(proto.execution_thread());
}
return std::move(computation);
}
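// Moves `instructions_to_append` (whose first element must be the root of the
// group) into the computation called by `caller`: control dependencies and
// uses of the old root are transferred to `caller`, and instructions that
// become dead are removed.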
void HloComputation::AppendInstructionsIntoCalledComputation(
absl::Span<HloInstruction* const> instructions_to_append,
HloInstruction* caller) {
HloInstruction* root = instructions_to_append.front();
TF_CHECK_OK(caller->CopyAllControlDepsFrom(root));
TF_CHECK_OK(root->DropAllControlDeps());
TF_CHECK_OK(root->ReplaceAllUsesWith(caller));
if (root == root_instruction()) {
set_root_instruction(caller);
}
TF_CHECK_OK(RemoveInstruction(root));
for (size_t i = 1; i < instructions_to_append.size(); ++i) {
HloInstruction* instruction = instructions_to_append[i];
caller->AppendInstructionIntoCalledComputation(instruction);
if (instruction->IsDead()) {
TF_CHECK_OK(RemoveInstruction(instruction));
}
}
}
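// The Create*Instruction helpers below wrap a group of instructions (root
// first) into a single fusion, call, or composite-call instruction.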
HloInstruction* HloComputation::CreateFusionInstruction(
absl::Span<HloInstruction* const> instructions_to_fuse,
HloInstruction::FusionKind fusion_kind) {
HloInstruction* root = instructions_to_fuse.front();
HloInstruction* fusion_instruction = AddInstruction(
HloInstruction::CreateFusion(root->shape(), fusion_kind, root));
AppendInstructionsIntoCalledComputation(instructions_to_fuse,
fusion_instruction);
return fusion_instruction;
}
HloInstruction* HloComputation::CreateCallInstruction(
absl::Span<HloInstruction* const> instructions_to_call) {
HloInstruction* root = instructions_to_call.front();
HloInstruction* call_instruction = AddInstruction(
HloInstruction::CreateCall(root->shape(), root), root->name());
AppendInstructionsIntoCalledComputation(instructions_to_call,
call_instruction);
return call_instruction;
}
HloInstruction* HloComputation::CreateCompositeCallInstruction(
absl::Span<HloInstruction* const> instructions_to_call,
const std::string& name, const std::string& attributes, int64_t version) {
HloInstruction* root = instructions_to_call.front();
HloInstruction* call_instruction =
AddInstruction(HloInstruction::CreateCompositeCall(
root->shape(), root, name, attributes, version),
root->name());
AppendInstructionsIntoCalledComputation(instructions_to_call,
call_instruction);
return call_instruction;
}
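// Rewrites `instruction` as an async-start/async-done pair. kCopy is special
// cased into copy-start/copy-done; any other opcode is cloned into a fresh
// "async_computation" whose parameters mirror the instruction's operands.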
absl::StatusOr<HloInstruction*> HloComputation::CreateAsyncInstructions(
HloInstruction* instruction, absl::Span<const Shape> context_shapes,
absl::string_view async_execution_thread, bool replace,
bool override_names) {
HloInstruction* async_start;
HloInstruction* async_done;
if (instruction->opcode() == HloOpcode::kCopy) {
std::vector<Shape> context_shapes_tuple;
context_shapes_tuple.reserve(context_shapes.size() + 2);
Shape instruction_shape_destination = instruction->shape();
context_shapes_tuple.push_back(instruction_shape_destination);
Shape instruction_shape_source = instruction->operand(0)->shape();
context_shapes_tuple.push_back(instruction_shape_source);
context_shapes_tuple.insert(context_shapes_tuple.end(),
context_shapes.begin(), context_shapes.end());
async_start = AddInstruction(HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape(context_shapes_tuple),
instruction->mutable_operand(0)));
async_done = AddInstruction(HloInstruction::CreateUnary(
instruction_shape_destination, HloOpcode::kCopyDone, async_start));
} else {
Builder builder("async_computation");
std::vector<HloInstruction*> parameters(instruction->operand_count());
std::vector<Shape> parameter_shapes(instruction->operand_count());
for (int i = 0; i < instruction->operand_count(); ++i) {
const Shape& parameter_shape = instruction->operand(i)->shape();
parameters[i] = builder.AddInstruction(HloInstruction::CreateParameter(
i, parameter_shape, absl::StrCat("param_", i)));
parameter_shapes[i] = parameter_shape;
}
HloInstruction* root = builder.AddInstruction(
instruction->CloneWithNewOperands(instruction->shape(), parameters));
if (override_names) {
parent()->SetAndUniquifyInstrName(
root, absl::StrCat(instruction->name(), ".cloned"));
}
HloComputation* async_computation =
parent_->AddEmbeddedComputation(builder.Build(root));
std::vector<Shape> start_shapes = {
ShapeUtil::MakeTupleShape(parameter_shapes), root->shape()};
for (const Shape& context_shape : context_shapes) {
start_shapes.push_back(context_shape);
}
async_start = AddInstruction(HloInstruction::CreateAsyncStart(
ShapeUtil::MakeTupleShape(start_shapes), instruction->operands(),
async_computation, async_execution_thread));
async_done = AddInstruction(
HloInstruction::CreateAsyncDone(root->shape(), async_start));
if (override_names) {
parent()->SetAndUniquifyInstrName(
async_start, absl::StrCat(root->name(), ".call-start"));
parent()->SetAndUniquifyInstrName(
async_done, absl::StrCat(root->name(), ".call-done"));
}
}
async_start->set_metadata(instruction->metadata());
async_start->CopyBackendConfigFrom(instruction);
async_done->set_metadata(instruction->metadata());
async_done->CopyBackendConfigFrom(instruction);
for (HloInstruction* control_pred : instruction->control_predecessors()) {
TF_RETURN_IF_ERROR(control_pred->AddControlDependencyTo(async_start));
}
for (HloInstruction* control_successor : instruction->control_successors()) {
TF_RETURN_IF_ERROR(async_done->AddControlDependencyTo(control_successor));
}
if (replace) {
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_IF_ERROR(ReplaceInstruction(instruction, async_done));
}
return async_done;
}
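// Recursive worker for the DeepCopy* entry points: tuples are decomposed via
// get-tuple-element and rebuilt, tokens are passed through unchanged, and
// array-shaped leaves are handed to `copy_leaf`.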
absl::StatusOr<HloInstruction*> HloComputation::DeepCopyHelper(
HloInstruction* instruction, ShapeIndex* index,
absl::FunctionRef<HloInstruction*(HloInstruction* leaf,
const ShapeIndex& leaf_index,
HloComputation* computation)>
copy_leaf) {
if (instruction->shape().IsTuple()) {
std::vector<HloInstruction*> elements;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(instruction->shape());
i++) {
HloInstruction* gte =
AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(instruction->shape(), i),
instruction, i));
index->push_back(i);
TF_ASSIGN_OR_RETURN(HloInstruction * element,
DeepCopyHelper(gte, index, copy_leaf));
elements.push_back(element);
index->pop_back();
}
return AddInstruction(HloInstruction::CreateTuple(elements));
}
if (instruction->shape().IsToken()) {
return instruction;
}
TF_RET_CHECK(instruction->shape().IsArray());
return copy_leaf(instruction, *index, this);
}
absl::StatusOr<HloInstruction*> HloComputation::DeepCopyInstruction(
HloInstruction* instruction, const ShapeTree<bool>* indices_to_copy,
ShapeTree<HloInstruction*>* copies_added) {
if (instruction->parent() != this) {
return FailedPrecondition(
"Can't deep copy instruction %s: instruction is not in computation %s",
instruction->name(), name());
}
if (indices_to_copy != nullptr &&
!ShapeUtil::Compatible(instruction->shape(), indices_to_copy->shape())) {
return FailedPrecondition(
"Can't deep copy instruction %s: given shape tree of indices to copy "
"has incompatible shapes: %s vs. %s",
instruction->name(), ShapeUtil::HumanString(instruction->shape()),
ShapeUtil::HumanString(indices_to_copy->shape()));
}
ShapeIndex index;
auto copy_leaf = [indices_to_copy, copies_added](
HloInstruction* leaf, const ShapeIndex& leaf_index,
HloComputation* computation) {
if (indices_to_copy == nullptr || indices_to_copy->element(leaf_index)) {
HloInstruction* copy = computation->AddInstruction(
HloInstruction::CreateUnary(leaf->shape(), HloOpcode::kCopy, leaf));
if (copies_added != nullptr) {
*copies_added->mutable_element(leaf_index) = copy;
}
return copy;
}
return leaf;
};
return DeepCopyHelper(instruction, &index, copy_leaf);
}
absl::StatusOr<HloInstruction*>
HloComputation::DeepCopyInstructionWithCustomCopier(
HloInstruction* instruction,
absl::FunctionRef<HloInstruction*(HloInstruction* leaf,
const ShapeIndex& leaf_index,
HloComputation* computation)>
copy_leaf) {
if (instruction->parent() != this) {
return FailedPrecondition(
"Can't deep copy instruction %s: instruction is not in computation %s",
instruction->name(), name());
}
ShapeIndex index;
return DeepCopyHelper(instruction, &index, copy_leaf);
}
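// Derives the program shape from the parameter instructions' shapes and the
// root instruction's shape.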
ProgramShape HloComputation::ComputeProgramShape(bool include_ids) const {
ProgramShape program_shape;
for (auto* param_instruction : param_instructions_) {
*program_shape.add_parameters() = param_instruction->shape();
*program_shape.add_parameter_names() =
std::string(PrintName(param_instruction->name(), include_ids));
}
*program_shape.mutable_result() = root_instruction_->shape();
return program_shape;
}
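// Worklist-based structural equality: instruction pairs are compared ignoring
// operands, then their operand pairs are pushed, with a visited set
// preventing repeated work on shared subgraphs.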
bool HloComputation::EqualInternal(
const HloComputation& other, bool is_layout_sensitive,
std::optional<
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>>
computations_comparator,
bool ignore_channel_id_values, bool ignore_execution_thread) const {
if (this == &other) {
return true;
}
absl::flat_hash_set<std::pair<const HloInstruction*, const HloInstruction*>>
visited;
std::vector<std::pair<const HloInstruction*, const HloInstruction*>> worklist;
worklist.push_back({root_instruction(), other.root_instruction()});
while (!worklist.empty()) {
auto pair = worklist.back();
worklist.pop_back();
if (visited.contains(pair)) {
continue;
}
visited.emplace(pair);
auto operands_eq = [](const HloInstruction*, const HloInstruction*) {
return true;
};
auto comp_eq = [&](const HloComputation* a, const HloComputation* b) {
return a->EqualInternal(*b, is_layout_sensitive, computations_comparator,
ignore_channel_id_values,
ignore_execution_thread);
};
bool identical_ignoring_operands =
ignore_channel_id_values
? pair.first->IdenticalIgnoringChannelIdValues(
*pair.second, operands_eq,
(computations_comparator ? *computations_comparator
: comp_eq),
is_layout_sensitive)
: pair.first->Identical(
*pair.second, operands_eq,
(computations_comparator ? *computations_comparator
: comp_eq),
is_layout_sensitive);
if (!identical_ignoring_operands) {
return false;
}
for (size_t i = 0; i < pair.first->operands().size(); ++i) {
worklist.push_back({pair.first->operand(i), pair.second->operand(i)});
}
}
if (!ignore_execution_thread) {
return execution_thread() == other.execution_thread();
}
return true;
}
absl::Status HloComputation::ReplaceWithNewInstruction(
HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> new_instruction) {
return ReplaceInstruction(old_instruction,
AddInstruction(std::move(new_instruction)));
}
absl::Status HloComputation::ReplaceWithNewEntryComputationParameter(
HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> new_instruction) {
return ReplaceInstruction(old_instruction, AddEntryComputationParameter(
std::move(new_instruction)));
}
absl::StatusOr<bool> HloComputation::ReplaceInstruction(
HloInstruction* old_instruction, HloInstruction* new_instruction,
bool preserve_sharding, bool relay_control_dependency,
bool remove_unused_operands) {
TF_RET_CHECK(
ShapeUtil::Compatible(old_instruction->shape(), new_instruction->shape()))
<< absl::StreamFormat(
"\"%s\" (%s) vs \"%s\" (%s)", old_instruction->name(),
old_instruction->shape().ToString(true),
new_instruction->name(),
new_instruction->shape().ToString(true));
return ReplaceInstructionWithDifferentShape(
old_instruction, new_instruction, preserve_sharding,
relay_control_dependency, remove_unused_operands);
}
absl::Status HloComputation::ReplaceInstruction(
HloInstruction* old_instruction, HloInstruction* new_instruction) {
TF_ASSIGN_OR_RETURN(bool changed,
ReplaceInstruction(old_instruction, new_instruction,
                                         /*preserve_sharding=*/false));
DCHECK(changed);
return absl::OkStatus();
}
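// Core replacement routine. Returns false (without replacing) when sharding
// must be preserved but is incompatible, or when the old instruction has
// control dependencies that are not being relayed; otherwise metadata,
// frontend attributes, original values, and sharding are carried over before
// the old instruction is removed.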
absl::StatusOr<bool> HloComputation::ReplaceInstructionWithDifferentShape(
HloInstruction* old_instruction, HloInstruction* new_instruction,
bool preserve_sharding, bool relay_control_dependency,
bool remove_unused_operands) {
if (preserve_sharding && new_instruction->has_sharding() &&
old_instruction->has_sharding() &&
!new_instruction->has_compatible_sharding(old_instruction)) {
VLOG(10) << "Skipping replacement due to incompatible sharding";
return false;
}
if (relay_control_dependency) {
TF_RETURN_IF_ERROR(
new_instruction->CopyAllControlDepsFrom(old_instruction));
TF_RETURN_IF_ERROR(old_instruction->DropAllControlDeps());
} else if (old_instruction->HasControlDependencies()) {
VLOG(10) << "Skipping replacement because old instruction has "
"control dependencies";
return false;
}
VLOG(10) << "transformed " << old_instruction->ToString() << " to "
<< new_instruction->ToString();
bool overwrite_op_name = new_instruction->metadata().op_name().empty() &&
!old_instruction->metadata().op_name().empty();
if (overwrite_op_name) {
new_instruction->set_metadata(old_instruction->metadata());
}
if (new_instruction->frontend_attributes().map().empty()) {
new_instruction->set_frontend_attributes(
old_instruction->frontend_attributes());
}
if (auto old_original_value = old_instruction->original_value()) {
if (new_instruction->opcode() != HloOpcode::kFusion) {
if (ShapeUtil::Compatible(old_instruction->shape(),
new_instruction->shape())) {
new_instruction->set_original_value(old_original_value);
} else {
LOG(WARNING)
<< "Expect the new instruction to have the same shape with the old "
"instruction when copying over original_value\n";
}
}
}
if (!new_instruction->has_sharding()) {
new_instruction->copy_sharding(old_instruction);
}
TF_RETURN_IF_ERROR(
old_instruction->ReplaceAllUsesWithDifferentShape(new_instruction));
if (old_instruction->opcode() == new_instruction->opcode() &&
(old_instruction->opcode() != HloOpcode::kCustomCall ||
old_instruction->custom_call_target() ==
new_instruction->custom_call_target())) {
new_instruction->SetAndSanitizeName(old_instruction->name());
}
if (remove_unused_operands) {
TF_RETURN_IF_ERROR(RemoveInstructionAndUnusedOperands(
        old_instruction, /*cleanup=*/std::nullopt,
        /*ignore_control_dependencies=*/relay_control_dependency));
} else {
TF_RETURN_IF_ERROR(RemoveInstruction(old_instruction));
}
return true;
}
absl::Status HloComputation::ReplaceInstructionWithDifferentShape(
HloInstruction* old_instruction, HloInstruction* new_instruction) {
TF_ASSIGN_OR_RETURN(bool changed, ReplaceInstructionWithDifferentShape(
old_instruction, new_instruction,
                                         /*preserve_sharding=*/false));
DCHECK(changed);
return absl::OkStatus();
}
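// Returns the dead instructions that have no control successors; these act as
// extra roots that visitors must traverse explicitly.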
std::vector<HloInstruction*> HloComputation::CollectUnreachableRoots() const {
std::vector<HloInstruction*> unreachable_roots;
for (auto* instruction : instructions()) {
if (instruction->IsDead() && instruction->control_successors().empty()) {
unreachable_roots.push_back(instruction);
}
}
VLOG(3) << "Unreachable roots:"
<< absl::StrJoin(unreachable_roots, "\n\t",
[](std::string* out, const HloInstruction* hlo) {
absl::StrAppend(out, hlo->ToString());
});
return unreachable_roots;
}
absl::Status HloComputation::AcceptWithOperandOrder(
DfsHloVisitor* visitor,
const HloInstruction::CompareFunction& operand_order) const {
for (HloInstruction* root : CollectUnreachableRoots()) {
TF_RETURN_IF_ERROR(
root->AcceptWithOperandOrder(visitor, operand_order,
                                     /*call_finish_visit=*/false));
}
return root_instruction()->AcceptWithOperandOrder(visitor, operand_order,
                                                    /*call_finish_visit=*/true);
}
std::unique_ptr<HloComputation> HloComputation::Clone(
const std::string& suffix, HloCloneContext* context) {
return CloneWithReplacements(
      /*replacements=*/nullptr,
      /*extra_parameters=*/{}, context, suffix);
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacementPairs(
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
HloCloneContext* context, const std::string& suffix) {
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(std::move(r1));
return CloneWithReplacements(&replacements, {}, context,
suffix);
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacementPairs(
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r2,
HloCloneContext* context, const std::string& suffix) {
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(std::move(r1));
replacements.emplace(std::move(r2));
return CloneWithReplacements(&replacements, {}, context,
suffix);
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacementPairs(
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r2,
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r3,
HloCloneContext* context, const std::string& suffix) {
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(std::move(r1));
replacements.emplace(std::move(r2));
replacements.emplace(std::move(r3));
return CloneWithReplacements(&replacements, {}, context,
suffix);
}
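// Cloning helpers: re-sort cloned instructions (and each clone's user and
// control-dependency lists) to match the original computation's order, with
// special handling to keep unmapped parameters grouped with the mapped ones.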
namespace {
void SortClonedInstructions(
const HloCloneContext& context,
absl::FunctionRef<const HloInstruction*(const HloInstruction*)> replace,
const HloComputation& computation,
const HloComputation::InstructionList& ordered_instructions,
std::vector<std::unique_ptr<HloInstruction>>& unordered_instructions) {
using InstructionSorter = MappedPtrContainerSorter<HloInstruction>;
auto instruction_mapper = [&context, replace](const HloInstruction* i) {
return context.FindInstruction(replace(i));
};
size_t num_mapped_instructions = 0;
size_t mapped_index_of_last_parameter_plus_one = 0;
for (const auto& instruction : ordered_instructions) {
if (!instruction_mapper(instruction.get())) {
continue;
}
++num_mapped_instructions;
if (!dynamic_cast<const HloParameterInstruction*>(instruction.get())) {
continue;
}
mapped_index_of_last_parameter_plus_one = num_mapped_instructions;
}
auto unmapped_ptr_index =
[num_mapped_instructions,
mapped_index_of_last_parameter_plus_one](const HloInstruction* i) {
if (dynamic_cast<const HloParameterInstruction*>(i)) {
if (num_mapped_instructions > 0 &&
mapped_index_of_last_parameter_plus_one > 0) {
return mapped_index_of_last_parameter_plus_one - 1;
}
return InstructionSorter::IndexBeforeMappedElementsFn()(i);
}
return InstructionSorter::IndexAfterMappedElementsFn()(i);
};
auto status =
InstructionSorter::Sort(instruction_mapper, unmapped_ptr_index,
ordered_instructions, unordered_instructions);
if (!status.ok()) {
LOG(ERROR) << "Failed to reorder instructions while cloning computation: "
<< computation.name() << "; " << status;
}
}
void SortClonedInstructionUsersAndControlLists(
const HloCloneContext& context,
absl::FunctionRef<const HloInstruction*(const HloInstruction*)> replace,
const HloComputation::InstructionList& sorted_instructions) {
auto instruction_mapper = [&context, replace](const HloInstruction* i) {
return context.FindInstruction(replace(i));
};
for (const HloInstructionInfo& instruction : sorted_instructions) {
HloInstruction* cloned_instruction =
context.FindInstruction(replace(instruction.get()));
if (!cloned_instruction) {
continue;
}
cloned_instruction->SortInstructionUsersAndControlLists(instruction_mapper,
*instruction);
}
}
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacements(
const absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>* replacements,
absl::Span<const HloInstruction* const> extra_parameters,
HloCloneContext* context, const std::string& suffix,
const HloInstruction* new_root) {
std::unique_ptr<HloCloneContext> context_ptr;
if (context == nullptr) {
context_ptr = std::make_unique<HloCloneContext>(parent(), suffix);
context = context_ptr.get();
}
return CloneInContext(*context, replacements, extra_parameters, suffix,
new_root);
}
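// Clones this computation into `context`. A DFS from each surviving
// instruction builds a post order; `replacements` may substitute or drop
// instructions, `extra_parameters` are cloned in first, and `new_root`
// defaults to the current root.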
std::unique_ptr<HloComputation> HloComputation::CloneInContext(
HloCloneContext& context,
const absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>* replacements,
absl::Span<const HloInstruction* const> extra_parameters,
const std::string& suffix, const HloInstruction* new_root) const {
if (new_root == nullptr) {
new_root = root_instruction();
}
auto replace = [&](const HloInstruction* instr) {
if (!replacements) return instr;
auto it = replacements->find(instr);
return it != replacements->end() ? it->second.get() : instr;
};
VLOG(1) << "Cloning " << name() << " --> " << suffix << "\n";
std::vector<const HloInstruction*> postorder;
absl::flat_hash_map<const HloInstruction*, VisitState> visited;
std::vector<const HloInstruction*> dfs_stack;
for (const auto& instr : instructions()) {
const HloInstruction* new_instr = replace(instr);
if (!new_instr) {
continue;
}
dfs_stack.clear();
dfs_stack.push_back(new_instr);
while (!dfs_stack.empty()) {
auto* cur = dfs_stack.back();
auto it = visited.find(cur);
if (it != visited.end()) {
dfs_stack.pop_back();
if (it->second == VisitState::kVisited) {
continue;
}
CHECK_EQ(it->second, VisitState::kVisiting);
postorder.push_back(cur);
it->second = VisitState::kVisited;
continue;
}
visited.insert({cur, VisitState::kVisiting});
for (HloInstruction* operand : cur->operands()) {
const HloInstruction* new_operand = replace(operand);
if (new_operand) {
dfs_stack.emplace_back(new_operand);
}
}
}
}
std::vector<std::unique_ptr<HloInstruction>> instructions;
for (const auto& instr : extra_parameters) {
CHECK_EQ(instr->opcode(), HloOpcode::kParameter)
<< "Only parameter instructions are allowed in 'extra_parameters'";
instructions.emplace_back(instr->Clone());
}
for (auto instr : postorder) {
std::vector<HloInstruction*> new_operands;
for (auto operand : instr->operands()) {
auto replaced_operand = replace(operand);
CHECK_NE(replaced_operand, nullptr)
<< "replacements map tried to eliminate a used instruction "
<< operand->ToString() << ", used by " << instr->ToString();
new_operands.push_back(context.GetInstruction(replaced_operand));
}
std::unique_ptr<HloInstruction> new_instr =
instr->CloneWithNewOperands(instr->shape(), new_operands, &context);
if (instr->opcode() == HloOpcode::kParameter &&
instr->parameter_replicated_at_leaf_buffers().has_value()) {
new_instr->set_parameter_replicated_at_leaf_buffers(
instr->parameter_replicated_at_leaf_buffers().value());
}
instructions.push_back(std::move(new_instr));
}
SortClonedInstructions(context, replace, *this, instructions_, instructions);
Builder builder(suffix.empty() ? std::string(name())
: absl::StrCat(name(), ".", suffix));
for (auto& instr : instructions) {
builder.AddInstruction(std::move(instr));
}
auto result = builder.Build(
      /*root_instruction=*/context.GetInstruction(replace(new_root)));
for (auto instr : postorder) {
HloInstruction* new_instr = context.GetInstruction(instr);
for (auto successor : instr->control_successors()) {
auto replaced_successor = replace(successor);
if (replaced_successor != nullptr) {
TF_CHECK_OK(new_instr->AddControlDependencyTo(
context.GetInstruction(replaced_successor)));
}
}
}
SortClonedInstructionUsersAndControlLists(context, replace, instructions_);
context.MapComputation(this, result.get());
result->SetExecutionThread(execution_thread());
return result;
}
void HloComputation::UniquifyName(NameUniquer* name_uniquer) {
name_ = name_uniquer->GetUniqueName(name_);
}
void HloComputation::UniquifyName(HloModule* module) {
UniquifyName(&module->computation_name_uniquer());
}
HloInstruction* HloComputation::GetInstructionWithName(absl::string_view name) {
auto instructions_in_computation = instructions();
auto it = absl::c_find_if(
instructions_in_computation,
[&](HloInstruction* instr) { return instr->name() == name; });
return it == instructions_in_computation.end() ? nullptr : *it;
}
bool HloComputation::IsEntryComputation() const {
return parent()->entry_computation() == this;
}
bool HloComputation::CanExpandIntoSingleInstruction() const {
return absl::c_all_of(
instructions(), [root = root_instruction()](const HloInstruction* instr) {
return root == instr || instr->opcode() == HloOpcode::kParameter;
});
}
} | #include "xla/hlo/ir/hlo_computation.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
namespace op = xla::testing::opcode_matchers;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
class HloComputationTest : public HloTestBase {
protected:
HloComputationTest() {}
std::unique_ptr<HloComputation> CreateNegateComputation() {
auto builder = HloComputation::Builder("Negate");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, param));
return builder.Build();
}
std::unique_ptr<HloComputation> CreateMapComputation(
HloComputation* map_computation) {
auto builder = HloComputation::Builder("Map");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
builder.AddInstruction(
HloInstruction::CreateMap(r0f32_, {param}, map_computation));
return builder.Build();
}
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
TEST_F(HloComputationTest, GetEmbeddedComputationsEmpty) {
auto module = CreateNewVerifiedModule();
auto negate_computation =
module->AddEntryComputation(CreateNegateComputation());
EXPECT_TRUE(negate_computation->MakeEmbeddedComputationsList().empty());
}
TEST_F(HloComputationTest, GetEmbeddedComputationsOneComputation) {
auto module = CreateNewVerifiedModule();
auto negate_computation =
module->AddEmbeddedComputation(CreateNegateComputation());
auto map_computation =
module->AddEntryComputation(CreateMapComputation(negate_computation));
EXPECT_TRUE(negate_computation->MakeEmbeddedComputationsList().empty());
EXPECT_THAT(map_computation->MakeEmbeddedComputationsList(),
ElementsAre(negate_computation));
}
TEST_F(HloComputationTest, GetEmbeddedComputationsDiamond) {
auto module = CreateNewVerifiedModule();
auto negate_computation =
module->AddEmbeddedComputation(CreateNegateComputation());
auto map1_computation =
module->AddEmbeddedComputation(CreateMapComputation(negate_computation));
auto map2_computation =
module->AddEmbeddedComputation(CreateMapComputation(negate_computation));
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto map1 = builder.AddInstruction(
HloInstruction::CreateMap(r0f32_, {param}, map1_computation));
auto map2 = builder.AddInstruction(
HloInstruction::CreateMap(r0f32_, {param}, map2_computation));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, map1, map2));
auto computation = module->AddEntryComputation(builder.Build());
auto embedded_computations = computation->MakeEmbeddedComputationsList();
EXPECT_EQ(3, embedded_computations.size());
EXPECT_EQ(negate_computation, *embedded_computations.begin());
EXPECT_THAT(embedded_computations,
UnorderedElementsAre(negate_computation, map1_computation,
map2_computation));
}
TEST_F(HloComputationTest, PostOrderSingleton) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(), ElementsAre(constant));
}
TEST_F(HloComputationTest, PostOrderSimple) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, negate1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(),
ElementsAre(constant, negate1, negate2));
}
TEST_F(HloComputationTest, PostOrderDisconnectedInstructions) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(),
UnorderedElementsAre(constant1, constant2, constant3, constant4));
}
TEST_F(HloComputationTest, PostOrderWithReshapeFirst) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
parameter.0 = f32[3] parameter(0)
broadcast.0 = f32[1, 3] broadcast(f32[3] parameter.0), dimensions={1}
reshape.0 = f32[3, 1] reshape(f32[3] parameter.0)
ROOT tuple.0 = (f32[1, 3], f32[3, 1]) tuple(f32[1, 3] broadcast.0, f32[3, 1] reshape.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* entry_computation =
FindComputation(hlo_module.get(), "entry");
HloInstruction* parameter_0 =
FindInstruction(hlo_module.get(), "parameter.0");
HloInstruction* broadcast_0 =
FindInstruction(hlo_module.get(), "broadcast.0");
HloInstruction* reshape_0 = FindInstruction(hlo_module.get(), "reshape.0");
HloInstruction* tuple_0 = FindInstruction(hlo_module.get(), "tuple.0");
EXPECT_THAT(entry_computation->MakeInstructionPostOrder(),
ElementsAre(parameter_0, broadcast_0, reshape_0, tuple_0));
EXPECT_THAT(entry_computation->MakeInstructionPostOrderWithReshapeFirst(),
ElementsAre(parameter_0, reshape_0, broadcast_0, tuple_0));
}
TEST_F(HloComputationTest, PostOrderWithMultipleRoots) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant2, constant3));
auto add3 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto post_order = computation->MakeInstructionPostOrder();
EXPECT_EQ(6, post_order.size());
EXPECT_THAT(post_order, UnorderedElementsAre(constant1, constant2, constant3,
add1, add2, add3));
}
TEST_F(HloComputationTest, VisitWithMultipleRoots) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant1, constant2));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant2, constant3));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant1, constant3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
class TestVisitor : public DfsHloVisitorWithDefault {
public:
explicit TestVisitor(HloComputation* computation)
: computation_(computation) {}
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
EXPECT_FALSE(visited_set_.contains(hlo_instruction));
visited_set_.insert(hlo_instruction);
last_visited_ = hlo_instruction;
return absl::OkStatus();
}
absl::Status FinishVisit(HloInstruction* root) override {
EXPECT_EQ(computation_->root_instruction(), root);
++finish_visit_calls_;
return absl::OkStatus();
}
HloComputation* computation_;
absl::flat_hash_set<HloInstruction*> visited_set_;
int64_t finish_visit_calls_ = 0;
HloInstruction* last_visited_ = nullptr;
};
TestVisitor visitor(computation);
EXPECT_IS_OK(computation->Accept(&visitor));
EXPECT_EQ(6, visitor.visited_set_.size());
EXPECT_EQ(1, visitor.finish_visit_calls_);
EXPECT_EQ(computation->root_instruction(), visitor.last_visited_);
}
TEST_F(HloComputationTest, DeepCopyArray) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(constant).value();
EXPECT_THAT(copy, GmockMatch(m::Copy(m::Op().Is(constant))));
}
TEST_F(HloComputationTest, DeepCopyTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto tuple_copy = computation->DeepCopyInstruction(tuple).value();
EXPECT_THAT(tuple_copy, GmockMatch(m::Tuple(
m::Copy(m::GetTupleElement(m::Op().Is(tuple))),
m::Copy(m::GetTupleElement(m::Op().Is(tuple))))));
EXPECT_EQ(0, tuple_copy->operand(0)->operand(0)->tuple_index());
EXPECT_EQ(1, tuple_copy->operand(1)->operand(0)->tuple_index());
}
TEST_F(HloComputationTest, DeepCopyArrayAtIndices) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto computation = builder.Build();
{
ShapeTree<bool> indices_to_copy(constant->shape(), true);
EXPECT_THAT(
computation->DeepCopyInstruction(constant, &indices_to_copy).value(),
GmockMatch(m::Copy(m::Op().Is(constant))));
}
{
ShapeTree<bool> indices_to_copy(constant->shape(), false);
EXPECT_EQ(
computation->DeepCopyInstruction(constant, &indices_to_copy).value(),
constant);
}
}
TEST_F(HloComputationTest, DeepCopyTupleAtIndices) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto computation = builder.Build();
{
ShapeTree<bool> indices_to_copy(tuple->shape(), true);
ShapeTree<HloInstruction*> copies_added(tuple->shape(),
                                            /*init_value=*/nullptr);
HloInstruction* deep_copy =
computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
.value();
EXPECT_THAT(deep_copy, GmockMatch(m::Tuple(
m::Copy(m::GetTupleElement(m::Op().Is(tuple)))
.Is(copies_added.element({0})),
m::Copy(m::GetTupleElement(m::Op().Is(tuple)))
.Is(copies_added.element({1})))));
}
{
ShapeTree<bool> indices_to_copy(tuple->shape(), false);
ShapeTree<HloInstruction*> copies_added(tuple->shape(),
                                            /*init_value=*/nullptr);
HloInstruction* deep_copy =
computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
.value();
EXPECT_THAT(deep_copy,
GmockMatch(m::Tuple(m::GetTupleElement(m::Op().Is(tuple)),
m::GetTupleElement(m::Op().Is(tuple)))));
EXPECT_TRUE(copies_added.element({}) == nullptr);
EXPECT_TRUE(copies_added.element({0}) == nullptr);
EXPECT_TRUE(copies_added.element({1}) == nullptr);
}
{
ShapeTree<bool> indices_to_copy(tuple->shape(), false);
*indices_to_copy.mutable_element({0}) = true;
ShapeTree<HloInstruction*> copies_added(tuple->shape(),
                                            /*init_value=*/nullptr);
HloInstruction* deep_copy =
computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
.value();
EXPECT_THAT(deep_copy, GmockMatch(m::Tuple(
m::Copy(m::GetTupleElement(m::Op().Is(tuple))),
m::GetTupleElement(m::Op().Is(tuple)))));
EXPECT_TRUE(copies_added.element({}) == nullptr);
EXPECT_TRUE(copies_added.element({0}) != nullptr);
EXPECT_TRUE(copies_added.element({1}) == nullptr);
}
}
TEST_F(HloComputationTest, DeepCopyToken) {
auto builder = HloComputation::Builder(TestName());
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(token).value();
EXPECT_THAT(copy, GmockMatch(m::AfterAll()));
}
TEST_F(HloComputationTest, DeepCopyTokenTuple) {
auto builder = HloComputation::Builder(TestName());
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({token, constant}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(tuple).value();
EXPECT_THAT(copy, GmockMatch(m::Tuple(
m::GetTupleElement(m::Op().Is(tuple)),
m::Copy(m::GetTupleElement(m::Op().Is(tuple))))));
}
TEST_F(HloComputationTest, CycleDetection) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, negate, negate));
auto module = CreateNewUnverifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
ASSERT_IS_OK(add->AddControlDependencyTo(negate));
auto instructions = computation->MakeInstructionPostOrder();
EXPECT_EQ(3, instructions.size());
FunctionVisitor visitor(
[](HloInstruction* instruction) { return absl::OkStatus(); });
auto visit_status = computation->Accept(&visitor);
ASSERT_FALSE(visit_status.ok());
ASSERT_THAT(visit_status.message(),
::testing::ContainsRegex("cycle is detecte"));
}
TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto dead_negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, dead_negate, dead_negate));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Negate(m::Op().Is(constant))));
EXPECT_EQ(negate, computation->root_instruction());
ASSERT_IS_OK(computation->RemoveInstructionAndUnusedOperands(dead_add));
EXPECT_EQ(2, computation->instruction_count());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Negate(m::Op().Is(constant))));
EXPECT_EQ(negate, computation->root_instruction());
}
TEST_F(HloComputationTest, RemoveSeveralUnusedFusionParameters) {
const char* const kHloModule = R"(
HloModule test
f {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
p2 = f32[] parameter(2)
add = f32[] add(p0, p2)
ROOT neg = f32[] negate(p1)
}
ENTRY main {
param0 = f32[] parameter(0)
param1 = f32[] parameter(1)
param2 = f32[] parameter(2)
ROOT res = f32[] fusion(param0, param1, param2), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloModule));
auto root = module->entry_computation()->root_instruction();
auto dead_add = FindInstruction(module.get(), "add");
ASSERT_IS_OK(root->fused_instructions_computation()
->RemoveInstructionAndUnusedOperands(dead_add));
root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(1))));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Negate(m::Parameter(0))));
}
TEST_F(HloComputationTest, ReplaceParameter) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
const = s32[] constant(-1)
ROOT root = (f32[2], s32[]) tuple(val, const)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.1 = s32[] parameter(0)
const = f32[2] constant({0,1})
while_init = (f32[2], s32[]) tuple(const, param.1)
while = (f32[2], s32[]) while(while_init), condition=condition, body=body
ROOT out = s32[] get-tuple-element(while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloModule));
HloComputation* body = module->GetComputationWithName("body");
Shape new_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {2}), ShapeUtil::MakeShape(S32, {})});
body->ReplaceParameter(
0, HloInstruction::CreateParameter(0, new_param_shape, "new_p_body"));
EXPECT_TRUE(ShapeUtil::Equal(body->parameter_instruction(0)->shape(),
new_param_shape));
}
TEST_F(HloComputationTest, CloneWithControlDependency) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, param));
auto module = CreateNewVerifiedModule();
auto computation =
      module->AddEntryComputation(builder.Build(/*root_instruction=*/add));
TF_CHECK_OK(negate->AddControlDependencyTo(add));
auto clone = computation->Clone();
auto cloned_add = clone->root_instruction();
EXPECT_EQ(cloned_add->opcode(), HloOpcode::kAdd);
auto predecessors = cloned_add->control_predecessors();
EXPECT_EQ(1, predecessors.size());
EXPECT_EQ(HloOpcode::kNegate, predecessors[0]->opcode());
auto successors = predecessors[0]->control_successors();
EXPECT_THAT(successors, ::testing::ElementsAre(cloned_add));
}
TEST_F(HloComputationTest, CloneWithReplacements) {
auto builder = HloComputation::Builder(TestName());
Shape r0s64 = ShapeUtil::MakeShape(S64, {});
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
Shape r0u32 = ShapeUtil::MakeShape(U32, {});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "p.0.lhs"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "p.0.rhs"));
auto param2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0s64, "p.1"));
auto lt = builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
param1, ComparisonDirection::kLt));
auto module = CreateNewVerifiedModule();
auto computation =
      module->AddEntryComputation(builder.Build(/*root_instruction=*/lt));
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(param2,
HloInstruction::CreateParameter(2, r0s32, "p.1"));
auto param3 = HloInstruction::CreateParameter(3, r0u32, "p.2");
std::vector<const HloInstruction*> extra_parameters{param3.get()};
auto clone =
computation->CloneWithReplacements(&replacements, extra_parameters);
ASSERT_EQ(clone->num_parameters(), 4);
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(0)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(1)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(2)->shape(), r0s32));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(3)->shape(), r0u32));
}
TEST_F(HloComputationTest, CloneInContext) {
HloComputation::Builder builder(TestName());
Shape r0s64 = ShapeUtil::MakeShape(S64, {});
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
Shape r0u32 = ShapeUtil::MakeShape(U32, {});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "p.0.lhs"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "p.0.rhs"));
HloInstruction* param2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0s64, "p.1"));
HloInstruction* lt = builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
param1, ComparisonDirection::kLt));
std::unique_ptr<VerifiedHloModule> module = CreateNewVerifiedModule();
const HloComputation& computation =
*module->AddEntryComputation(builder.Build(lt));
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(param2,
HloInstruction::CreateParameter(2, r0s32, "p.1"));
std::unique_ptr<HloInstruction> param3 =
HloInstruction::CreateParameter(3, r0u32, "p.2");
std::vector<const HloInstruction*> extra_parameters = {param3.get()};
HloCloneContext clone_context(module.get());
std::unique_ptr<HloComputation> clone = computation.CloneInContext(
clone_context, &replacements, extra_parameters);
ASSERT_EQ(clone->num_parameters(), 4);
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(0)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(1)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(2)->shape(), r0s32));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(3)->shape(), r0u32));
}
TEST_F(HloComputationTest, Stringification) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->SetExecutionThread("MainThread");
auto options = HloPrintOptions().set_print_metadata(false);
const std::string expected_computation =
R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
EXPECT_EQ(computation->ToString(options), expected_computation);
}
TEST_F(HloComputationTest, StringificationIndent) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->SetExecutionThread("MainThread");
auto options =
HloPrintOptions().set_print_metadata(false).set_indent_amount(2);
const std::string expected_computation =
R"( %TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
EXPECT_EQ(computation->ToString(options), expected_computation);
}
TEST_F(HloComputationTest, StringificationCanonical) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->SetExecutionThread("MainThread");
auto options = HloPrintOptions().set_print_metadata(false);
const std::string expected_computation1 =
R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
EXPECT_EQ(computation->ToString(options), expected_computation1);
options = HloPrintOptions().Canonical();
const std::string expected_computation2 = R"(TransposeDot {
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
EXPECT_EQ(computation->ToString(options), expected_computation2);
}
std::unique_ptr<HloComputation> MakeAddNComputation(
int n, std::string name = "add_n") {
auto builder = HloComputation::Builder(name);
auto result = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "x_value"));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
for (int i = 0; i < n; ++i) {
result = builder.AddInstruction(HloInstruction::CreateBinary(
one->shape(), HloOpcode::kAdd, result, one));
}
return builder.Build();
}
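// DeepEquality below compares two 200000-add chains built by
// MakeAddNComputation above; equality on chains this deep presumably
// requires an iterative (non-recursive) comparison to avoid overflowing
// the stack.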
TEST_F(HloComputationTest, DeepEquality) {
auto computation_a = MakeAddNComputation(200000);
auto computation_b = MakeAddNComputation(200000);
EXPECT_TRUE(*computation_a == *computation_b);
auto computation_c = MakeAddNComputation(199999);
EXPECT_FALSE(*computation_a == *computation_c);
EXPECT_FALSE(*computation_c == *computation_b);
}
TEST_F(HloComputationTest, InstructionPostOrderWithAllReduce) {
const char* const hlo_string = R"(
HloModule Module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
param = f32[128] parameter(0), sharding={maximal device=0}
crs0 = f32[128] all-reduce(param),
replica_groups={{0}}, channel_id=1, to_apply=add,
sharding={maximal device=0}
crs1 = f32[128] all-reduce(param),
replica_groups={{0}}, channel_id=1, to_apply=add,
sharding={maximal device=1}
add = f32[128] add(crs0, crs0), sharding={maximal device=0}
ROOT t = (f32[128], f32[128]) tuple(add, crs1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(module->entry_computation()->MakeInstructionPostOrder(),
ElementsAre(op::Parameter(), op::AllReduce(), op::AllReduce(),
op::Add(), op::Tuple()));
}
TEST_F(HloComputationTest, ComparisonWithCustomComparator) {
std::string_view mod_txt = R"(
HloModule Module
region_X {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
}
region_Y {
Arg_0.5 = s32[] parameter(0)
Ar_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
}
region_A {
Arg_0.5 = s32[] parameter(0)
Arg_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
}
region_B {
Arg_0.5 = s32[] parameter(0)
Ar_1.6 = s32[] parameter(1)
ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
}
main.15 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
ENTRY main.16 {
Arg_0.1 = s32[10]{0} parameter(0)
constant.3 = s32[] constant(0)
rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
Arg_1.2 = s32[15]{0} parameter(1)
rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
ROOT multiply.14 = s32[] multiply(rd1, rd2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(mod_txt));
absl::flat_hash_map<std::string_view, std::string_view> replace_map;
replace_map["region_X"] = "region_A";
replace_map["region_Y"] = "region_B";
auto compare_func = [&replace_map](const HloComputation* a,
const HloComputation* b) {
return (a->name() == b->name() || replace_map[a->name()] == b->name());
};
HloComputation *comp_a = nullptr, *comp_b = nullptr;
for (auto comp : module->computations()) {
if (comp->name() == "main.15") {
comp_a = comp;
}
if (comp->name() == "main.16") {
comp_b = comp;
}
}
EXPECT_FALSE(comp_a->Equal(*comp_b, false));
EXPECT_TRUE(comp_a->Equal(*comp_b, false, compare_func));
}
TEST_F(HloComputationTest, CloneWrappedAsyncInstructionSameWrappedFunc) {
const char* const hlo_string = R"(
HloModule Module
add (lhs: u32[], rhs: u32[]) -> u32[] {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(u32[] lhs, u32[] rhs)
}
async_wrapped (async_param.1: u32[8]) -> u32[4] {
async_param.1 = u32[8]{0} parameter(0)
ROOT reduce-scatter.1 = u32[4]{0} reduce-scatter(u32[8]{0} async_param.1),
replica_groups={}, dimensions={0}, to_apply=add
}
ENTRY main (data: u32[8]) -> u32[4] {
data = u32[8]{0} parameter(0)
reduce-scatter-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} data),
calls=async_wrapped, backend_config={"is_sync":false}
ROOT reduce-scatter-done = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) reduce-scatter-start),
calls=async_wrapped
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* start = FindInstruction(module.get(), "reduce-scatter-start");
HloInstruction* done = FindInstruction(module.get(), "reduce-scatter-done");
EXPECT_EQ(start->async_wrapped_computation(),
done->async_wrapped_computation());
std::unique_ptr<HloInstruction> cloned_start = start->Clone();
std::unique_ptr<HloInstruction> cloned_done =
done->CloneWithNewOperands(done->shape(), {cloned_start.get()});
EXPECT_EQ(cloned_start.get()->async_wrapped_computation(),
cloned_done.get()->async_wrapped_computation());
}
TEST_F(HloComputationTest, CompositeCall) {
const char* const hlo_string = R"(
HloModule Module
add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CallR0F32AddScalar.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=add, is_composite=true,
frontend_attributes={
composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},
composite.name="foo.bar",
composite.version="1"
}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* composite_call = FindInstruction(module.get(), "call");
EXPECT_EQ(composite_call->opcode(), HloOpcode::kCall);
EXPECT_TRUE(composite_call->is_composite());
EXPECT_EQ(composite_call->frontend_attributes().map().size(), 3);
}
TEST_F(HloComputationTest, CloneComputationWithAsyncInstructions) {
constexpr std::string_view hlo = R"(
HloModule main
comp.0 {
ROOT custom-call.0 = () custom-call(), custom_call_target="foo"
}
ENTRY main {
in.0 = () parameter(0)
call.0 = () call(), to_apply=comp.0
ROOT out.0 = () tuple()
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
HloComputation* comp0 = FindComputation(module.get(), "comp.0");
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.0");
TF_ASSERT_OK(comp0->CreateAsyncInstructions(
custom_call, {ShapeUtil::MakeScalarShape(U32)},
HloInstruction::kMainExecutionThread,
true,
true));
HloComputation* comp1 = module->AddEmbeddedComputation(comp0->Clone());
HloComputation* comp2 = module->AddEmbeddedComputation(comp0->Clone());
EXPECT_NE(comp0->root_instruction()->name(),
comp1->root_instruction()->name());
EXPECT_NE(comp0->root_instruction()->operand(0)->name(),
comp1->root_instruction()->operand(0)->name());
EXPECT_NE(comp1->root_instruction()->name(),
comp2->root_instruction()->name());
EXPECT_NE(comp1->root_instruction()->operand(0)->name(),
comp2->root_instruction()->operand(0)->name());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_computation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_computation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc21e004-d9cf-4df2-88e5-7f58d4352930 | cpp | tensorflow/tensorflow | tile_assignment | third_party/xla/xla/hlo/ir/tile_assignment.cc | third_party/xla/xla/tests/tile_assignment_test.cc | #include "xla/hlo/ir/tile_assignment.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/util.h"
namespace xla {
namespace {
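// Canonicalizes an iota tile's (reshape_dims, transpose_perm) pair in place:
// size-1 dimensions are dropped (with `perm` remapped to match) and runs of
// dimensions that remain contiguous under the permutation are merged into a
// single dimension, repeating until a fixed point. Both spans are shrunk in
// place to the canonical length.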
void CanonicalizeIotaDims(absl::Span<int64_t>& dims, absl::Span<int>& perm) {
DCHECK_EQ(dims.size(), perm.size());
if (dims.size() <= 1) {
return;
}
absl::InlinedVector<int, 6> old_to_new_dims(dims.size());
while (true) {
bool changed = false;
int new_ndims = 0;
for (int i = 0; i < dims.size(); ++i) {
if (dims[i] == 1) {
old_to_new_dims[i] = -1;
} else {
old_to_new_dims[i] = new_ndims;
++new_ndims;
}
}
if (new_ndims != dims.size()) {
for (int i = 0, new_idx = 0; i < dims.size(); ++i) {
int new_dim = old_to_new_dims[i];
if (new_dim >= 0) {
dims[new_dim] = dims[i];
}
int new_perm_dim = old_to_new_dims[perm[i]];
if (new_perm_dim >= 0) {
perm[new_idx] = new_perm_dim;
++new_idx;
DCHECK_LE(new_idx, new_ndims);
}
}
perm = perm.subspan(0, new_ndims);
dims = dims.subspan(0, new_ndims);
}
for (int i = 1, base = 0, n = dims.size(); i < n; ++i) {
const int base_dim = perm[base];
const int dim = perm[i];
if (base_dim + (i - base) == dim) {
dims[base_dim] *= dims[dim];
dims[dim] = 1;
changed = true;
} else {
base = i;
}
}
if (!changed) {
break;
}
}
}
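// Classification of a transpose permutation relative to a dims array: kNoop
// leaves the layout unchanged, kReshape only repositions size-1 (degenerate)
// dimensions, and kTranspose genuinely reorders data. GetTransposeKind below
// computes this by checking whether the non-degenerate dimensions stay in
// increasing order under the permutation.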
enum class TransposeKind {
kNoop,
kReshape,
kTranspose,
};
TransposeKind GetTransposeKind(absl::Span<const int64_t> dims,
absl::Span<const int> perm) {
TransposeKind kind = TransposeKind::kNoop;
int prev_non_one_dim = -1;
for (int i = 0; i < perm.size(); ++i) {
const auto& d = perm[i];
if (dims[d] == 1) {
if (d != i && dims[i] != 1) kind = TransposeKind::kReshape;
continue;
}
if (d <= prev_non_one_dim) return TransposeKind::kTranspose;
prev_non_one_dim = d;
}
return kind;
}
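// The inverse direction of canonicalization: expands every reshape dimension
// into its prime factorization (trial division by 2, then odd candidates) and
// rewrites the transpose permutation over the expanded factor list. This
// lets Transpose() below regroup factors to match an arbitrary new dims().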
std::pair<absl::InlinedVector<int64_t, 6>, absl::InlinedVector<int, 6>>
FullyDecanonicalize(absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm) {
absl::InlinedVector<int64_t, 6> new_reshape_dims;
absl::InlinedVector<int, 6> old_to_new_dims(reshape_dims.size() + 1);
for (int i = 0, n = reshape_dims.size(); i < n; ++i) {
int64_t dim_size = reshape_dims[i];
while (dim_size % 2 == 0) {
new_reshape_dims.push_back(2);
dim_size /= 2;
}
    for (int64_t factor = 3; factor * factor <= dim_size; factor += 2) {
      while (dim_size % factor == 0) {
        new_reshape_dims.push_back(factor);
        dim_size /= factor;
      }
    }
if (dim_size > 1) {
CHECK_GT(dim_size, 2);
new_reshape_dims.push_back(dim_size);
}
old_to_new_dims[i + 1] = new_reshape_dims.size();
}
absl::InlinedVector<int, 6> new_transpose_perm;
new_transpose_perm.reserve(new_reshape_dims.size());
for (int i = 0; i < transpose_perm.size(); ++i) {
const int old_dim = transpose_perm[i];
for (int j = old_to_new_dims[old_dim], n = old_to_new_dims[old_dim + 1];
j < n; ++j) {
new_transpose_perm.push_back(j);
}
}
return std::make_pair(std::move(new_reshape_dims),
std::move(new_transpose_perm));
}
}
IotaTileAssignment IotaTileAssignment::Create(
absl::Span<const int64_t> dims) {
return IotaTileAssignment(dims, {Product(dims)}, {0});
}
IotaTileAssignment IotaTileAssignment::Create(
absl::Span<const int64_t> dims, absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm) {
absl::InlinedVector<int64_t, 6> canonicalized_dims(reshape_dims.begin(),
reshape_dims.end());
absl::InlinedVector<int, 6> canonicalized_perm(transpose_perm.begin(),
transpose_perm.end());
auto dims_span = absl::MakeSpan(canonicalized_dims);
auto perm_span = absl::MakeSpan(canonicalized_perm);
CanonicalizeIotaDims(dims_span, perm_span);
if (dims_span.empty()) {
canonicalized_dims[0] = 1;
dims_span = absl::MakeSpan(canonicalized_dims.data(), 1);
canonicalized_perm[0] = 0;
perm_span = absl::MakeSpan(canonicalized_perm.data(), 1);
}
return IotaTileAssignment(dims, dims_span, perm_span);
}
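// Materializes the iota form as an explicit device array: fill 0..n-1 over
// reshape_dims, apply transpose_perm, then reshape to the tile's dims().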
Array<int64_t> IotaTileAssignment::ToArray() const {
Array<int64_t> array(reshape_dims());
array.FillIota(0);
array.TransposeDimensions(transpose_perm());
array.Reshape(dims());
return array;
}
IotaTileAssignment::IotaTileAssignment(const IotaTileAssignment& other)
: IotaTileAssignment(other.ndims_, other.reshape_ndims_) {
std::memcpy(storage_.get(), other.storage_.get(), size_bytes());
}
IotaTileAssignment& IotaTileAssignment::operator=(
const IotaTileAssignment& other) {
const int new_size = other.size_bytes();
if (size_bytes() != new_size) {
storage_.reset(new char[new_size]);
}
ndims_ = other.ndims_;
reshape_ndims_ = other.reshape_ndims_;
std::memcpy(storage_.get(), other.storage_.get(), new_size);
return *this;
}
IotaTileAssignment::IotaTileAssignment(absl::Span<const int64_t> dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm)
: IotaTileAssignment(dims.size(), reshape_dims.size()) {
DCHECK_EQ(reshape_dims.size(), transpose_perm.size());
std::memcpy(dims_ptr(), dims.data(), ndims_ * sizeof(int64_t));
DCHECK_EQ(num_elements(), Product(reshape_dims));
std::memcpy(reshape_dims_ptr(), reshape_dims.data(),
reshape_ndims_ * sizeof(int64_t));
std::memcpy(transpose_perm_ptr(), transpose_perm.data(),
reshape_ndims_ * sizeof(int));
}
IotaTileAssignment::IotaTileAssignment(int ndims, int reshape_ndims)
: ndims_(ndims),
reshape_ndims_(reshape_ndims),
storage_(new char[size_bytes()]) {}
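// Attempts to transpose while staying in iota form. Fast paths: a no-op perm
// returns *this; a perm that only moves degenerate (size-1) dims keeps the
// same reshape_dims/transpose_perm; and when the non-degenerate dims line up
// one-to-one with the permuted reshape_dims, the two permutations compose
// directly. Otherwise reshape_dims are decanonicalized into prime factors
// and regrouped to fit the transposed dims; std::nullopt is returned when no
// regrouping exists, in which case TileAssignment::Transpose falls back to a
// materialized array.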
std::optional<IotaTileAssignment> IotaTileAssignment::Transpose(
absl::Span<const int> perm) const {
DCHECK_EQ(ndims_, perm.size());
auto dims = this->dims();
const TransposeKind kind = GetTransposeKind(dims, perm);
if (kind == TransposeKind::kNoop) return *this;
absl::InlinedVector<int64_t, 6> new_dims(ndims_);
for (int64_t i = 0; i < ndims_; ++i) {
new_dims[i] = dims[perm[i]];
}
if (kind == TransposeKind::kReshape) {
return IotaTileAssignment::Create(new_dims, reshape_dims(),
transpose_perm());
}
if (reshape_ndims_ == 1) {
return IotaTileAssignment::Create(new_dims, dims, perm);
}
bool is_pure_transpose = true;
absl::InlinedVector<int64_t, 6> non_one_dims;
absl::InlinedVector<int, 6> one_to_non_one(ndims_);
non_one_dims.reserve(ndims_);
auto reshape_dims = this->reshape_dims();
auto transpose_perm = this->transpose_perm();
for (int i = 0; i < ndims_; ++i) {
const int64_t dim = dims[i];
if (dim == 1) {
one_to_non_one[i] = -1;
continue;
}
if (non_one_dims.size() >= reshape_ndims_ ||
reshape_dims[transpose_perm[non_one_dims.size()]] != dim) {
is_pure_transpose = false;
}
one_to_non_one[i] = non_one_dims.size();
non_one_dims.push_back(dims[i]);
}
if (is_pure_transpose) {
CHECK_EQ(reshape_ndims_, non_one_dims.size());
absl::InlinedVector<int, 6> new_perm;
new_perm.reserve(non_one_dims.size());
for (int i = 0; i < ndims_; ++i) {
if (dims[perm[i]] == 1) continue;
new_perm.push_back(transpose_perm[one_to_non_one[perm[i]]]);
}
CHECK_EQ(reshape_ndims_, new_perm.size());
return IotaTileAssignment::Create(new_dims, reshape_dims, new_perm);
}
auto [decanonicalized_reshape_dims, decanonicalized_transpose_perm] =
FullyDecanonicalize(reshape_dims, transpose_perm);
CHECK_LE(non_one_dims.size(), decanonicalized_reshape_dims.size());
absl::InlinedVector<absl::InlinedVector<int, 2>, 6> grouped_reshape_dims(
non_one_dims.size());
int transpose_perm_idx = 0;
for (int i = 0, n = non_one_dims.size(),
dn = decanonicalized_reshape_dims.size();
i < n && transpose_perm_idx < dn; ++i) {
int reshape_dim_idx = decanonicalized_transpose_perm[transpose_perm_idx];
int64_t cand = decanonicalized_reshape_dims[reshape_dim_idx];
int64_t target = non_one_dims[i];
while (target % cand == 0) {
target /= cand;
grouped_reshape_dims[i].push_back(reshape_dim_idx);
if (++transpose_perm_idx >= dn) {
break;
}
reshape_dim_idx = decanonicalized_transpose_perm[transpose_perm_idx];
cand = decanonicalized_reshape_dims[reshape_dim_idx];
}
if (target != 1) {
return std::nullopt;
}
}
absl::InlinedVector<int, 6> flattened_transpose_perm;
flattened_transpose_perm.reserve(reshape_ndims_);
for (int i = 0; i < perm.size(); ++i) {
const int dim = perm[i];
if (one_to_non_one[dim] < 0) {
continue;
}
auto& group = grouped_reshape_dims[one_to_non_one[dim]];
flattened_transpose_perm.insert(flattened_transpose_perm.end(),
group.begin(), group.end());
}
CHECK_EQ(flattened_transpose_perm.size(),
decanonicalized_transpose_perm.size());
return IotaTileAssignment::Create(new_dims, decanonicalized_reshape_dims,
flattened_transpose_perm);
}
void IotaTileAssignment::Print(Printer* printer) const {
printer->Append("[");
AppendJoin(printer, dims(), ",");
printer->Append("]<=[");
AppendJoin(printer, reshape_dims(), ",");
printer->Append("]");
if (reshape_ndims_ > 1) {
printer->Append("T(");
AppendJoin(printer, transpose_perm(), ",");
printer->Append(")");
}
}
std::string IotaTileAssignment::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
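// Evaluates one element without materializing the array: linearize `index`
// over dims(), decode it as a position in the transposed reshape_dims space,
// then re-linearize over reshape_dims to recover the original iota value.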
int64_t IotaTileAssignment::value_at(absl::Span<const int64_t> index) const {
DCHECK_EQ(index.size(), ndims_);
int64_t linear_index = index[0];
auto dims = this->dims();
for (int64_t i = 1; i < ndims_; ++i) {
linear_index *= dims[i];
linear_index += index[i];
}
auto reshape_dims = this->reshape_dims();
auto transpose_perm = this->transpose_perm();
absl::InlinedVector<int64_t, 6> reshape_index(reshape_ndims_);
for (int64_t i = reshape_ndims_ - 1; i >= 0; --i) {
int dim = transpose_perm[i];
int dim_size = reshape_dims[dim];
reshape_index[dim] = linear_index % dim_size;
linear_index /= dim_size;
}
int64_t value = reshape_index[0];
for (int64_t i = 1; i < reshape_ndims_; ++i) {
value *= reshape_dims[i];
value += reshape_index[i];
}
return value;
}
bool TileAssignment::operator==(const TileAssignment& other) const {
if (iota_ && other.iota_) {
return *iota_ == *other.iota_;
}
return array() == other.array();
}
int64_t TileAssignment::operator()(absl::Span<const int64_t> indexes) const {
return array_ ? (*array_)(indexes) : iota_->value_at(indexes);
}
absl::Span<const int64_t> TileAssignment::dimensions() const {
return array_ ? array_->dimensions() : iota_->dims();
}
int64_t TileAssignment::num_dimensions() const {
return array_ ? array_->num_dimensions() : iota_->ndims();
}
int64_t TileAssignment::dim(int64_t n) const {
return array_ ? array_->dim(n) : iota_->dim(n);
}
int64_t TileAssignment::num_elements() const {
return array_ ? array_->num_elements() : iota_->num_elements();
}
int64_t TileAssignment::first() const { return array_ ? *array_->begin() : 0; }
void TileAssignment::Each(
absl::FunctionRef<void(absl::Span<const int64_t>, int64_t)> f) const {
MaybeMaterializeFullArray();
array_->Each(f);
}
absl::Status TileAssignment::EachStatus(
absl::FunctionRef<absl::Status(absl::Span<const int64_t>, int64_t)> f)
const {
MaybeMaterializeFullArray();
return array_->EachStatus(f);
}
[[nodiscard]] TileAssignment TileAssignment::Reshape(
absl::Span<const int64_t> new_dimensions) const {
if (iota_) {
CHECK_EQ(Product(new_dimensions), iota_->num_elements());
return TileAssignment(
IotaTileAssignment(new_dimensions, iota_->reshape_dims(),
iota_->transpose_perm()),
nullptr);
}
auto reshaped = std::make_shared<Array<int64_t>>(*array_);
reshaped->Reshape(new_dimensions);
return TileAssignment(std::move(reshaped));
}
[[nodiscard]] TileAssignment TileAssignment::Transpose(
absl::Span<const int> perm) const {
const TransposeKind kind = GetTransposeKind(dimensions(), perm);
if (kind == TransposeKind::kNoop) {
return *this;
}
if (iota_) {
auto transposed = iota_->Transpose(perm);
if (transposed) {
return TileAssignment(std::move(*transposed));
}
}
auto cloned_array = shared_array_clone();
cloned_array->TransposeDimensions(perm);
return TileAssignment(std::move(cloned_array));
}
void TileAssignment::Print(Printer* printer) const {
if (iota_) {
printer->Append("devices=");
iota_->Print(printer);
} else {
printer->Append("devices=[");
AppendJoin(printer, array().dimensions(), ",");
printer->Append("]");
AppendJoin(printer, array(), ",");
}
}
std::string TileAssignment::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
bool TileAssignment::UsesDevice(int64_t device) const {
return iota_ ? device < iota_->num_elements()
: absl::c_linear_search(array(), device);
}
const Array<int64_t>& TileAssignment::array() const {
MaybeMaterializeFullArray();
return *array_;
}
const std::shared_ptr<const Array<int64_t>>& TileAssignment::shared_array()
const {
MaybeMaterializeFullArray();
return shared_array_;
}
std::shared_ptr<Array<int64_t>> TileAssignment::shared_array_clone() const {
MaybeMaterializeFullArray();
return std::make_shared<Array<int64_t>>(*array_);
}
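// Lazily builds the explicit device array from the iota form on first use;
// until then array_/shared_array_ stay null and accessors answer queries
// directly from the compact iota representation.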
void TileAssignment::MaybeMaterializeFullArray() const {
if (array_ == nullptr) {
DCHECK(shared_array_ == nullptr);
DCHECK(iota_.has_value());
auto full = std::make_shared<Array<int64_t>>(iota_->ToArray());
shared_array_ = std::move(full);
array_ = shared_array_.get();
}
}
} | #include "xla/hlo/ir/tile_assignment.h"
#include <memory>
#include <vector>
#include "absl/hash/hash.h"
#include "xla/array3d.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
std::vector<int64_t> ToVectorUsingEach(const TileAssignment& tile) {
std::vector<int64_t> result;
result.reserve(tile.num_elements());
tile.Each([&](absl::Span<const int64_t> index, int64_t device) {
result.push_back(device);
});
return result;
}
TEST(TileAssignmentTest, Replicated) {
TileAssignment tile;
EXPECT_EQ(tile.num_dimensions(), 1);
EXPECT_EQ(tile.dim(0), 0);
}
TEST(TileAssignmentTest, Maximal) {
TileAssignment tile(5);
EXPECT_EQ(tile.num_dimensions(), 1);
EXPECT_EQ(tile.dim(0), 1);
EXPECT_EQ(tile(0), 5);
EXPECT_EQ(tile({0}), 5);
EXPECT_FALSE(tile.iota());
EXPECT_TRUE(tile.UsesDevice(5));
EXPECT_EQ(tile.first(), 5);
EXPECT_FALSE(tile.UsesDevice(0));
EXPECT_THAT(ToVectorUsingEach(tile), ElementsAre(5));
}
TEST(TileAssignmentTest, V1V2Equivalence) {
Array3D<int64_t> array(
{{{0, 8, 4, 12}, {1, 9, 5, 13}}, {{2, 10, 6, 14}, {3, 11, 7, 15}}});
TileAssignment v1(std::make_shared<const Array<int64_t>>(array));
TileAssignment v2({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
EXPECT_EQ(v1, v2);
EXPECT_EQ(v2, v1);
EXPECT_EQ(v1.first(), 0);
EXPECT_EQ(v2.first(), 0);
EXPECT_NE(v1.iota().has_value(), v2.iota().has_value());
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
TEST(TileAssignmentTest, CopyConstruction) {
TileAssignment tile({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
TileAssignment copied(tile);
EXPECT_EQ(tile, copied);
EXPECT_EQ(tile.iota().has_value(), copied.iota().has_value());
EXPECT_EQ(absl::HashOf(tile), absl::HashOf(copied));
}
TEST(TileAssignmentTest, CopyAssignment) {
TileAssignment tile({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
TileAssignment copied = tile;
EXPECT_EQ(tile, copied);
EXPECT_EQ(tile.iota().has_value(), copied.iota().has_value());
EXPECT_EQ(absl::HashOf(tile), absl::HashOf(copied));
}
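// Parameterized over whether the iota (V2) tile assignment is first
// converted to an explicit array (V1), so every test below verifies that
// both representations behave identically.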
class FormattedTileAssignmentTest : public ::testing::TestWithParam<bool> {
protected:
bool ShouldConvertToV1() { return GetParam(); }
};
TEST_P(FormattedTileAssignmentTest, TrivialIotaTile) {
TileAssignment tile({4, 4, 2});
EXPECT_EQ(tile.ToString(), "devices=[4,4,2]<=[32]");
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
EXPECT_EQ(tile, TileAssignment({4, 4, 2}));
EXPECT_EQ(tile.num_dimensions(), 3);
EXPECT_EQ(tile.dim(0), 4);
EXPECT_EQ(tile.dim(1), 4);
EXPECT_EQ(tile.dim(2), 2);
EXPECT_EQ(tile(0, 0, 0), 0);
EXPECT_EQ(tile({3, 2, 1}), 29);
EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(tile.UsesDevice(0));
EXPECT_TRUE(tile.UsesDevice(31));
EXPECT_FALSE(tile.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(tile),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposedIotaTile) {
TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
EXPECT_EQ(tile.ToString(), "devices=[4,4,2]<=[2,4,4]T(2,1,0)");
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
EXPECT_EQ(tile, TileAssignment({4, 4, 2}, {2, 4, 4}, {2, 1, 0}));
EXPECT_EQ(tile.num_dimensions(), 3);
EXPECT_EQ(tile.dim(0), 4);
EXPECT_EQ(tile.dim(1), 4);
EXPECT_EQ(tile.dim(2), 2);
EXPECT_EQ(tile(0, 0, 0), 0);
EXPECT_EQ(tile({3, 2, 1}), 27);
EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(tile.UsesDevice(0));
EXPECT_TRUE(tile.UsesDevice(31));
EXPECT_FALSE(tile.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(tile),
ElementsAre(0, 16, 4, 20, 8, 24, 12, 28, 1, 17, 5, 21, 9, 25, 13, 29, 2,
18, 6, 22, 10, 26, 14, 30, 3, 19, 7, 23, 11, 27, 15, 31));
}
TEST_P(FormattedTileAssignmentTest, NonCanonicalTransposedIotaTile) {
TileAssignment tile({4, 8}, {2, 4, 4}, {1, 2, 0});
EXPECT_EQ(tile.ToString(), "devices=[4,8]<=[2,16]T(1,0)");
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
EXPECT_EQ(tile, TileAssignment({4, 8}, {2, 16}, {1, 0}));
EXPECT_EQ(tile.num_dimensions(), 2);
EXPECT_EQ(tile.dim(0), 4);
EXPECT_EQ(tile.dim(1), 8);
EXPECT_EQ(tile(0, 0), 0);
EXPECT_EQ(tile({3, 2}), 13);
EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(tile.UsesDevice(0));
EXPECT_TRUE(tile.UsesDevice(31));
EXPECT_FALSE(tile.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(tile),
ElementsAre(0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24,
9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31));
}
TEST_P(FormattedTileAssignmentTest, ReshapeTrivalIotaTile) {
TileAssignment tile({4, 4, 2});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment reshaped = tile.Reshape({2, 8, 2});
EXPECT_NE(reshaped, tile);
EXPECT_EQ(reshaped, TileAssignment({2, 8, 2}));
EXPECT_EQ(reshaped.num_dimensions(), 3);
EXPECT_EQ(reshaped.dim(0), 2);
EXPECT_EQ(reshaped.dim(1), 8);
EXPECT_EQ(reshaped.dim(2), 2);
EXPECT_EQ(reshaped(0, 0, 0), 0);
EXPECT_EQ(reshaped({1, 3, 1}), 23);
EXPECT_EQ(reshaped.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(reshaped.UsesDevice(0));
EXPECT_TRUE(reshaped.UsesDevice(31));
EXPECT_FALSE(reshaped.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(reshaped),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
TEST_P(FormattedTileAssignmentTest, ReshapeTransposedIotaTile) {
TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment reshaped = tile.Reshape({2, 2, 4, 2});
EXPECT_NE(reshaped, tile);
EXPECT_EQ(reshaped, TileAssignment({2, 2, 4, 2}, {2, 4, 4}, {2, 1, 0}));
EXPECT_EQ(reshaped.num_dimensions(), 4);
EXPECT_EQ(reshaped.dim(0), 2);
EXPECT_EQ(reshaped.dim(1), 2);
EXPECT_EQ(reshaped.dim(2), 4);
EXPECT_EQ(reshaped.dim(3), 2);
EXPECT_EQ(reshaped(0, 0, 0, 0), 0);
EXPECT_EQ(reshaped({1, 1, 2, 1}), 27);
EXPECT_EQ(reshaped.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(reshaped.UsesDevice(0));
EXPECT_TRUE(reshaped.UsesDevice(31));
EXPECT_FALSE(reshaped.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(reshaped),
ElementsAre(0, 16, 4, 20, 8, 24, 12, 28, 1, 17, 5, 21, 9, 25, 13, 29, 2,
18, 6, 22, 10, 26, 14, 30, 3, 19, 7, 23, 11, 27, 15, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposeTrivalIotaTile) {
TileAssignment tile({4, 4, 2});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({2, 0, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({2, 4, 4}, {16, 2}, {1, 0}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 2);
EXPECT_EQ(xposed.dim(1), 4);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({1, 3, 1}), 27);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(31));
EXPECT_FALSE(xposed.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1,
3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposeTransposedIotaTile) {
TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({0, 2, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({4, 2, 4}, {8, 4}, {1, 0}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 2);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({3, 0, 3}), 15);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(31));
EXPECT_FALSE(xposed.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 16, 20, 24, 28, 1, 5, 9, 13, 17, 21, 25, 29, 2,
6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposeIotaTileWithDegenerateDims) {
TileAssignment tile({4, 4, 1}, {4, 4}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({1, 2, 0});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({4, 1, 4}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 1);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({2, 0, 3}), 11);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(15));
EXPECT_FALSE(xposed.UsesDevice(16));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
}
TEST_P(FormattedTileAssignmentTest,
TransposeIotaTileSplittingCanonicalizedReshapeDims) {
TileAssignment tile({8, 2, 16}, {16, 16}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({0, 2, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({8, 16, 2}, {16, 8, 2}, {1, 0, 2}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 8);
EXPECT_EQ(xposed.dim(1), 16);
EXPECT_EQ(xposed.dim(2), 2);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({2, 7, 1}), 117);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(255));
EXPECT_FALSE(xposed.UsesDevice(256));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(
0, 1, 16, 17, 32, 33, 48, 49, 64, 65, 80, 81, 96, 97, 112, 113, 128,
129, 144, 145, 160, 161, 176, 177, 192, 193, 208, 209, 224, 225, 240,
241, 2, 3, 18, 19, 34, 35, 50, 51, 66, 67, 82, 83, 98, 99, 114, 115,
130, 131, 146, 147, 162, 163, 178, 179, 194, 195, 210, 211, 226, 227,
242, 243, 4, 5, 20, 21, 36, 37, 52, 53, 68, 69, 84, 85, 100, 101, 116,
117, 132, 133, 148, 149, 164, 165, 180, 181, 196, 197, 212, 213, 228,
229, 244, 245, 6, 7, 22, 23, 38, 39, 54, 55, 70, 71, 86, 87, 102, 103,
118, 119, 134, 135, 150, 151, 166, 167, 182, 183, 198, 199, 214, 215,
230, 231, 246, 247, 8, 9, 24, 25, 40, 41, 56, 57, 72, 73, 88, 89, 104,
105, 120, 121, 136, 137, 152, 153, 168, 169, 184, 185, 200, 201, 216,
217, 232, 233, 248, 249, 10, 11, 26, 27, 42, 43, 58, 59, 74, 75, 90,
91, 106, 107, 122, 123, 138, 139, 154, 155, 170, 171, 186, 187, 202,
203, 218, 219, 234, 235, 250, 251, 12, 13, 28, 29, 44, 45, 60, 61, 76,
77, 92, 93, 108, 109, 124, 125, 140, 141, 156, 157, 172, 173, 188,
189, 204, 205, 220, 221, 236, 237, 252, 253, 14, 15, 30, 31, 46, 47,
62, 63, 78, 79, 94, 95, 110, 111, 126, 127, 142, 143, 158, 159, 174,
175, 190, 191, 206, 207, 222, 223, 238, 239, 254, 255));
}
TEST_P(FormattedTileAssignmentTest,
TransposeIotaTileSplittingBothCanonicalizedReshapeDimsAndTileDims) {
TileAssignment tile({14, 3, 5}, {6, 5, 7}, {2, 0, 1});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({1, 0, 2});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({3, 14, 5}, {2, 3, 5, 7}, {1, 3, 0, 2}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 3);
EXPECT_EQ(xposed.dim(1), 14);
EXPECT_EQ(xposed.dim(2), 5);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({2, 11, 3}), 201);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(209));
EXPECT_FALSE(xposed.UsesDevice(210));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(
0, 7, 14, 21, 28, 105, 112, 119, 126, 133, 1, 8, 15, 22, 29, 106, 113,
120, 127, 134, 2, 9, 16, 23, 30, 107, 114, 121, 128, 135, 3, 10, 17,
24, 31, 108, 115, 122, 129, 136, 4, 11, 18, 25, 32, 109, 116, 123,
130, 137, 5, 12, 19, 26, 33, 110, 117, 124, 131, 138, 6, 13, 20, 27,
34, 111, 118, 125, 132, 139, 35, 42, 49, 56, 63, 140, 147, 154, 161,
168, 36, 43, 50, 57, 64, 141, 148, 155, 162, 169, 37, 44, 51, 58, 65,
142, 149, 156, 163, 170, 38, 45, 52, 59, 66, 143, 150, 157, 164, 171,
39, 46, 53, 60, 67, 144, 151, 158, 165, 172, 40, 47, 54, 61, 68, 145,
152, 159, 166, 173, 41, 48, 55, 62, 69, 146, 153, 160, 167, 174, 70,
77, 84, 91, 98, 175, 182, 189, 196, 203, 71, 78, 85, 92, 99, 176, 183,
190, 197, 204, 72, 79, 86, 93, 100, 177, 184, 191, 198, 205, 73, 80,
87, 94, 101, 178, 185, 192, 199, 206, 74, 81, 88, 95, 102, 179, 186,
193, 200, 207, 75, 82, 89, 96, 103, 180, 187, 194, 201, 208, 76, 83,
90, 97, 104, 181, 188, 195, 202, 209));
}
TEST_P(FormattedTileAssignmentTest,
TransposeIotaTileGroupingCanonicalizedReshapeDims) {
TileAssignment tile({1, 4, 16}, {4, 4, 4}, {1, 0, 2});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({2, 0, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({16, 1, 4}, {4, 4, 4}, {0, 2, 1}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 16);
EXPECT_EQ(xposed.dim(1), 1);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({7, 0, 3}), 31);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(63));
EXPECT_FALSE(xposed.UsesDevice(64));
EXPECT_THAT(ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19,
23, 27, 31, 32, 36, 40, 44, 33, 37, 41, 45, 34, 38,
42, 46, 35, 39, 43, 47, 48, 52, 56, 60, 49, 53, 57,
61, 50, 54, 58, 62, 51, 55, 59, 63));
}
TEST_P(FormattedTileAssignmentTest, TransposeNoopIotaTile) {
TileAssignment tile({4, 4}, {4, 4}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({0, 1});
EXPECT_EQ(xposed, tile);
EXPECT_EQ(xposed.num_dimensions(), 2);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 4);
EXPECT_EQ(xposed(0, 0), 0);
EXPECT_EQ(xposed({2, 3}), 14);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(15));
EXPECT_FALSE(xposed.UsesDevice(16));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15));
}
TEST_P(FormattedTileAssignmentTest, TransposeNoopIotaTileWithDegenerateDims) {
TileAssignment tile({1, 4, 1, 1, 4, 1}, {4, 4}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({1, 5, 0, 4, 3, 2});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed.num_dimensions(), 6);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 1);
EXPECT_EQ(xposed.dim(2), 1);
EXPECT_EQ(xposed.dim(3), 4);
EXPECT_EQ(xposed.dim(4), 1);
EXPECT_EQ(xposed.dim(5), 1);
EXPECT_EQ(xposed(0, 0, 0, 0, 0, 0), 0);
EXPECT_EQ(xposed({2, 0, 0, 3, 0, 0}), 14);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(15));
EXPECT_FALSE(xposed.UsesDevice(16));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15));
}
INSTANTIATE_TEST_SUITE_P(All, FormattedTileAssignmentTest, ::testing::Bool());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/tile_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/tile_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7484ff92-acf1-42ef-86b8-36fb3c19fc85 | cpp | tensorflow/tensorflow | hlo_instruction | third_party/xla/xla/hlo/ir/hlo_instruction.cc | third_party/xla/xla/service/hlo_instruction_test.cc | #include "xla/hlo/ir/hlo_instruction.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_original_value.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_lexer.h"
#include "xla/service/mapped_ptr_container_sorter.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/sort_json.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/iterator_range.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::CEscape;
using absl::StrAppend;
using absl::StrCat;
using absl::StrJoin;
const HloInstruction::Rare* const HloInstruction::kEmptyRare =
new HloInstruction::Rare;
namespace {
template <typename T>
absl::Status EraseElementFromVector(PtrVec<T>* container, T value) {
auto it = std::find(container->begin(), container->end(), value);
TF_RET_CHECK(it != container->end());
container->erase(it);
return absl::OkStatus();
}
}
HloInstruction::Users::~Users() = default;
void HloInstruction::Users::Clear() {
users_.clear();
user_map_.reset(nullptr);
DCHECK(CheckInvariants());
}
bool HloInstruction::Users::Contains(const HloInstruction* instruction) const {
if (user_map_ == nullptr) {
return std::find(users_.begin(), users_.end(), instruction) != users_.end();
} else {
return user_map_->contains(instruction);
}
}
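// Users are kept in a flat vector for the common small fan-out case; once
// the count reaches kMapThreshold, a user->index hash map is built alongside
// it so Contains() and UserId() stay O(1) for large fan-outs.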
void HloInstruction::Users::AddUser(HloInstruction* user) {
if (!Contains(user)) {
if (user_map_ == nullptr && users_.size() >= kMapThreshold) {
user_map_ =
std::make_unique<absl::flat_hash_map<const HloInstruction*, int64_t>>(
users_.size());
RebuildMap();
DCHECK(CheckInvariants());
}
if (user_map_ != nullptr) {
user_map_->emplace(user, users_.size());
}
users_.push_back(user);
DCHECK(CheckInvariants());
}
}
int64_t HloInstruction::Users::UserId(HloInstruction* user) {
if (user_map_ == nullptr) {
auto it = std::find(users_.begin(), users_.end(), user);
CHECK(it != users_.end());
return it - users_.begin();
} else {
auto result = user_map_->find(user);
CHECK(result != user_map_->end());
return result->second;
}
}
void HloInstruction::Users::MaybeRemoveUser(HloInstruction* user) {
if (Contains(user)) {
RemoveUser(user);
DCHECK(CheckInvariants());
}
}
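// Removes `user` in O(1) by swapping the last element into its slot (the
// relative order of the remaining users is not preserved) and patching the
// index map when one exists.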
void HloInstruction::Users::RemoveUser(HloInstruction* user) {
const int64_t index = UserId(user);
CHECK_EQ(users_[index], user);
HloInstruction* last = users_.back();
if (user_map_ != nullptr) {
(*user_map_)[last] = index;
user_map_->erase(user);
}
users_[index] = last;
users_.pop_back();
DCHECK(CheckInvariants());
}
void HloInstruction::Users::SortInstructionUsers(
const MappedPtrContainerSorter<HloInstruction>::MapPtrFn& map_fn,
const Users& sorted_instruction_users) {
using Sorter = MappedPtrContainerSorter<HloInstruction>;
auto status = Sorter::Sort(map_fn, Sorter::IndexAfterMappedElementsFn(),
sorted_instruction_users.users_, users_);
if (!status.ok()) {
LOG(ERROR) << "Failed to sort instruction users: " << status;
}
if (user_map_ != nullptr) {
user_map_->clear();
RebuildMap();
}
DCHECK(CheckInvariants());
}
void HloInstruction::Users::RebuildMap() {
for (uint64_t i = 0; i < users_.size(); ++i) {
(*user_map_)[users_[i]] = i;
}
}
bool HloInstruction::Users::CheckInvariants() {
if (user_map_ != nullptr) {
CHECK_EQ(users_.size(), user_map_->size());
}
return true;
}
void HloInstruction::AppendComputation(HloComputation* computation) {
mutable_rare()->called_computations.push_back(computation);
}
HloInstruction* HloInstruction::AddInstruction(
std::unique_ptr<HloInstruction> derived_instruction) {
HloInstruction* derived =
parent()->AddInstruction(std::move(derived_instruction));
const bool has_prior_sharding = derived->has_sharding();
SetupDerivedInstruction(derived);
if (!has_prior_sharding && (derived->opcode() == HloOpcode::kReshape ||
derived->opcode() == HloOpcode::kTranspose)) {
derived->clear_sharding();
}
return derived;
}
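// Deserializes one instruction from its proto. All operands and called
// computations must already be present in the provided maps; the switch
// below reconstructs the opcode-specific subclass and its attributes.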
absl::StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
const HloInstructionProto& proto,
const absl::flat_hash_map<int64_t, HloInstruction*>& instruction_map,
const absl::flat_hash_map<int64_t, HloComputation*>& computation_map,
bool prohibit_empty_literal) {
TF_RET_CHECK(!proto.opcode().empty());
HloOpcode opcode;
auto opcode_or = StringToHloOpcode(proto.opcode());
std::optional<ComparisonDirection> comparison_direction;
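  // Older serialized modules used a distinct opcode per comparison direction
  // (e.g. "greater-than"); map those legacy names onto kCompare with an
  // explicit ComparisonDirection.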
if (opcode_or.ok()) {
opcode = std::move(opcode_or).value();
} else {
if (proto.opcode() == "equal-to") {
comparison_direction = ComparisonDirection::kEq;
} else if (proto.opcode() == "not-equal-to") {
comparison_direction = ComparisonDirection::kNe;
} else if (proto.opcode() == "greater-than-or-equal-to") {
comparison_direction = ComparisonDirection::kGe;
} else if (proto.opcode() == "greater-than") {
comparison_direction = ComparisonDirection::kGt;
} else if (proto.opcode() == "less-than-or-equal-to") {
comparison_direction = ComparisonDirection::kLe;
} else if (proto.opcode() == "less-than") {
comparison_direction = ComparisonDirection::kLt;
}
if (comparison_direction) {
opcode = HloOpcode::kCompare;
} else {
return InvalidArgument("Unknown opcode: %s", proto.opcode());
}
}
TF_RET_CHECK(proto.has_shape());
std::unique_ptr<HloInstruction> instruction;
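  // Convenience lambdas that resolve operand and called-computation ids from
  // the proto into pointers via the caller-provided maps.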
const auto operands = [&instruction_map, &proto](int index) {
return instruction_map.at(proto.operand_ids(index));
};
const auto all_operands = [&instruction_map, &proto]() {
std::vector<HloInstruction*> result(proto.operand_ids_size());
std::transform(proto.operand_ids().begin(), proto.operand_ids().end(),
result.begin(), [&instruction_map](int64_t operand_id) {
return instruction_map.at(operand_id);
});
return result;
};
const auto output_to_operand_aliasing = [&proto]() {
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_to_operand_aliasing;
for (const auto& aliasing : proto.output_operand_aliasing()) {
output_to_operand_aliasing.emplace_back(
ShapeIndex(aliasing.output_shape_index().begin(),
aliasing.output_shape_index().end()),
std::make_pair(aliasing.operand_index(),
ShapeIndex(aliasing.operand_shape_index().begin(),
aliasing.operand_shape_index().end())));
}
return output_to_operand_aliasing;
};
const auto computations = [&computation_map, &proto](int index) {
return computation_map.at(proto.called_computation_ids(index));
};
const auto all_computations = [&computation_map, &proto]() {
std::vector<HloComputation*> result(proto.called_computation_ids_size());
std::transform(proto.called_computation_ids().begin(),
proto.called_computation_ids().end(), result.begin(),
[&computation_map](int64_t computation_id) {
return computation_map.at(computation_id);
});
return result;
};
TF_RET_CHECK(
absl::c_all_of(proto.operand_ids(),
[&](int64_t id) { return instruction_map.contains(id); }))
<< proto.name() << " instruction contains invalid operand id(s)";
TF_RET_CHECK(
absl::c_all_of(proto.called_computation_ids(),
[&](int64_t id) { return computation_map.contains(id); }))
<< proto.name() << " instruction references invalid computation id(s)";
Shape shape(proto.shape());
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
std::optional<int> arity = HloOpcodeArity(opcode);
if (arity) {
TF_RET_CHECK(proto.operand_ids_size() == *arity)
<< proto.opcode() << " instruction should have " << *arity
<< " operands but sees " << proto.operand_ids_size();
}
switch (opcode) {
case HloOpcode::kBatchNormTraining:
instruction =
CreateBatchNormTraining(shape, operands(0), operands(1), operands(2),
proto.epsilon(), proto.feature_index());
break;
case HloOpcode::kBatchNormInference:
instruction = CreateBatchNormInference(
shape, operands(0), operands(1), operands(2), operands(3),
operands(4), proto.epsilon(), proto.feature_index());
break;
case HloOpcode::kBatchNormGrad:
instruction = CreateBatchNormGrad(shape, operands(0), operands(1),
operands(2), operands(3), operands(4),
proto.epsilon(), proto.feature_index());
break;
case HloOpcode::kFft: {
std::vector<int64_t> fft_length(proto.fft_length().begin(),
proto.fft_length().end());
instruction = CreateFft(shape, operands(0), proto.fft_type(),
absl::Span<const int64_t>(fft_length));
break;
}
case HloOpcode::kAsyncStart: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Async start instruction should have 1 called computation but "
"sees "
<< proto.called_computation_ids_size();
instruction = CreateAsyncStart(shape, all_operands(), computations(0),
proto.async_execution_thread().empty()
? kMainExecutionThread
: proto.async_execution_thread());
break;
}
case HloOpcode::kAsyncUpdate: {
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "Async update requires one singular operand";
HloInstruction* prev_op = operands(0);
TF_RET_CHECK(prev_op->IsAsynchronous())
<< "Async update requires its operand to be an asynchronous op";
if (!proto.async_execution_thread().empty()) {
TF_RET_CHECK(proto.async_execution_thread() ==
prev_op->async_execution_thread())
<< "Async update should have " << prev_op->async_execution_thread()
<< " async_execution_thread, but sees "
<< proto.async_execution_thread();
}
if (!proto.called_computation_ids().empty()) {
TF_RET_CHECK(computations(0) == prev_op->async_wrapped_computation())
<< "Async update should have "
<< prev_op->async_wrapped_computation()->name()
<< " async_wrapped_computation, but sees "
<< computations(0)->name();
}
instruction = CreateAsyncUpdate(shape, prev_op);
break;
}
case HloOpcode::kAsyncDone: {
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "Async done requires one singular operand";
HloInstruction* prev_op = operands(0);
TF_RET_CHECK(prev_op->IsAsynchronous())
<< "Async done requires its operand to be an asynchronous op";
if (!proto.async_execution_thread().empty()) {
TF_RET_CHECK(proto.async_execution_thread() ==
prev_op->async_execution_thread())
<< "Async done should have " << prev_op->async_execution_thread()
<< " async_execution_thread, but sees "
<< proto.async_execution_thread();
}
if (!proto.called_computation_ids().empty()) {
TF_RET_CHECK(computations(0) == prev_op->async_wrapped_computation())
<< "Async done should have "
<< prev_op->async_wrapped_computation()->name()
<< " async_wrapped_computation, but sees "
<< computations(0)->name();
}
instruction = CreateAsyncDone(shape, prev_op);
break;
}
case HloOpcode::kCopyStart: {
std::optional<int> cross_program_prefetch_index;
if (proto.optional_cross_program_prefetch_index_case() ==
HloInstructionProto::kCrossProgramPrefetchIndex) {
cross_program_prefetch_index =
std::make_optional(proto.cross_program_prefetch_index());
} else if (proto.is_cross_program_prefetch()) {
cross_program_prefetch_index = 0;
}
instruction =
CreateCopyStart(shape, operands(0), cross_program_prefetch_index);
break;
}
case HloOpcode::kCompare: {
if (!comparison_direction) {
TF_ASSIGN_OR_RETURN(
comparison_direction,
StringToComparisonDirection(proto.comparison_direction()));
}
auto comparison_type_str = proto.comparison_type();
if (!comparison_type_str.empty()) {
TF_ASSIGN_OR_RETURN(auto comparison_type,
StringToComparisonType(comparison_type_str));
instruction = CreateCompare(shape, operands(0), operands(1),
*comparison_direction, comparison_type);
} else {
instruction = CreateCompare(shape, operands(0), operands(1),
*comparison_direction);
}
break;
}
case HloOpcode::kTriangularSolve: {
instruction = CreateTriangularSolve(shape, operands(0), operands(1),
proto.triangular_solve_options());
break;
}
case HloOpcode::kCholesky: {
instruction =
CreateCholesky(shape, operands(0), proto.cholesky_options());
break;
}
case HloOpcode::kSend:
instruction = CreateSend(operands(0), operands(1), proto.channel_id(),
proto.is_host_transfer());
break;
case HloOpcode::kSendDone:
TF_RET_CHECK(DynCast<HloSendInstruction>(operands(0)) != nullptr)
<< "SendDone must take the context operand from Send";
instruction = CreateSendDone(operands(0), proto.is_host_transfer());
break;
case HloOpcode::kRecv:
instruction = CreateRecv(shape.tuple_shapes(0), operands(0),
proto.channel_id(), proto.is_host_transfer());
break;
case HloOpcode::kRecvDone:
TF_RET_CHECK(DynCast<HloRecvInstruction>(operands(0)) != nullptr)
<< "RecvDone must take the context operand from Recv";
instruction = CreateRecvDone(operands(0), proto.is_host_transfer());
break;
case HloOpcode::kReverse:
instruction =
CreateReverse(shape, operands(0),
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()));
break;
case HloOpcode::kConcatenate:
TF_RET_CHECK(proto.dimensions_size() == 1)
<< "Concatenate instruction should have 1 dimension but sees "
<< proto.dimensions_size();
instruction =
CreateConcatenate(shape, all_operands(), proto.dimensions(0));
break;
case HloOpcode::kConditional: {
TF_RET_CHECK(proto.called_computation_ids_size() > 0)
<< "conditional should have at least 1 called computation";
if (operands(0)->shape().element_type() == PRED) {
TF_RET_CHECK(proto.called_computation_ids_size() == 2)
<< "conditional should have exactly 2 called computations but got "
<< proto.called_computation_ids_size();
}
TF_RET_CHECK(proto.operand_ids_size() ==
proto.called_computation_ids_size() + 1)
<< "conditional should have one branch_index operand plus one "
"operand per called computation but got "
<< proto.operand_ids_size() << " operands for "
<< proto.called_computation_ids_size() << " branch computations";
auto cond_operands = all_operands();
instruction =
CreateConditional(shape, cond_operands[0], all_computations(),
absl::MakeSpan(cond_operands).subspan(1));
break;
}
case HloOpcode::kReduce:
TF_RET_CHECK(proto.operand_ids_size() % 2 == 0)
<< "Reduce instruction should have an even number of operands but "
"sees "
<< proto.operand_ids_size();
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Reduce instruction should have 1 called computation but sees "
<< proto.called_computation_ids_size();
{
const auto reduce_operands = all_operands();
auto inputs = absl::MakeSpan(reduce_operands)
.subspan(0, reduce_operands.size() / 2);
auto init_values =
absl::MakeSpan(reduce_operands)
.subspan(reduce_operands.size() / 2, reduce_operands.size());
instruction =
CreateReduce(shape, inputs, init_values,
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()),
computations(0));
}
break;
case HloOpcode::kSort: {
TF_RET_CHECK(proto.operand_ids_size() >= 1)
<< "Sort instruction should have at least 1 operand but has "
<< proto.operand_ids_size();
TF_RET_CHECK(proto.dimensions().size() == 1)
<< "Sort instruction should have 1 dimension";
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Sort instruction should one called computation but sees "
<< proto.called_computation_ids_size();
      auto sort_operands = all_operands();
      instruction = CreateSort(shape, proto.dimensions(0), sort_operands,
computations(0), proto.is_stable());
break;
}
case HloOpcode::kTopK: {
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "TopK instruction should have exactly 1 operand but has "
<< proto.operand_ids_size();
instruction =
CreateTopK(shape, all_operands()[0], proto.k(), proto.largest());
break;
}
case HloOpcode::kTranspose:
instruction =
CreateTranspose(shape, operands(0),
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()));
break;
case HloOpcode::kBroadcast:
instruction =
CreateBroadcast(shape, operands(0),
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()));
break;
case HloOpcode::kMap:
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Map instruction should have 1 called computation but sees "
<< proto.called_computation_ids_size();
instruction = CreateMap(shape, all_operands(), computations(0));
break;
case HloOpcode::kSlice: {
std::vector<int64_t> slice_starts, slice_limits, slice_strides;
for (const HloInstructionProto::SliceDimensions& slice_dimensions :
proto.slice_dimensions()) {
slice_starts.push_back(slice_dimensions.start());
slice_limits.push_back(slice_dimensions.limit());
slice_strides.push_back(slice_dimensions.stride());
}
instruction = CreateSlice(shape, operands(0), slice_starts, slice_limits,
slice_strides);
break;
}
case HloOpcode::kConstant: {
if (proto.has_literal()) {
TF_ASSIGN_OR_RETURN(
auto literal,
Literal::CreateFromProto(proto.literal(), prohibit_empty_literal));
instruction = CreateConstant(std::move(literal));
TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
instruction->shape(), shape))
<< instruction->shape().ToString(true) << " vs "
<< shape.ToString(true);
*instruction->mutable_shape() = shape;
} else {
instruction = std::make_unique<HloConstantInstruction>(shape);
}
break;
}
case HloOpcode::kFusion: {
TF_RET_CHECK(!proto.fusion_kind().empty());
TF_ASSIGN_OR_RETURN(FusionKind fusion_kind,
StringToFusionKind(proto.fusion_kind()));
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Expect 1 called computation for fusion instruction but sees "
<< proto.called_computation_ids_size();
const int64_t fusion_id = proto.called_computation_ids(0);
auto* fused_computation =
tsl::gtl::FindPtrOrNull(computation_map, fusion_id);
TF_RET_CHECK(fused_computation != nullptr)
<< "No fusion computation with id " << fusion_id;
instruction =
CreateFusion(shape, fusion_kind, all_operands(), fused_computation);
auto fusion_instr = DynCast<HloFusionInstruction>(instruction.get());
fusion_instr->set_output_to_operand_aliasing(
output_to_operand_aliasing());
break;
}
case HloOpcode::kRng:
instruction = CreateRng(shape, proto.distribution(), all_operands());
break;
case HloOpcode::kRngBitGenerator:
instruction =
CreateRngBitGenerator(shape, operands(0), proto.rng_algorithm());
break;
case HloOpcode::kRngGetAndUpdateState:
instruction = CreateRngGetAndUpdateState(shape, proto.delta());
break;
case HloOpcode::kParameter:
instruction =
CreateParameter(proto.parameter_number(), shape, proto.name());
if (!proto.parameter_replication().replicated_at_leaf_buffers().empty()) {
instruction->set_parameter_replicated_at_leaf_buffers(
proto.parameter_replication().replicated_at_leaf_buffers());
}
break;
case HloOpcode::kGetTupleElement:
instruction =
CreateGetTupleElement(shape, operands(0), proto.tuple_index());
break;
case HloOpcode::kReducePrecision:
instruction = CreateReducePrecision(
shape, operands(0), proto.exponent_bits(), proto.mantissa_bits());
break;
case HloOpcode::kInfeed: {
      TF_RET_CHECK(shape.IsTuple() &&
                   (ShapeUtil::TupleElementCount(shape) == 2))
          << "Infeed should have a tuple shape with 2 elements, but has: "
          << shape;
const Shape& data_shape = ShapeUtil::GetTupleElementShape(shape, 0);
instruction =
CreateInfeed(data_shape, operands(0), proto.infeed_config());
} break;
case HloOpcode::kOutfeed: {
Shape outfeed_shape(proto.outfeed_shape());
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(outfeed_shape));
instruction = CreateOutfeed(outfeed_shape, operands(0), operands(1),
proto.outfeed_config());
break;
}
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart: {
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
      TF_RET_CHECK(proto.dimensions_size() == 1)
          << "AllGather cannot have more than 1 all-gather dimension";
int64_t all_gather_dimension = proto.dimensions(0);
if (opcode == HloOpcode::kAllGather) {
instruction = CreateAllGather(
shape, all_operands(), all_gather_dimension,
CollectiveDeviceList::FromProto(proto), proto.constrain_layout(),
channel_id, proto.use_global_device_ids());
} else {
instruction = CreateAllGatherStart(
shape, all_operands(), all_gather_dimension,
CollectiveDeviceList::FromProto(proto), proto.constrain_layout(),
channel_id, proto.use_global_device_ids());
}
break;
}
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kReduceScatter: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "AllReduce should have 1 called computation but sees "
<< proto.called_computation_ids_size();
TF_RET_CHECK(proto.channel_id() <= 0 || proto.all_reduce_id() <= 0)
<< "AllReduce cannot have both channel_id() and all_reduce_id()";
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
if (proto.all_reduce_id() > 0) {
channel_id = proto.all_reduce_id();
}
CollectiveDeviceList device_list = CollectiveDeviceList::FromProto(proto);
if (opcode == HloOpcode::kAllReduce) {
instruction =
CreateAllReduce(shape, all_operands(), computations(0), device_list,
proto.constrain_layout(), channel_id,
proto.use_global_device_ids());
} else if (opcode == HloOpcode::kReduceScatter) {
        TF_RET_CHECK(proto.dimensions_size() == 1)
            << "ReduceScatter cannot have more than 1 scatter dimension";
int64_t scatter_dimension = proto.dimensions(0);
instruction = CreateReduceScatter(
shape, all_operands(), computations(0), device_list,
proto.constrain_layout(), channel_id, proto.use_global_device_ids(),
scatter_dimension);
} else {
instruction =
CreateAllReduceStart(shape, all_operands(), computations(0),
device_list, proto.constrain_layout(),
channel_id, proto.use_global_device_ids());
}
break;
}
case HloOpcode::kAllToAll: {
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
std::optional<int64_t> split_dimension;
if (proto.dimensions_size() > 0) {
TF_RET_CHECK(proto.dimensions_size() == 1)
<< "AllToAll cannot have more than 1 dimension (split dimension)";
TF_RET_CHECK(all_operands().size() == 1)
<< "AllToAll must have a single operand when the split dimension "
"is specified";
split_dimension = proto.dimensions(0);
}
instruction = CreateAllToAll(
shape, all_operands(), CollectiveDeviceList::FromProto(proto),
proto.constrain_layout(), channel_id, split_dimension);
break;
}
case HloOpcode::kCollectiveBroadcast: {
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
instruction = CreateCollectiveBroadcast(
shape, all_operands(), CollectiveDeviceList::FromProto(proto), false,
channel_id);
break;
}
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart: {
TF_RET_CHECK(proto.operand_ids().size() == 1 ||
proto.operand_ids().size() == 4);
std::vector<std::pair<int64_t, int64_t>> source_target_pairs(
proto.source_target_pairs_size());
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
for (int i = 0; i < source_target_pairs.size(); ++i) {
source_target_pairs[i].first = proto.source_target_pairs(i).source();
source_target_pairs[i].second = proto.source_target_pairs(i).target();
}
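      // A collective-permute proto comes in two forms: the plain form with a
      // single data operand, and the combined in-place form with four operands
      // (input, output, input start indices, output start indices) plus
      // per-slice sizes in dynamic_slice_sizes.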
if (proto.dynamic_slice_sizes_size() == 0) {
if (opcode == HloOpcode::kCollectivePermute) {
instruction = CreateCollectivePermute(
shape, operands(0), source_target_pairs, channel_id);
} else if (opcode == HloOpcode::kCollectivePermuteStart) {
instruction = CreateCollectivePermuteStart(
shape, operands(0), source_target_pairs, channel_id);
} else {
LOG(FATAL) << "Expect CollectivePermute or CollectivePermuteStart, "
<< "but got " << opcode;
}
} else {
std::vector<std::vector<int64_t>> slice_sizes;
HloInstruction* input = operands(0);
HloInstruction* input_start_indices = operands(2);
if (input->shape().IsTuple() &&
input->shape().tuple_shapes_size() > 1) {
slice_sizes.resize(input->shape().tuple_shapes_size());
} else {
slice_sizes.resize(1);
}
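        // dynamic_slice_sizes is a flat repeated field; proto_index walks it
        // while the nested loops below rebuild the per-operand, per-dimension
        // structure.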
int proto_index = 0;
if (input->shape().IsTuple()) {
if (input_start_indices->shape()
.tuple_shapes(0)
.tuple_shapes(0)
.IsArray()) {
slice_sizes.resize(input->shape().tuple_shapes_size());
for (int i = 0; i < input->shape().tuple_shapes_size(); ++i) {
slice_sizes[i].resize(
input->shape().tuple_shapes(i).dimensions_size());
for (int j = 0;
j < input->shape().tuple_shapes(i).dimensions_size(); ++j) {
                CHECK_GT(proto.dynamic_slice_sizes_size(), proto_index);
slice_sizes[i][j] = proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
}
} else {
slice_sizes.resize(
input->shape().tuple_shapes_size() *
ShapeUtil::TupleElementCount(
input_start_indices->shape().tuple_shapes(0)));
int slice_sizes_count = 0;
for (int i = 0; i < input->shape().tuple_shapes_size(); ++i) {
for (int j = 0;
j < ShapeUtil::TupleElementCount(
input_start_indices->shape().tuple_shapes(i));
++j) {
slice_sizes[slice_sizes_count].resize(
input->shape().tuple_shapes(i).rank());
for (int k = 0; k < input->shape().tuple_shapes(i).rank();
++k) {
                  CHECK_GT(proto.dynamic_slice_sizes_size(), proto_index);
slice_sizes[slice_sizes_count][k] =
proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
slice_sizes_count += 1;
}
}
}
} else {
slice_sizes.resize(
ShapeUtil::TupleElementCount(input_start_indices->shape()));
if (input_start_indices->shape().tuple_shapes(0).IsTuple()) {
for (int i = 0;
i < ShapeUtil::TupleElementCount(input_start_indices->shape());
++i) {
slice_sizes[i].resize(input->shape().dimensions_size());
for (int j = 0; j < input->shape().dimensions_size(); ++j) {
slice_sizes[i][j] = proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
}
} else {
slice_sizes.resize(1);
slice_sizes[0].resize(input->shape().dimensions_size());
for (int j = 0; j < input->shape().dimensions_size(); ++j) {
slice_sizes[0][j] = proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
}
}
if (opcode == HloOpcode::kCollectivePermute) {
instruction = CreateCollectivePermute(
shape, operands(0), operands(1), operands(2), operands(3),
source_target_pairs, slice_sizes, channel_id);
} else if (opcode == HloOpcode::kCollectivePermuteStart) {
instruction = CreateCollectivePermuteStart(
shape, operands(0), operands(1), operands(2), operands(3),
source_target_pairs, slice_sizes, channel_id);
} else {
LOG(FATAL) << "Expect CollectivePermute or CollectivePermuteStart, "
<< "but got " << opcode;
}
}
break;
}
case HloOpcode::kReplicaId: {
instruction = CreateReplicaId(shape);
break;
}
case HloOpcode::kPartitionId: {
instruction = CreatePartitionId(shape);
break;
}
case HloOpcode::kConvolution: {
TF_RET_CHECK(proto.has_window());
TF_RET_CHECK(proto.has_convolution_dimension_numbers());
TF_RET_CHECK(absl::c_all_of(proto.precision_config().operand_precision(),
PrecisionConfig::Precision_IsValid));
PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
proto.operand_ids_size(), PrecisionConfig::DEFAULT);
instruction = CreateConvolve(
shape, operands(0), operands(1),
std::max<int64_t>(proto.feature_group_count(), 1),
std::max<int64_t>(proto.batch_group_count(), 1), proto.window(),
proto.convolution_dimension_numbers(), precision_config);
break;
}
case HloOpcode::kReduceWindow:
TF_RET_CHECK(proto.operand_ids_size() % 2 == 0)
<< "Reduce window should have an even number of operands but "
"sees "
<< proto.operand_ids_size();
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "ReduceWindow should have 1 called computation but sees "
<< proto.called_computation_ids_size();
{
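        // Like variadic reduce, the operands are [inputs..., init_values...].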
const auto reduce_operands = all_operands();
auto inputs = absl::MakeSpan(reduce_operands)
.subspan(0, reduce_operands.size() / 2);
        auto init_values = absl::MakeSpan(reduce_operands)
                               .subspan(reduce_operands.size() / 2);
instruction = CreateReduceWindow(shape, inputs, init_values,
proto.window(), computations(0));
}
break;
case HloOpcode::kSelectAndScatter:
TF_RET_CHECK(proto.called_computation_ids_size() == 2)
<< "SelectAndScatter should have 2 called computations but sees "
<< proto.called_computation_ids_size();
instruction = CreateSelectAndScatter(shape, operands(0), computations(0),
proto.window(), operands(1),
operands(2), computations(1));
break;
case HloOpcode::kCustomCall: {
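      // Layout-constrained custom calls carry their operand shapes (with
      // layouts) in the proto; unconstrained ones may reference zero, one, or
      // several called computations.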
if (proto.constrain_layout()) {
std::vector<Shape> operand_shapes;
const auto& operand_shapes_with_layout =
proto.operand_shapes_with_layout();
operand_shapes.reserve(operand_shapes_with_layout.size());
for (const ShapeProto& shape_proto : operand_shapes_with_layout) {
operand_shapes.emplace_back(shape_proto);
}
instruction =
CreateCustomCall(shape, all_operands(), proto.custom_call_target(),
operand_shapes, proto.backend_config());
} else {
if (proto.called_computation_ids_size() == 1) {
instruction = CreateCustomCall(shape, all_operands(), computations(0),
proto.custom_call_target(),
proto.backend_config());
} else if (proto.called_computation_ids_size() > 1) {
instruction = CreateCustomCall(
shape, all_operands(), all_computations(),
proto.custom_call_target(), proto.backend_config());
} else {
instruction = CreateCustomCall(shape, all_operands(),
proto.custom_call_target(),
proto.backend_config());
}
}
auto custom_call_instr =
Cast<HloCustomCallInstruction>(instruction.get());
if (proto.has_window()) {
custom_call_instr->set_window(proto.window());
}
if (proto.has_literal()) {
TF_ASSIGN_OR_RETURN(
auto literal,
Literal::CreateFromProto(proto.literal(), prohibit_empty_literal));
custom_call_instr->set_literal(std::move(literal));
}
if (proto.has_convolution_dimension_numbers()) {
custom_call_instr->set_convolution_dimension_numbers(
proto.convolution_dimension_numbers());
}
custom_call_instr->set_feature_group_count(std::max(
static_cast<int64_t>(proto.feature_group_count()), int64_t{1}));
custom_call_instr->set_batch_group_count(std::max(
static_cast<int64_t>(proto.batch_group_count()), int64_t{1}));
custom_call_instr->set_custom_call_has_side_effect(
proto.custom_call_has_side_effect());
custom_call_instr->set_padding_type(proto.padding_type());
TF_RET_CHECK(absl::c_all_of(proto.precision_config().operand_precision(),
PrecisionConfig::Precision_IsValid));
PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
proto.operand_ids_size(), PrecisionConfig::DEFAULT);
*custom_call_instr->mutable_precision_config() = precision_config;
custom_call_instr->set_output_to_operand_aliasing(
output_to_operand_aliasing());
custom_call_instr->set_custom_call_schedule(proto.custom_call_schedule());
custom_call_instr->set_api_version(proto.custom_call_api_version());
break;
}
case HloOpcode::kPad:
TF_RET_CHECK(proto.has_padding_config());
instruction =
CreatePad(shape, operands(0), operands(1), proto.padding_config());
break;
case HloOpcode::kDynamicSlice: {
std::vector<int64_t> slice_sizes(proto.dynamic_slice_sizes_size());
absl::c_copy(proto.dynamic_slice_sizes(), slice_sizes.begin());
      TF_RET_CHECK(proto.operand_ids_size() >= 1)
          << "DynamicSlice instruction should have at least 1 operand but "
             "sees "
          << proto.operand_ids_size();
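      // Old-style protos pass the start indices as one rank-1 vector operand;
      // new-style protos pass one scalar start index per dimension, so only
      // the new form has a fixed operand count to verify.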
if (proto.operand_ids_size() != 2 || operands(1)->shape().rank() != 1) {
auto expected_operands = 1 + operands(0)->shape().rank();
TF_RET_CHECK(proto.operand_ids_size() == expected_operands)
<< "DynamicSlice instruction should have " << expected_operands
<< " operands, but has " << proto.operand_ids_size();
}
const auto& operand_vector = all_operands();
instruction = CreateDynamicSlice(
shape, operands(0), absl::MakeSpan(operand_vector).subspan(1),
slice_sizes);
break;
}
case HloOpcode::kDynamicUpdateSlice: {
TF_RET_CHECK(proto.operand_ids_size() >= 2)
<< "DynamicUpdateSlice instruction should have at least 2 operands "
"but sees "
<< proto.operand_ids_size();
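      // As with dynamic-slice, start indices are either one rank-1 vector
      // operand (legacy) or one scalar operand per dimension.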
if (proto.operand_ids_size() != 3 || operands(2)->shape().rank() != 1) {
auto expected_operands = 2 + operands(0)->shape().rank();
TF_RET_CHECK(proto.operand_ids_size() == expected_operands)
<< "DynamicUpdateSlice instruction should have "
<< expected_operands << " operands, but has "
<< proto.operand_ids_size();
}
const auto& operand_vector = all_operands();
instruction =
CreateDynamicUpdateSlice(shape, operands(0), operands(1),
absl::MakeSpan(operand_vector).subspan(2));
break;
}
case HloOpcode::kGather: {
TF_RET_CHECK(proto.has_gather_dimension_numbers())
<< "Gather instruction should have GatherDimensionNumbers set.";
auto gather_dimension_numbers = std::make_unique<GatherDimensionNumbers>(
proto.gather_dimension_numbers());
std::vector<int64_t> gather_slice_sizes;
const auto& slice_sizes = proto.gather_slice_sizes();
gather_slice_sizes.reserve(slice_sizes.size());
for (int64_t bound : slice_sizes) {
gather_slice_sizes.push_back(bound);
}
instruction = CreateGather(shape, operands(0), operands(1),
*gather_dimension_numbers, gather_slice_sizes,
proto.indices_are_sorted());
break;
}
case HloOpcode::kScatter: {
TF_RET_CHECK(proto.has_scatter_dimension_numbers())
<< "Scatter instruction should have ScatterDimensionNumbers set.";
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Scatter instruction should have 1 called computation but sees "
<< proto.called_computation_ids_size();
auto scatter_dimension_numbers =
std::make_unique<ScatterDimensionNumbers>(
proto.scatter_dimension_numbers());
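      // Scatter operands are laid out as [inputs..., scatter_indices,
      // updates...], with equally many inputs and updates.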
auto operands = all_operands();
auto operand_span = absl::MakeConstSpan(operands);
auto input_count = operands.size() / 2;
instruction =
CreateScatter(shape, operand_span.first(input_count),
operands[input_count], operand_span.last(input_count),
computations(0), *scatter_dimension_numbers,
proto.indices_are_sorted(), proto.unique_indices());
break;
}
case HloOpcode::kIota:
TF_RET_CHECK(proto.dimensions_size() == 1)
<< "Iota instruction should have 1 dimension but sees "
<< proto.dimensions_size();
instruction = CreateIota(shape, proto.dimensions(0));
break;
case HloOpcode::kDot: {
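      // A sparse dot carries one extra metadata operand per sparsity
      // descriptor after the usual lhs/rhs pair.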
int expected_operands =
HloDotInstruction::kOperands + proto.dot_sparsity_size();
TF_RET_CHECK(proto.dot_sparsity_size() <= HloDotInstruction::kOperands)
<< "Too many sparse dot descriptors: " << proto.dot_sparsity_size();
TF_RET_CHECK(proto.operand_ids_size() == expected_operands)
<< proto.opcode() << " instruction should have " << expected_operands
<< " operands but sees " << proto.operand_ids_size();
TF_RET_CHECK(proto.has_dot_dimension_numbers())
<< "Dot instruction should have dot_dimension_numbers.";
TF_RET_CHECK(absl::c_all_of(proto.precision_config().operand_precision(),
PrecisionConfig::Precision_IsValid));
PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
HloDotInstruction::kOperands, PrecisionConfig::DEFAULT);
std::vector<SparsityDescriptor> sparsity(proto.dot_sparsity().begin(),
proto.dot_sparsity().end());
auto operand_vector = all_operands();
instruction = std::make_unique<HloDotInstruction>(
shape, operands(0), operands(1), proto.dot_dimension_numbers(),
precision_config, std::move(sparsity),
absl::MakeSpan(operand_vector).subspan(HloDotInstruction::kOperands));
break;
}
case HloOpcode::kDomain: {
std::shared_ptr<const HloSharding> entry_hlo_sharding;
std::shared_ptr<const HloSharding> exit_hlo_sharding;
if (proto.has_domain_entry_sharding()) {
TF_ASSIGN_OR_RETURN(
HloSharding sharding,
HloSharding::FromProto(proto.domain_entry_sharding()));
entry_hlo_sharding = std::make_shared<const HloSharding>(sharding);
}
if (proto.has_domain_exit_sharding()) {
TF_ASSIGN_OR_RETURN(
HloSharding sharding,
HloSharding::FromProto(proto.domain_exit_sharding()));
exit_hlo_sharding = std::make_shared<const HloSharding>(sharding);
}
instruction = std::make_unique<HloDomainInstruction>(
shape, operands(0),
std::make_unique<ShardingMetadata>(entry_hlo_sharding),
std::make_unique<ShardingMetadata>(exit_hlo_sharding));
break;
}
case HloOpcode::kGetDimensionSize:
TF_RET_CHECK(proto.dimensions_size() == 1);
instruction =
CreateGetDimensionSize(shape, operands(0), proto.dimensions(0));
break;
case HloOpcode::kSetDimensionSize:
TF_RET_CHECK(proto.dimensions_size() == 1);
instruction = CreateSetDimensionSize(shape, operands(0), operands(1),
proto.dimensions(0));
break;
case HloOpcode::kReshape: {
int64_t inferred_dimension = -1;
if (!proto.dimensions().empty()) {
inferred_dimension = proto.dimensions()[0];
}
TF_RET_CHECK(shape.IsArray() && operands(0)->shape().IsArray() &&
(operands(0)->shape().is_unbounded_dynamic() ||
ShapeUtil::StaticExtentProduct(shape) ==
ShapeUtil::StaticExtentProduct(operands(0)->shape())))
<< "shape: " << ShapeUtil::HumanString(shape)
<< " operand: " << ShapeUtil::HumanString(operands(0)->shape());
instruction = CreateReshape(shape, operands(0), inferred_dimension);
break;
}
case HloOpcode::kDynamicReshape: {
TF_RET_CHECK(shape.IsArray() && operands(0)->shape().IsArray() &&
ShapeUtil::StaticExtentProduct(shape) ==
ShapeUtil::StaticExtentProduct(operands(0)->shape()))
<< "shape: " << ShapeUtil::HumanString(shape)
<< " operand: " << ShapeUtil::HumanString(operands(0)->shape());
const auto& operand_vector = all_operands();
instruction = CreateDynamicReshape(
shape, operands(0), absl::MakeSpan(operand_vector).subspan(1));
break;
}
case HloOpcode::kCall: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Call should have 1 called computation but has "
<< proto.called_computation_ids_size();
      TF_RET_CHECK(!proto.has_precision_config())
          << proto.opcode() << proto.name();
      TF_RET_CHECK(!proto.has_dot_dimension_numbers()) << proto.opcode();
if (proto.is_composite()) {
TF_RET_CHECK(proto.has_frontend_attributes())
<< "A composite call op must have frontend attributes";
auto map = proto.frontend_attributes().map();
auto name = map.find("composite.name");
TF_RET_CHECK(name != map.end() && !name->second.empty())
<< "A composite call op must have frontend attributes with key "
"composite.name whose value is non-empty";
auto attributes = map.find("composite.attributes");
TF_RET_CHECK(attributes == map.end() || !attributes->second.empty())
<< "A composite call op must have frontend attributes with key "
"composite.attributes whose value is default: {} or non-empty";
auto version_str = map.find("composite.version");
int64_t version = 0;
TF_RET_CHECK(
version_str == map.end() ||
(absl::SimpleAtoi(version_str->second, &version) && version >= 0))
<< "A composite call op must have frontend attributes with a "
"composite.version whose value is a non-negative integer but "
"got: "
<< version_str->second;
instruction = CreateCompositeCall(
shape, all_operands(),
computation_map.at(proto.called_computation_ids()[0]), name->second,
attributes == map.end() ? "{}" : attributes->second, version);
instruction->set_output_to_operand_aliasing(
output_to_operand_aliasing());
} else {
instruction = std::make_unique<HloCallInstruction>(
shape, all_operands(),
computation_map.at(proto.called_computation_ids()[0]));
instruction->set_output_to_operand_aliasing(
output_to_operand_aliasing());
}
break;
}
default: {
instruction = absl::WrapUnique(new HloInstruction(opcode, shape));
if (instruction->opcode() == HloOpcode::kWhile) {
        TF_RET_CHECK(proto.called_computation_ids_size() == 2)
            << "While should have 2 called computations but has "
            << proto.called_computation_ids_size();
computation_map.at(proto.called_computation_ids(0))
->SetWhileCallInstruction(instruction.get());
}
for (const int64_t operand_id : proto.operand_ids()) {
instruction->AppendOperand(instruction_map.at(operand_id));
}
for (const int64_t computation_id : proto.called_computation_ids()) {
instruction->AppendComputation(computation_map.at(computation_id));
}
if (instruction->opcode() == HloOpcode::kWhile) {
instruction->while_body()->SetWhileCallInstruction(instruction.get());
}
TF_RET_CHECK(!proto.has_precision_config())
<< instruction->opcode() << proto.DebugString();
TF_RET_CHECK(!proto.has_dot_dimension_numbers()) << instruction->opcode();
break;
}
}
for (const int64_t predecessor_id : proto.control_predecessor_ids()) {
TF_RET_CHECK(ContainsKey(instruction_map, predecessor_id))
<< "No instruction with id " << predecessor_id;
TF_RETURN_IF_ERROR(instruction_map.at(predecessor_id)
->AddControlDependencyTo(instruction.get()));
}
TF_RET_CHECK(!proto.name().empty());
instruction->SetAndSanitizeName(proto.name());
*instruction->metadata_ = proto.metadata();
instruction->backend_config_ = BackendConfigWrapper(proto.backend_config());
TF_RET_CHECK(proto.id() >= 0)
<< "Instruction with negative id: " << proto.id();
TF_RET_CHECK(proto.id() <= INT_MAX)
<< "Instruction with id > INT_MAX: " << proto.id();
instruction->unique_id_ = proto.id();
if (proto.has_sharding()) {
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(proto.sharding()));
sharding = sharding.NormalizeTupleSharding(instruction->shape());
instruction->set_sharding(sharding);
}
if (proto.has_frontend_attributes()) {
instruction->set_frontend_attributes(proto.frontend_attributes());
}
if (proto.has_statistics_viz()) {
instruction->set_statistics_viz(proto.statistics_viz());
}
if (proto.has_original_value()) {
const xla::OriginalValueProto& original_value_proto =
proto.original_value();
auto original_value = std::make_shared<OriginalValue>(shape);
for (const auto& leaf : original_value_proto.leaves()) {
*original_value->mutable_element(ShapeIndex(leaf.leaf_shape_index())) = {
leaf.instruction_name(), ShapeIndex(leaf.shape_index())};
}
instruction->set_original_value(original_value);
}
return std::move(instruction);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateParameter(
int64_t parameter_number, const Shape& shape, absl::string_view name) {
return std::make_unique<HloParameterInstruction>(parameter_number, shape,
name);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConstant(
Literal literal) {
return std::make_unique<HloConstantInstruction>(std::move(literal));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateIota(
const Shape& shape, int64_t iota_dimension) {
return std::make_unique<HloIotaInstruction>(shape, iota_dimension);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTopK(
const Shape& shape, HloInstruction* input, int64_t k, bool largest) {
return std::make_unique<HloTopKInstruction>(shape, input, k, largest);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateGetTupleElement(const Shape& shape,
HloInstruction* operand, int64_t index) {
return std::make_unique<HloGetTupleElementInstruction>(shape, operand, index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateGetTupleElement(HloInstruction* operand, int64_t index) {
return std::make_unique<HloGetTupleElementInstruction>(
operand->shape().tuple_shapes(index), operand, index);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRng(
const Shape& shape, RandomDistribution distribution,
absl::Span<HloInstruction* const> parameters) {
return std::make_unique<HloRngInstruction>(shape, distribution, parameters);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateRngGetAndUpdateState(const Shape& shape, int64_t delta) {
return std::make_unique<HloRngGetAndUpdateStateInstruction>(shape, delta);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateRngBitGenerator(const Shape& shape, HloInstruction* state,
RandomAlgorithm algorithm) {
return std::make_unique<HloRngBitGeneratorInstruction>(shape, state,
algorithm);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateNary(
const Shape& shape, HloOpcode opcode,
absl::Span<HloInstruction* const> operands) {
if (opcode == HloOpcode::kCopy) {
CHECK(!shape.IsOpaque());
}
auto instruction = absl::WrapUnique(new HloInstruction(opcode, shape));
for (auto operand : operands) {
instruction->AppendOperand(operand);
}
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateUnary(
const Shape& shape, HloOpcode opcode, HloInstruction* operand) {
switch (opcode) {
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopy:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kClz:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTanh:
case HloOpcode::kTan:
break;
default:
LOG(FATAL) << "Invalid unary instruction opcode " << opcode;
}
return CreateNary(shape, opcode, {operand});
}
std::unique_ptr<HloInstruction> HloInstruction::CreateBinary(
const Shape& shape, HloOpcode opcode, HloInstruction* lhs,
HloInstruction* rhs) {
switch (opcode) {
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kComplex:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert:
break;
default:
LOG(FATAL) << "Invalid binary instruction opcode " << opcode;
}
return CreateNary(shape, opcode, {lhs, rhs});
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTernary(
const Shape& shape, HloOpcode opcode, HloInstruction* lhs,
HloInstruction* rhs, HloInstruction* ehs) {
switch (opcode) {
case HloOpcode::kClamp:
case HloOpcode::kSelect:
break;
default:
LOG(FATAL) << "Invalid ternary instruction opcode " << opcode;
}
return CreateNary(shape, opcode, {lhs, rhs, ehs});
}
std::unique_ptr<HloInstruction> HloInstruction::CreateVariadic(
const Shape& shape, HloOpcode opcode,
absl::Span<HloInstruction* const> operands) {
CHECK_EQ(HloOpcode::kTuple, opcode);
return CreateNary(shape, opcode, operands);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateMap(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* map_computation) {
return std::make_unique<HloMapInstruction>(shape, operands, map_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConvolve(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64_t feature_group_count, int64_t batch_group_count,
const Window& window, const ConvolutionDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config) {
return std::make_unique<HloConvolutionInstruction>(
shape, lhs, rhs, feature_group_count, batch_group_count, window,
dimension_numbers, precision_config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateFft(
const Shape& shape, HloInstruction* operand, FftType fft_type,
absl::Span<const int64_t> fft_length) {
return std::make_unique<HloFftInstruction>(shape, operand, fft_type,
fft_length);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAsyncStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* async_computation,
absl::string_view async_execution_thread) {
return std::make_unique<HloAsyncStartInstruction>(
HloOpcode::kAsyncStart, shape, operands, async_computation,
async_execution_thread);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAsyncUpdate(
const Shape& shape, HloInstruction* operand) {
return std::make_unique<HloAsyncInstruction>(HloOpcode::kAsyncUpdate, shape,
operand);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAsyncDone(
const Shape& shape, HloInstruction* operand) {
return std::make_unique<HloAsyncInstruction>(HloOpcode::kAsyncDone, shape,
operand);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCopyStart(
const Shape& shape, HloInstruction* operand,
std::optional<int> cross_program_prefetch) {
return std::make_unique<HloCopyStartInstruction>(shape, operand,
cross_program_prefetch);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCompare(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
ComparisonDirection direction, std::optional<Comparison::Type> type) {
return std::make_unique<HloCompareInstruction>(shape, lhs, rhs, direction,
type);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateTriangularSolve(const Shape& shape, HloInstruction* a,
HloInstruction* b,
const TriangularSolveOptions& options) {
return std::make_unique<HloTriangularSolveInstruction>(shape, a, b, options);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCholesky(
const Shape& shape, HloInstruction* a, const CholeskyOptions& options) {
return std::make_unique<HloCholeskyInstruction>(shape, a, options);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateDot(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config,
std::vector<SparsityDescriptor> sparsity,
absl::Span<HloInstruction* const> sparse_meta) {
return std::make_unique<HloDotInstruction>(shape, lhs, rhs, dimension_numbers,
precision_config,
std::move(sparsity), sparse_meta);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateReducePrecision(const Shape& shape,
HloInstruction* operand,
const int exponent_bits,
const int mantissa_bits) {
return std::make_unique<HloReducePrecisionInstruction>(
shape, operand, exponent_bits, mantissa_bits);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllGather(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension, const CollectiveDeviceList& device_list,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllGatherInstruction>(
HloOpcode::kAllGather, shape, operands, all_gather_dimension, device_list,
constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllGather(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension, absl::Span<const ReplicaGroup> replica_groups,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return CreateAllGather(shape, operands, all_gather_dimension,
CollectiveDeviceList(replica_groups), constrain_layout,
channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllGatherStart(const Shape& shape,
absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension,
const CollectiveDeviceList& device_list,
bool constrain_layout,
const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllGatherInstruction>(
HloOpcode::kAllGatherStart, shape, operands, all_gather_dimension,
device_list, constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllGatherStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension, absl::Span<const ReplicaGroup> replica_groups,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return CreateAllGatherStart(shape, operands, all_gather_dimension,
CollectiveDeviceList(replica_groups),
constrain_layout, channel_id,
use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllReduce(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation, const CollectiveDeviceList& device_list,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllReduceInstruction>(
HloOpcode::kAllReduce, shape, operands, reduce_computation, device_list,
constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllReduce(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id, bool use_global_device_ids) {
return CreateAllReduce(shape, operands, reduce_computation,
CollectiveDeviceList(replica_groups), constrain_layout,
channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateReduceScatter(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation, const CollectiveDeviceList& device_list,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids, int64_t scatter_dimension) {
return std::make_unique<HloReduceScatterInstruction>(
shape, operands, reduce_computation, device_list, constrain_layout,
channel_id, use_global_device_ids, scatter_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateReduceScatter(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id, bool use_global_device_ids,
int64_t scatter_dimension) {
return CreateReduceScatter(
shape, operands, reduce_computation, CollectiveDeviceList(replica_groups),
constrain_layout, channel_id, use_global_device_ids, scatter_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllReduceStart(const Shape& shape,
absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
const CollectiveDeviceList& device_list,
bool constrain_layout,
const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllReduceInstruction>(
HloOpcode::kAllReduceStart, shape, operands, reduce_computation,
device_list, constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllReduceStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id, bool use_global_device_ids) {
return CreateAllReduceStart(
shape, operands, reduce_computation, CollectiveDeviceList(replica_groups),
constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllToAll(
const Shape& shape, absl::Span<HloInstruction* const> operands,
const CollectiveDeviceList& device_list, bool constrain_layout,
const std::optional<int64_t>& channel_id,
const std::optional<int64_t>& split_dimension) {
return std::make_unique<HloAllToAllInstruction>(shape, operands, device_list,
constrain_layout, channel_id,
split_dimension);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllToAll(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id,
const std::optional<int64_t>& split_dimension) {
return CreateAllToAll(shape, operands, CollectiveDeviceList(replica_groups),
constrain_layout, channel_id, split_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectiveBroadcast(
const Shape& shape, absl::Span<HloInstruction* const> operands,
const CollectiveDeviceList& device_list, bool constrain_layout,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectiveBroadcastInstruction>(
HloOpcode::kCollectiveBroadcast, shape, operands, device_list,
constrain_layout, channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectiveBroadcast(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id) {
return CreateCollectiveBroadcast(shape, operands,
CollectiveDeviceList(replica_groups),
constrain_layout, channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermute(
const Shape& shape, HloInstruction* operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermute, shape, operand, source_target_pairs,
channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermute(
const Shape& shape, HloInstruction* input, HloInstruction* output,
HloInstruction* input_start_indices, HloInstruction* output_start_indices,
absl::Span<const std::pair<int64_t, int64_t>> source_target_pairs,
absl::Span<const std::vector<int64_t>> slice_sizes,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermute, shape, input, output, input_start_indices,
output_start_indices, source_target_pairs, slice_sizes, channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermuteStart(
const Shape& shape, HloInstruction* operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermuteStart, shape, operand, source_target_pairs,
channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermuteStart(
const Shape& shape, HloInstruction* input, HloInstruction* output,
HloInstruction* input_start_indices, HloInstruction* output_start_indices,
absl::Span<const std::pair<int64_t, int64_t>> source_target_pairs,
absl::Span<const std::vector<int64_t>> slice_sizes,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermuteStart, shape, input, output,
input_start_indices, output_start_indices, source_target_pairs,
slice_sizes, channel_id);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReplicaId(
const Shape& shape) {
CHECK(Shape::Equal().IgnoreLayout()(shape, ShapeUtil::MakeShape(U32, {})))
<< "HloInstruction replica-id must have a shape of u32[], but "
<< shape.ToString() << " is specified";
return absl::WrapUnique(new HloInstruction(HloOpcode::kReplicaId, shape));
}
std::unique_ptr<HloInstruction> HloInstruction::CreatePartitionId(
const Shape& shape) {
CHECK(Shape::Equal().IgnoreLayout()(shape, ShapeUtil::MakeShape(U32, {})))
<< "HloInstruction partition-id must have a shape of u32[], but "
<< shape.ToString() << " is specified";
return absl::WrapUnique(new HloInstruction(HloOpcode::kPartitionId, shape));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateInfeed(
const Shape& infeed_shape, HloInstruction* token_operand,
const std::string& config) {
return std::make_unique<HloInfeedInstruction>(infeed_shape, token_operand,
config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateOutfeed(
const Shape& outfeed_shape, HloInstruction* operand,
HloInstruction* token_operand, absl::string_view outfeed_config) {
return std::make_unique<HloOutfeedInstruction>(outfeed_shape, operand,
token_operand, outfeed_config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSend(
HloInstruction* operand, HloInstruction* token, int64_t channel_id,
bool is_host_transfer) {
return std::make_unique<HloSendInstruction>(operand, token, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSendDone(
HloInstruction* operand, bool is_host_transfer) {
auto send_operand = DynCast<HloSendInstruction>(operand);
CHECK(send_operand != nullptr)
<< "SendDone must take the context operand from Send";
return std::make_unique<HloSendDoneInstruction>(send_operand,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSendDone(
HloInstruction* operand, int64_t channel_id, bool is_host_transfer) {
return std::make_unique<HloSendDoneInstruction>(operand, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRecv(
const Shape& shape, HloInstruction* token, int64_t channel_id,
bool is_host_transfer) {
return std::make_unique<HloRecvInstruction>(shape, token, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRecvDone(
HloInstruction* operand, bool is_host_transfer) {
auto recv_operand = DynCast<HloRecvInstruction>(operand);
CHECK(recv_operand != nullptr)
<< "RecvDone must take the context operand from Recv";
return std::make_unique<HloRecvDoneInstruction>(recv_operand,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRecvDone(
HloInstruction* operand, int64_t channel_id, bool is_host_transfer) {
return std::make_unique<HloRecvDoneInstruction>(operand, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReverse(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> dimensions) {
return std::make_unique<HloReverseInstruction>(shape, operand, dimensions);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAfterAll(
absl::Span<HloInstruction* const> operands) {
CHECK(!operands.empty());
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
for (auto operand : operands) {
instruction->AppendOperand(operand);
}
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateToken() {
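  // A token is represented as an after-all instruction with no operands.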
return absl::WrapUnique(
new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAddDependency(HloInstruction* data_operand,
HloInstruction* token_operand) {
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kAddDependency, data_operand->shape()));
instruction->AppendOperand(data_operand);
instruction->AppendOperand(token_operand);
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateWhile(
const Shape& shape, HloComputation* condition, HloComputation* body,
HloInstruction* init) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kWhile, shape));
instruction->AppendOperand(init);
instruction->AppendComputation(body);
instruction->AppendComputation(condition);
body->SetWhileCallInstruction(instruction.get());
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConditional(
const Shape& shape, HloInstruction* pred,
HloInstruction* true_computation_arg, HloComputation* true_computation,
HloInstruction* false_computation_arg, HloComputation* false_computation) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kConditional, shape));
instruction->AppendOperand(pred);
instruction->AppendOperand(true_computation_arg);
instruction->AppendOperand(false_computation_arg);
instruction->AppendComputation(true_computation);
instruction->AppendComputation(false_computation);
true_computation->SetConditionalCallInstruction(instruction.get());
false_computation->SetConditionalCallInstruction(instruction.get());
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConditional(
const Shape& shape, HloInstruction* branch_index,
absl::Span<HloComputation* const> branch_computations,
absl::Span<HloInstruction* const> branch_computation_args) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kConditional, shape));
instruction->AppendOperand(branch_index);
CHECK_EQ(branch_computations.size(), branch_computation_args.size());
for (int i = 0; i < branch_computations.size(); ++i) {
instruction->AppendComputation(branch_computations[i]);
instruction->AppendOperand(branch_computation_args[i]);
branch_computations[i]->SetConditionalCallInstruction(instruction.get());
}
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSlice(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
return std::make_unique<HloSliceInstruction>(shape, operand, start_indices,
limit_indices, strides);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateDynamicSlice(
const Shape& shape, HloInstruction* operand,
absl::Span<HloInstruction* const> start_indices,
absl::Span<const int64_t> slice_sizes) {
return std::make_unique<HloDynamicSliceInstruction>(
shape, operand, start_indices, slice_sizes);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateDynamicUpdateSlice(
const Shape& shape, HloInstruction* operand, HloInstruction* update,
absl::Span<HloInstruction* const> start_indices) {
return std::make_unique<HloDynamicUpdateSliceInstruction>(
shape, operand, update, start_indices);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConcatenate(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t dimension) {
return std::make_unique<HloConcatenateInstruction>(shape, operands,
dimension);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConvert(
const Shape& shape, HloInstruction* operand) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kConvert, shape));
instruction->AppendOperand(operand);
return instruction;
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBitcastConvert(const Shape& shape,
HloInstruction* operand) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kBitcastConvert, shape));
instruction->AppendOperand(operand);
return instruction;
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateStochasticConvert(const Shape& shape,
HloInstruction* operand,
HloInstruction* random) {
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kStochasticConvert, shape));
instruction->AppendOperand(operand);
instruction->AppendOperand(random);
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateBitcast(
const Shape& shape, HloInstruction* operand) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kBitcast, shape));
instruction->AppendOperand(operand);
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, HloInstruction* operand, HloInstruction* init_value,
absl::Span<const int64_t> dimensions_to_reduce,
HloComputation* reduce_computation) {
return absl::WrapUnique(new HloReduceInstruction(
shape, {operand, init_value}, dimensions_to_reduce, reduce_computation));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<HloInstruction* const> init_values,
absl::Span<const int64_t> dimensions_to_reduce,
HloComputation* reduce_computation) {
std::vector<HloInstruction*> all_args;
all_args.reserve(operands.size() * 2);
all_args.insert(all_args.end(), operands.begin(), operands.end());
all_args.insert(all_args.end(), init_values.begin(), init_values.end());
return std::make_unique<HloReduceInstruction>(
shape, all_args, dimensions_to_reduce, reduce_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, HloInstruction* tuple_of_instructions,
absl::Span<HloInstruction* const> init_values,
absl::Span<const int64_t> dimensions_to_reduce,
HloComputation* reduce_computation) {
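  // When the first input is a tuple, synthesize one get-tuple-element per
  // element and reduce those; otherwise fall back to the single-array form.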
if (!tuple_of_instructions->shape().IsTuple()) {
CHECK_EQ(init_values.size(), 1)
<< "The first input has to be a tuple, or the number of init values "
"has to be one.";
return CreateReduce(shape, tuple_of_instructions, init_values[0],
dimensions_to_reduce, reduce_computation);
}
absl::InlinedVector<HloInstruction*, 4> inputs;
for (int idx = 0; idx < tuple_of_instructions->shape().tuple_shapes_size();
idx++) {
std::unique_ptr<HloInstruction> gte =
HloInstruction::CreateGetTupleElement(tuple_of_instructions, idx);
inputs.push_back(
tuple_of_instructions->parent()->AddInstruction(std::move(gte)));
}
return CreateReduce(shape, inputs, init_values, dimensions_to_reduce,
reduce_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduceWindow(
const Shape& shape, HloInstruction* operand, HloInstruction* init_value,
const Window& window, HloComputation* reduce_computation) {
return std::make_unique<HloReduceWindowInstruction>(
shape, operand, init_value, window, reduce_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduceWindow(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<HloInstruction* const> init_values, const Window& window,
HloComputation* reduce_computation) {
return std::make_unique<HloReduceWindowInstruction>(
shape, operands, init_values, window, reduce_computation);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBatchNormTraining(const Shape& shape,
HloInstruction* operand,
HloInstruction* scale,
HloInstruction* offset, float epsilon,
int64_t feature_index) {
return std::make_unique<HloBatchNormTrainingInstruction>(
shape, operand, scale, offset, epsilon, feature_index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBatchNormInference(
const Shape& shape, HloInstruction* operand, HloInstruction* scale,
HloInstruction* offset, HloInstruction* mean, HloInstruction* variance,
float epsilon, int64_t feature_index) {
return std::make_unique<HloBatchNormInferenceInstruction>(
shape, operand, scale, offset, mean, variance, epsilon, feature_index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBatchNormGrad(const Shape& shape, HloInstruction* operand,
HloInstruction* scale, HloInstruction* mean,
HloInstruction* variance,
HloInstruction* grad_output, float epsilon,
int64_t feature_index) {
return std::make_unique<HloBatchNormGradInstruction>(
shape, operand, scale, mean, variance, grad_output, epsilon,
feature_index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateSelectAndScatter(
const Shape& shape, HloInstruction* operand, HloComputation* select,
const Window& window, HloInstruction* source, HloInstruction* init_value,
HloComputation* scatter) {
return std::make_unique<HloSelectAndScatterInstruction>(
shape, operand, select, window, source, init_value, scatter);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateBroadcast(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> broadcast_dimensions) {
return std::make_unique<HloBroadcastInstruction>(shape, operand,
broadcast_dimensions);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateGetDimensionSize(const Shape& shape,
HloInstruction* operand,
int64_t dimension) {
return std::make_unique<HloGetDimensionSizeInstruction>(shape, operand,
dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateSetDimensionSize(const Shape& shape,
HloInstruction* operand,
HloInstruction* val, int64_t dimension) {
return std::make_unique<HloSetDimensionSizeInstruction>(shape, operand, val,
dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBroadcastSequence(
const Shape& output_shape, HloInstruction* operand,
absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)> adder) {
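  // Scalars broadcast directly. Otherwise, first reshape away the degenerate
  // (size-1) dimensions, then broadcast the remaining dimensions back to
  // their positions in the output shape. For example, broadcasting f32[1,4]
  // to f32[3,4] reshapes to f32[4] and broadcasts with dimensions {1}.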
CHECK(ShapeUtil::IsScalar(operand->shape()) ||
operand->shape().rank() == output_shape.rank());
Shape broadcast_shape = ShapeUtil::ChangeElementType(
output_shape, operand->shape().element_type());
if (ShapeUtil::IsScalar(operand->shape())) {
auto broadcast =
HloInstruction::CreateBroadcast(broadcast_shape, operand, {});
broadcast->set_metadata(operand->metadata());
if (operand->has_sharding()) {
broadcast->copy_sharding(operand);
}
broadcast->set_frontend_attributes(operand->frontend_attributes());
broadcast->set_statistics_viz(operand->statistics_viz());
return broadcast;
}
std::vector<int64_t> broadcast_dimensions;
std::vector<int64_t> reshaped_dimensions;
for (int i = 0; i < operand->shape().rank(); i++) {
if (operand->shape().dimensions(i) == output_shape.dimensions(i)) {
broadcast_dimensions.push_back(i);
reshaped_dimensions.push_back(operand->shape().dimensions(i));
} else {
CHECK_EQ(operand->shape().dimensions(i), 1)
<< "An explicit broadcast sequence requires the broadcasted "
"dimensions to be trivial; operand: "
<< operand->ToString() << "; output_shape: " << output_shape;
}
}
HloInstruction* reshaped_operand = adder(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(operand->shape().element_type(),
reshaped_dimensions),
operand));
reshaped_operand->set_metadata(operand->metadata());
if (operand->has_sharding()) {
reshaped_operand->copy_sharding(operand);
}
reshaped_operand->set_frontend_attributes(operand->frontend_attributes());
reshaped_operand->set_statistics_viz(operand->statistics_viz());
auto broadcast = HloInstruction::CreateBroadcast(
broadcast_shape, reshaped_operand, broadcast_dimensions);
broadcast->set_metadata(operand->metadata());
if (operand->has_sharding()) {
broadcast->copy_sharding(operand);
}
broadcast->set_frontend_attributes(operand->frontend_attributes());
broadcast->set_statistics_viz(operand->statistics_viz());
return broadcast;
}
std::unique_ptr<HloInstruction> HloInstruction::CreatePad(
const Shape& shape, HloInstruction* operand, HloInstruction* padding_value,
const PaddingConfig& padding_config) {
return std::make_unique<HloPadInstruction>(shape, operand, padding_value,
padding_config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReshape(
const Shape& shape, HloInstruction* operand, int64_t inferred_dimension) {
CHECK(operand->shape().is_unbounded_dynamic() ||
ShapeUtil::StaticExtentProduct(shape) ==
ShapeUtil::StaticExtentProduct(operand->shape()))
<< "shape: " << ShapeUtil::HumanString(shape)
<< " operand: " << ShapeUtil::HumanString(operand->shape());
return std::make_unique<HloReshapeInstruction>(shape, operand,
inferred_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateDynamicReshape(
const Shape& shape, HloInstruction* data_operand,
absl::Span<HloInstruction* const> dim_sizes) {
  CHECK_EQ(ShapeUtil::StaticExtentProduct(shape),
           ShapeUtil::StaticExtentProduct(data_operand->shape()))
      << "shape: " << ShapeUtil::HumanString(shape)
      << " operand: " << ShapeUtil::HumanString(data_operand->shape());
CHECK_EQ(shape.rank(), dim_sizes.size());
return std::make_unique<HloDynamicReshapeInstruction>(shape, data_operand,
dim_sizes);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTranspose(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> dimensions) {
return std::make_unique<HloTransposeInstruction>(shape, operand, dimensions);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSort(
const Shape& shape, int64_t dimension,
absl::Span<HloInstruction* const> operands, HloComputation* compare,
bool is_stable) {
return std::make_unique<HloSortInstruction>(shape, dimension, operands,
compare, is_stable);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateFusion(
const Shape& shape, FusionKind fusion_kind, HloInstruction* fused_root,
absl::string_view prefix) {
return std::make_unique<HloFusionInstruction>(shape, fusion_kind, fused_root,
prefix);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateFusion(
const Shape& shape, FusionKind fusion_kind,
absl::Span<HloInstruction* const> operands,
HloComputation* fusion_computation, absl::string_view prefix) {
return std::make_unique<HloFusionInstruction>(shape, fusion_kind, operands,
fusion_computation, prefix);
}
void HloInstruction::set_single_sharding(const HloSharding& sharding) {
CHECK(!sharding.IsTuple()) << sharding;
if (shape().IsTuple()) {
set_sharding(HloSharding::Tuple(sharding.GetAsShapeTree(shape())));
} else {
set_sharding(sharding);
}
}
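// Propagates derived state (sharding when the shapes are of compatible
// kind, metadata, frontend attributes, statistics, and, when the opcodes
// match, the backend config) from this instruction to
// `derived_instruction`.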
void HloInstruction::SetupDerivedInstruction(
HloInstruction* derived_instruction) const {
if (sharding_ != nullptr &&
ShapeUtil::CompatibleKind(shape_, derived_instruction->shape())) {
derived_instruction->set_sharding(*sharding_);
} else if (!ShapeUtil::CompatibleKind(shape_, derived_instruction->shape())) {
derived_instruction->clear_sharding();
}
derived_instruction->set_metadata(*metadata_);
if (has_rare()) {
derived_instruction->set_frontend_attributes(frontend_attributes());
derived_instruction->set_statistics_viz(statistics_viz());
} else if (derived_instruction->has_rare()) {
derived_instruction->mutable_rare()->frontend_attributes.Clear();
derived_instruction->mutable_rare()->statistics_viz.Clear();
}
if (opcode() == derived_instruction->opcode() && has_backend_config()) {
derived_instruction->CopyBackendConfigFrom(this);
}
}
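// Returns true if this instruction itself has a side effect, without
// looking into any computations it calls. Collectives carrying a channel
// id only count as side-effecting when SPMD partitioning is not in use.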
bool HloInstruction::HasSideEffectNoRecurse() const {
switch (opcode_) {
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kRng:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
return true;
case HloOpcode::kAllToAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
if (Cast<HloCollectiveInstruction>(this)->constrain_layout()) {
return true;
}
[[fallthrough]];
case HloOpcode::kCollectivePermute:
return Cast<HloChannelInstruction>(this)->channel_id().has_value() &&
!GetModule()->config().use_spmd_partitioning();
case HloOpcode::kCustomCall:
return Cast<HloCustomCallInstruction>(this)
->custom_call_has_side_effect();
default:
return false;
}
}
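// Returns true if this instruction or any computation it calls,
// transitively, has a side effect.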
bool HloInstruction::HasSideEffect() const {
if (HasSideEffectNoRecurse()) {
return true;
}
for (const auto& computation : called_computations()) {
if (computation->HasSideEffect()) {
return true;
}
}
return false;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCall(
const Shape& shape, HloInstruction* called_computation_root) {
return std::make_unique<HloCallInstruction>(shape, called_computation_root);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* computation) {
return std::make_unique<HloCallInstruction>(shape, operands, computation);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCompositeCall(const Shape& shape,
HloInstruction* decomposition_root,
const std::string& name,
const std::string& attributes,
int64_t version) {
return std::make_unique<HloCallInstruction>(shape, decomposition_root, name,
attributes, version);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCompositeCall(const Shape& shape,
absl::Span<HloInstruction* const> operands,
HloComputation* decomposition,
const std::string& name,
const std::string& attributes,
int64_t version) {
return std::make_unique<HloCallInstruction>(shape, operands, decomposition,
name, attributes, version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target, std::string opaque,
CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, custom_call_target, std::move(opaque), api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* to_apply, absl::string_view custom_call_target,
std::string opaque, CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, to_apply, custom_call_target, std::move(opaque),
api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<HloComputation* const> called_computations,
absl::string_view custom_call_target, std::string opaque,
CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, called_computations, custom_call_target,
std::move(opaque), api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target,
absl::Span<const Shape> operand_shapes_with_layout, std::string opaque,
CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, custom_call_target, std::move(opaque),
operand_shapes_with_layout, api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTuple(
absl::Span<HloInstruction* const> elements) {
std::vector<const Shape*> element_shapes;
element_shapes.reserve(elements.size());
for (auto element : elements) {
element_shapes.push_back(&element->shape());
}
Shape tuple_shape = ShapeUtil::MakeTupleShapeWithPtrs(element_shapes);
return CreateVariadic(tuple_shape, HloOpcode::kTuple, elements);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateGather(
const Shape& shape, HloInstruction* operand, HloInstruction* start_indices,
const GatherDimensionNumbers& gather_dim_numbers,
absl::Span<const int64_t> slice_sizes, bool indices_are_sorted) {
return std::make_unique<HloGatherInstruction>(shape, operand, start_indices,
gather_dim_numbers, slice_sizes,
indices_are_sorted);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateScatter(
const Shape& shape, HloInstruction* operand,
HloInstruction* scatter_indices, HloInstruction* updates,
HloComputation* update_computation,
const ScatterDimensionNumbers& scatter_dim_numbers, bool indices_are_sorted,
bool unique_indices) {
return absl::WrapUnique(new HloScatterInstruction(
shape, {operand, scatter_indices, updates}, update_computation,
scatter_dim_numbers, indices_are_sorted, unique_indices));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateScatter(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloInstruction* scatter_indices, absl::Span<HloInstruction* const> updates,
HloComputation* update_computation,
const ScatterDimensionNumbers& scatter_dim_numbers, bool indices_are_sorted,
bool unique_indices) {
absl::InlinedVector<HloInstruction*, 3> args;
args.reserve(operands.size() + updates.size() + 1);
absl::c_copy(operands, std::back_inserter(args));
args.push_back(scatter_indices);
absl::c_copy(updates, std::back_inserter(args));
return std::make_unique<HloScatterInstruction>(
shape, args, update_computation, scatter_dim_numbers, indices_are_sorted,
unique_indices);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateDomain(
const Shape& shape, HloInstruction* operand,
std::unique_ptr<DomainMetadata> operand_side_metadata,
std::unique_ptr<DomainMetadata> user_side_metadata) {
return std::make_unique<HloDomainInstruction>(
shape, operand, std::move(operand_side_metadata),
std::move(user_side_metadata));
}
bool HloInstruction::IsThreadIncluded(
absl::string_view execution_thread,
const absl::flat_hash_set<absl::string_view>& execution_threads_set) {
return execution_threads_set.empty() ||
execution_threads_set.contains(execution_thread);
}
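// Appends ".<suffix>" to the instruction name, bumping a numeric counter
// if the name already ends in the suffix. For example, with suffix
// "clone": "add" -> "add.clone", "add.clone" -> "add.clone2", and
// "add.clone2" -> "add.clone3".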
void HloInstruction::AddSuffixToInstructionName(
const absl::string_view suffix) {
const std::string dot_suffix = absl::StrCat(".", suffix);
size_t index = name().rfind(dot_suffix);
if (index == std::string::npos) {
this->name_ = absl::StrCat(name(), dot_suffix);
} else {
auto after_suffix = name().substr(index + dot_suffix.size());
if (after_suffix.empty()) {
this->name_ = absl::StrCat(name(), "2");
} else {
int64_t numeric_suffix;
if (absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
this->name_ =
StrCat(name().substr(0, index), dot_suffix, numeric_suffix + 1);
} else {
this->name_ = absl::StrCat(name(), dot_suffix);
}
}
}
}
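// Clones this instruction with the given result shape and operands. The
// clone inherits this instruction's derived state (sharding, metadata,
// backend config); when a `context` is supplied, the old-to-new mapping is
// recorded and called computations living in a different module are
// deep-cloned into it.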
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return CloneWithNewOperands(shape, new_operands, "", context);
}
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
const Shape& shape, absl::Span<HloInstruction* const> new_operands,
const std::string& suffix, HloCloneContext* context) const {
VLOG(3) << "CloneWithNewOperands:\n " << ToString();
VLOG(3) << " new operands:";
for (const HloInstruction* new_operand : new_operands) {
VLOG(3) << " %" << new_operand->name();
}
std::unique_ptr<HloInstruction> clone;
switch (opcode_) {
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kFft:
case HloOpcode::kCompare:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kCopyStart:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReduce:
case HloOpcode::kTranspose:
case HloOpcode::kBroadcast:
case HloOpcode::kReshape:
case HloOpcode::kDynamicReshape:
case HloOpcode::kMap:
case HloOpcode::kSlice:
case HloOpcode::kConstant:
case HloOpcode::kFusion:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kParameter:
case HloOpcode::kGetTupleElement:
case HloOpcode::kReducePrecision:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kConvolution:
case HloOpcode::kCustomCall:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
case HloOpcode::kSort:
case HloOpcode::kGather:
case HloOpcode::kScatter:
case HloOpcode::kIota:
case HloOpcode::kDot:
case HloOpcode::kDomain:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kTriangularSolve:
case HloOpcode::kCholesky:
case HloOpcode::kTopK:
clone = CloneWithNewOperandsImpl(shape, new_operands, context);
break;
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kClz:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopy:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kFloor:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
CHECK_EQ(new_operands.size(), 1);
clone = CreateUnary(shape, opcode_, new_operands[0]);
break;
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kComplex:
case HloOpcode::kDivide:
case HloOpcode::kMultiply:
case HloOpcode::kSubtract:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
CHECK_EQ(new_operands.size(), 2);
clone = CreateBinary(shape, opcode_, new_operands[0], new_operands[1]);
break;
case HloOpcode::kClamp:
case HloOpcode::kSelect:
CHECK_EQ(new_operands.size(), 3);
clone = CreateTernary(shape, opcode_, new_operands[0], new_operands[1],
new_operands[2]);
break;
case HloOpcode::kCall:
clone = CreateCall(shape, new_operands, to_apply());
break;
case HloOpcode::kConvert:
CHECK_EQ(new_operands.size(), 1);
clone = CreateConvert(shape, new_operands[0]);
break;
case HloOpcode::kBitcastConvert:
CHECK_EQ(new_operands.size(), 1);
clone = CreateBitcastConvert(shape, new_operands[0]);
break;
case HloOpcode::kStochasticConvert:
CHECK_EQ(new_operands.size(), 2);
clone = CreateStochasticConvert(shape, new_operands[0], new_operands[1]);
break;
case HloOpcode::kDynamicUpdateSlice:
clone = CreateDynamicUpdateSlice(shape, new_operands[0], new_operands[1],
new_operands.subspan(2));
break;
case HloOpcode::kTuple:
clone = CreateTuple(new_operands);
*clone->mutable_shape() = shape;
break;
case HloOpcode::kWhile:
CHECK_EQ(new_operands.size(), 1);
clone =
CreateWhile(shape, while_condition(), while_body(), new_operands[0]);
while_body()->SetWhileCallInstruction(const_cast<HloInstruction*>(this));
break;
case HloOpcode::kConditional:
CHECK_EQ(new_operands.size(), branch_count() + 1);
clone = CreateConditional(shape, new_operands[0],
absl::MakeSpan(branch_computations()),
new_operands.subspan(1));
break;
case HloOpcode::kAfterAll:
if (new_operands.empty()) {
clone = CreateToken();
} else {
clone = CreateAfterAll(new_operands);
}
break;
case HloOpcode::kAddDependency:
CHECK_EQ(new_operands.size(), 2);
clone = CreateAddDependency(new_operands[0], new_operands[1]);
break;
case HloOpcode::kReplicaId:
CHECK_EQ(new_operands.size(), 0);
clone = CreateReplicaId(shape);
break;
case HloOpcode::kPartitionId:
CHECK_EQ(new_operands.size(), 0);
clone = CreatePartitionId(shape);
break;
default:
CHECK(0) << "Unsupported opcode: " << opcode_;
}
SetupDerivedInstruction(clone.get());
clone->set_parent(parent_);
clone->backend_config_ = BackendConfigWrapper(backend_config_);
clone->SetAndSanitizeName(name());
if (context != nullptr) {
context->MapInstruction(this, clone.get());
clone->ReplaceCalledComputations([&](HloComputation* callee) {
return callee->parent() != context->module()
? context->module()->DeepCloneComputation(callee, context)
: callee;
});
if (opcode() == HloOpcode::kWhile) {
clone->while_body()->SetWhileCallInstruction(clone.get());
}
}
if (!suffix.empty()) {
clone->AddSuffixToInstructionName(suffix);
}
return clone;
}
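// Severs this instruction from the graph: it is removed from every
// operand's user list and the corresponding operand/user slots are nulled
// out. Calling this more than once is a no-op.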
void HloInstruction::DetachFromOperandsAndUsers() {
if (cleaned_up_) {
return;
}
cleaned_up_ = true;
for (int64_t operand_num = 0; operand_num < operand_count(); ++operand_num) {
HloInstruction* operand = operands_[operand_num];
if (operand == nullptr) {
continue;
}
operand->users_.MaybeRemoveUser(this);
operands_[operand_num] = nullptr;
}
for (auto& user : this->users()) {
for (int i = 0; i < user->operand_count(); ++i) {
if (user->operands_[i] == this) {
user->operands_[i] = nullptr;
}
}
}
}
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewShape(
const Shape& shape, const std::string& suffix,
HloCloneContext* context) const {
std::unique_ptr<HloInstruction> clone =
CloneWithNewOperands(shape, operands_, context);
if (suffix.empty()) {
clone->name_.assign(name().begin(), name().end());
} else {
clone->AddSuffixToInstructionName(suffix);
}
return clone;
}
std::unique_ptr<HloInstruction> HloInstruction::Clone(
const std::string& suffix, HloCloneContext* context) const {
std::unique_ptr<HloInstruction> clone =
CloneWithNewShape(shape_, suffix, context);
return clone;
}
std::pair<const HloInstruction*, ShapeIndex>
HloInstruction::LatestNonGteAncestorAndIndex() const {
const HloInstruction* hlo = this;
ShapeIndex index;
while (hlo->opcode() == HloOpcode::kGetTupleElement) {
index.push_back(hlo->tuple_index());
hlo = hlo->operand(0);
}
std::reverse(index.begin(), index.end());
return {hlo, index};
}
const HloInstruction* HloInstruction::LatestNonGteAncestor() const {
const HloInstruction* hlo = this;
while (hlo->opcode() == HloOpcode::kGetTupleElement) {
hlo = hlo->operand(0);
}
return hlo;
}
const HloInstruction* HloInstruction::operand(int64_t i) const {
return operands_[i];
}
HloInstruction* HloInstruction::mutable_operand(int64_t i) {
CHECK(operands_[i] != nullptr);
return operands_[i];
}
int64_t HloInstruction::operand_index(const HloInstruction* target) const {
for (int64_t i = 0; i < operand_count(); ++i) {
if (target == operand(i)) {
return i;
}
}
LOG(FATAL) << "target was not an operand: " << target->ToString();
}
std::vector<int64_t> HloInstruction::operand_indices(
const HloInstruction* target) const {
std::vector<int64_t> indices;
for (int64_t i = 0; i < operand_count(); ++i) {
if (target == operand(i)) {
indices.push_back(i);
}
}
if (indices.empty()) {
LOG(FATAL) << "target was not an operand: " << target->ToString();
}
return indices;
}
HloInstruction::InstructionVector HloInstruction::unique_operands() const {
InstructionVector unique;
absl::flat_hash_set<const HloInstruction*> seen;
for (HloInstruction* operand : operands()) {
if (seen.insert(operand).second) {
unique.push_back(operand);
}
}
return unique;
}
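// Adds a control edge making this instruction a control predecessor of
// `instruction`. Both instructions must belong to the same computation;
// adding an edge that already exists is a no-op.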
absl::Status HloInstruction::AddControlDependencyTo(
HloInstruction* instruction) {
TF_RET_CHECK(instruction->parent() == parent());
if (!absl::c_linear_search(control_successors(), instruction)) {
mutable_rare()->control_successors.push_back(instruction);
TF_RET_CHECK(!absl::c_linear_search(
instruction->rare()->control_predecessors, this));
instruction->mutable_rare()->control_predecessors.push_back(this);
}
return absl::OkStatus();
}
absl::Status HloInstruction::RemoveControlDependencyTo(
HloInstruction* instruction) {
TF_RET_CHECK(instruction->parent() == parent());
if (has_rare()) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&mutable_rare()->control_successors, instruction));
}
if (instruction->has_rare()) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&instruction->mutable_rare()->control_predecessors, this));
}
return absl::OkStatus();
}
absl::Status HloInstruction::DropAllControlDeps() {
if (has_rare()) {
for (auto* ctrl_succ : rare()->control_successors) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&ctrl_succ->mutable_rare()->control_predecessors, this));
}
for (auto* ctrl_pred : rare()->control_predecessors) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&ctrl_pred->mutable_rare()->control_successors, this));
}
Rare* r = mutable_rare();
r->control_successors.clear();
r->control_predecessors.clear();
}
return absl::OkStatus();
}
absl::Status HloInstruction::SafelyDropAllControlDependencies() {
if (has_rare()) {
for (HloInstruction* predecessor : rare()->control_predecessors) {
for (HloInstruction* successor : rare()->control_successors) {
TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(successor));
}
}
}
TF_RETURN_IF_ERROR(DropAllControlDeps());
return absl::OkStatus();
}
bool HloInstruction::HasControlDependencies() const {
const Rare* r = rare();
return (!r->control_predecessors.empty() || !r->control_successors.empty());
}
absl::Status HloInstruction::CopyAllControlDepsTo(HloInstruction* start,
HloInstruction* end) const {
for (auto* ctrl_pred : control_predecessors()) {
TF_RETURN_IF_ERROR(ctrl_pred->AddControlDependencyTo(start));
}
for (auto* ctrl_succ : control_successors()) {
TF_RETURN_IF_ERROR(end->AddControlDependencyTo(ctrl_succ));
}
return absl::OkStatus();
}
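// Shared implementation behind the Identical* comparisons: checks opcode,
// shape (layout-sensitively if requested), optionally sharding, operands
// via `eq_operands` (allowing swapped operands of commutative binary ops),
// the backend config, and finally the opcode-specific slow path.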
bool HloInstruction::IdenticalInternal(
const HloInstruction& other,
absl::FunctionRef<bool(const HloInstruction*, const HloInstruction*)>
eq_operands,
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>
eq_computations,
bool layout_sensitive, bool sharding_sensitive,
bool ignore_channel_id_values,
bool ignore_commutative_operand_order) const {
if (this == &other) {
return true;
}
if (opcode() != other.opcode()) {
return false;
}
if (!(layout_sensitive ? ShapeUtil::Equal(shape(), other.shape())
: ShapeUtil::Compatible(shape(), other.shape()))) {
return false;
}
if (sharding_sensitive && has_sharding() && other.has_sharding() &&
sharding() != other.sharding()) {
return false;
}
if (operands().size() != other.operands().size()) {
return false;
}
if (ignore_commutative_operand_order &&
HloOpcodeIsBinaryCommutative(opcode())) {
CHECK_EQ(operand_count(), 2);
if (!(eq_operands(operand(0), other.operand(0)) &&
eq_operands(operand(1), other.operand(1))) &&
!(eq_operands(operand(0), other.operand(1)) &&
eq_operands(operand(1), other.operand(0)))) {
return false;
}
} else {
for (size_t i = 0; i < operands().size(); ++i) {
if (!eq_operands(operand(i), other.operand(i))) {
return false;
}
}
}
if (backend_config_ != other.backend_config_) {
return false;
}
if (ignore_channel_id_values) {
if (auto channel_inst = DynCast<HloChannelInstruction>(this)) {
return channel_inst->IdenticalSlowPathIgnoringChannelIdValues(
other, eq_computations);
}
}
return IdenticalSlowPath(other, eq_computations);
}
void HloInstruction::AppendOperand(HloInstruction* operand) {
if (operand->parent() != nullptr) {
DCHECK(!operand->parent()->IsMarkedAsDead(operand))
<< "Operand " << operand->name() << " is already marked dead";
}
operands_.push_back(operand);
operand->AddUser(this);
}
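// Removes the operands at the given ascending, duplicate-free indices with
// a single left-to-right compaction pass. For example, removing indices
// {1, 3} from operands [a, b, c, d, e] leaves [a, c, e].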
void HloInstruction::RemoveOperandsAtAscendingIndices(
absl::Span<const int> ascending_indices) {
if (ascending_indices.empty()) {
return;
}
int next_index = 0;
int removed_count = 0;
for (int to_remove : ascending_indices) {
while (next_index < to_remove) {
operands_[next_index - removed_count] = operands_[next_index];
++next_index;
}
CHECK_LT(to_remove, operands_.size());
++removed_count;
++next_index;
}
while (next_index < operands_.size()) {
operands_[next_index - removed_count] = operands_[next_index];
++next_index;
}
CHECK_EQ(removed_count, ascending_indices.size());
operands_.resize(operands_.size() - removed_count);
}
bool HloInstruction::HasConstantOperand() const {
for (const HloInstruction* operand : operands_) {
if (operand->IsConstant()) {
return true;
}
}
return false;
}
bool HloInstruction::IdenticalSlowPath(
const HloInstruction& other,
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>
eq_computations) const {
switch (opcode()) {
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAtan2:
case HloOpcode::kAdd:
case HloOpcode::kBitcast:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCeil:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kComplex:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kCopyStart:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kDivide:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kAnd:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kPartitionId:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kRemainder:
case HloOpcode::kReshape:
case HloOpcode::kDynamicReshape:
case HloOpcode::kReplicaId:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kStochasticConvert:
case HloOpcode::kCbrt:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTuple:
return true;
case HloOpcode::kAfterAll:
case HloOpcode::kAddDependency:
return false;
case HloOpcode::kCall:
return eq_computations(to_apply(), other.to_apply());
case HloOpcode::kConditional:
for (int j = 0; j < branch_count(); ++j) {
if (!eq_computations(branch_computation(j),
other.branch_computation(j))) {
return false;
}
}
return true;
case HloOpcode::kWhile:
return (eq_computations(while_body(), other.while_body()) &&
eq_computations(while_condition(), other.while_condition()));
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kFft:
case HloOpcode::kCompare:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReduce:
case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kBroadcast:
case HloOpcode::kMap:
case HloOpcode::kSlice:
case HloOpcode::kConstant:
case HloOpcode::kIota:
case HloOpcode::kFusion:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kParameter:
case HloOpcode::kGetTupleElement:
case HloOpcode::kReducePrecision:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConvolution:
case HloOpcode::kCustomCall:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kScatter:
case HloOpcode::kDot:
case HloOpcode::kDomain:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kTriangularSolve:
case HloOpcode::kCholesky:
case HloOpcode::kTopK:
LOG(FATAL) << "Base class impl called for opcode with subclass: "
<< opcode();
}
return false;
}
absl::Status HloInstruction::ReplaceUseWith(HloInstruction* user,
HloInstruction* new_producer) {
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< "this shape: " << ShapeUtil::HumanString(shape())
<< ", replacement shape: "
<< ShapeUtil::HumanString(new_producer->shape());
return ReplaceUseWithDifferentShape(user, new_producer);
}
absl::Status HloInstruction::ReplaceUseWithDifferentShape(
HloInstruction* user, HloInstruction* new_producer) {
VLOG(3) << "Replacing uses of " << name() << " in " << user->name()
<< " with " << new_producer->name();
RemoveUser(user);
  // `this` must actually appear among the user's operands.
  TF_RET_CHECK(absl::c_count(user->operands_, this) >= 1);
std::replace(user->operands_.begin(), user->operands_.end(), this,
new_producer);
new_producer->AddUser(user);
if (user->opcode() == HloOpcode::kFusion) {
TF_RETURN_IF_ERROR(
Cast<HloFusionInstruction>(user)->DeduplicateFusionOperands());
}
return absl::OkStatus();
}
absl::Status HloInstruction::ReplaceUseWith(HloInstruction* user,
int operand_number,
HloInstruction* new_producer) {
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< "this shape: " << ShapeUtil::HumanString(shape())
<< ", replacement shape: "
<< ShapeUtil::HumanString(new_producer->shape());
return ReplaceUseWithDifferentShape(user, operand_number, new_producer);
}
absl::Status HloInstruction::ReplaceUseWithDifferentShape(
HloInstruction* user, int operand_number, HloInstruction* new_producer) {
VLOG(3) << "Replacing operand " << operand_number << " of " << name()
<< " in " << user->name() << " with " << new_producer->name();
if (absl::c_count(user->operands_, this) == 1) {
RemoveUser(user);
}
TF_RET_CHECK(user->operand(operand_number) == this)
<< "Expected operand " << operand_number << " of " << user->ToString()
<< " to be equal to " << ToString();
user->operands_[operand_number] = new_producer;
new_producer->AddUser(user);
return absl::OkStatus();
}
absl::Status HloInstruction::ReplaceOperandWith(int64_t operand_num,
HloInstruction* new_operand) {
auto old_operand = operand(operand_num);
TF_RET_CHECK(ShapeUtil::CompatibleIgnoringFpPrecision(old_operand->shape(),
new_operand->shape()))
<< old_operand->shape() << " is not compatible with "
<< new_operand->shape();
return ReplaceOperandWithDifferentShape(operand_num, new_operand);
}
absl::Status HloInstruction::ReplaceOperandWithDifferentShape(
int64_t operand_num, HloInstruction* new_operand) {
TF_RET_CHECK(operand_num >= 0);
TF_RET_CHECK(operand_num < operand_count());
HloInstruction* old_operand = mutable_operand(operand_num);
if (old_operand == new_operand) {
return absl::OkStatus();
}
operands_[operand_num] = new_operand;
VLOG(3) << "Replacing operand " << operand_num << " of " << name() << " with "
<< new_operand->name() << ", was " << old_operand->name();
if (!absl::c_linear_search(operands_, old_operand)) {
old_operand->RemoveUser(this);
}
new_operand->AddUser(this);
return absl::OkStatus();
}
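// Replaces this fusion instruction with the contents of its fused
// computation: each fused instruction is cloned into the parent
// computation, all users are redirected to the clone of the fused root,
// and the fusion instruction and its computation are removed.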
absl::Status HloInstruction::Defuse() {
if (opcode() != HloOpcode::kFusion) {
return absl::OkStatus();
}
VLOG(2) << "Defusing instruction: " << ToString();
HloComputation* fused_computation = fused_instructions_computation();
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
defused_instructions;
for (int64_t i = 0; i < operand_count(); ++i) {
defused_instructions[fused_computation->parameter_instruction(i)] =
mutable_operand(i);
}
for (HloInstruction* fused_instruction :
fused_computation->MakeInstructionPostOrder()) {
if (fused_instruction->opcode() == HloOpcode::kParameter) {
continue;
}
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : fused_instruction->operands()) {
new_operands.push_back(defused_instructions.at(operand));
}
HloInstruction* defused_instruction =
parent()->AddInstruction(fused_instruction->CloneWithNewOperands(
fused_instruction->shape(), new_operands));
defused_instructions[fused_instruction] = defused_instruction;
}
TF_RETURN_IF_ERROR(
ReplaceAllUsesWith(defused_instructions.at(fused_expression_root())));
HloModule* module = GetModule();
TF_RETURN_IF_ERROR(parent()->RemoveInstruction(this));
return module->RemoveEmbeddedComputation(fused_computation);
}
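// Hoists `instruction` out of this fusion by re-creating it in the parent
// computation. Only parameter, constant, and broadcast-of-constant
// operands are supported; a new fusion parameter is added for the hoisted
// value and uses inside the fusion are rewired to that parameter.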
absl::StatusOr<HloInstruction*> HloInstruction::UnfuseInstruction(
HloInstruction* instruction) {
CHECK_EQ(opcode(), HloOpcode::kFusion);
std::vector<HloInstruction*> new_operands;
for (int64_t operand_num = 0; operand_num < instruction->operand_count();
++operand_num) {
HloInstruction* operand = instruction->mutable_operand(operand_num);
if (operand->opcode() == HloOpcode::kParameter) {
HloInstruction* extracted_operand =
mutable_operand(operand->parameter_number());
new_operands.push_back(extracted_operand);
} else if (operand->opcode() == HloOpcode::kConstant) {
HloInstruction* cloned_constant = AddInstruction(operand->Clone());
new_operands.push_back(cloned_constant);
} else if (operand->opcode() == HloOpcode::kBroadcast &&
operand->operand(0)->opcode() == HloOpcode::kConstant) {
HloInstruction* cloned_constant =
AddInstruction(operand->operand(0)->Clone());
new_operands.push_back(AddInstruction(
operand->CloneWithNewOperands(operand->shape(), {cloned_constant})));
} else {
return InvalidArgument(
"Unsupported operand type for unfusing: %s. Currently only "
"parameters and constants are supported.",
operand->ToString());
}
}
HloInstruction* unfused_instruction = AddInstruction(
instruction->CloneWithNewOperands(instruction->shape(), new_operands));
HloComputation* fusion_computation = fused_instructions_computation();
HloInstruction* new_parameter = AddFusionOperand(unfused_instruction);
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_parameter));
TF_RETURN_IF_ERROR(
fusion_computation->RemoveInstructionAndUnusedOperands(instruction));
return unfused_instruction;
}
absl::Status HloInstruction::ReplaceUsesWith(
absl::Span<HloInstruction* const> users, HloInstruction* new_producer) {
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< shape() << " is not compatible with " << new_producer->shape();
return ReplaceAllUsesWithDifferentShape(users, new_producer);
}
absl::Status HloInstruction::ReplaceAllUsesWithDifferentShape(
absl::Span<HloInstruction* const> users, HloInstruction* new_producer) {
std::vector<HloInstruction*> users_vector(users.begin(), users.end());
for (HloInstruction* user : users_vector) {
TF_RETURN_IF_ERROR(ReplaceUseWithDifferentShape(user, new_producer));
}
if (parent_ && parent_->root_instruction() == this) {
parent_->set_root_instruction(new_producer,
true);
}
return absl::OkStatus();
}
absl::Status HloInstruction::ReplaceAllUsesWith(HloInstruction* new_producer,
absl::string_view trigger) {
auto print_options = HloPrintOptions::ShortParsable()
.set_print_operand_shape(true)
.set_print_extra_attributes(false);
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< "The shape doesn't match when replacing '" << ToString(print_options)
<< "' with '" << new_producer->ToString(print_options) << "'. " << shape()
<< " is not compatible with " << new_producer->shape() << "\n '"
<< trigger << "' triggered this wrong replacement.";
return ReplaceAllUsesWithDifferentShape(new_producer);
}
absl::Status HloInstruction::ReplaceAllUsesWithDifferentShape(
HloInstruction* new_producer) {
bool new_producer_is_user = false;
std::vector<HloInstruction*> users_vector(users().begin(), users().end());
for (HloInstruction* user : users_vector) {
if (user == new_producer) {
new_producer_is_user = true;
} else {
std::replace(user->operands_.begin(), user->operands_.end(), this,
new_producer);
new_producer->AddUser(user);
if (user->opcode() == HloOpcode::kFusion) {
TF_RETURN_IF_ERROR(
Cast<HloFusionInstruction>(user)->DeduplicateFusionOperands());
}
}
}
users_.Clear();
if (new_producer_is_user) {
AddUser(new_producer);
}
if (parent_ && parent_->root_instruction() == this) {
parent_->set_root_instruction(new_producer,
true);
}
return absl::OkStatus();
}
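// Returns true if this instruction is a bitcast, or a transpose whose
// operand and result layouts make it a bitcast.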
bool HloInstruction::IsEffectiveBitcast() const {
return opcode_ == HloOpcode::kBitcast ||
(opcode_ == HloOpcode::kTranspose &&
ShapeUtil::TransposeIsBitcast(operand(0)->shape(), shape(),
dimensions()));
}
HloComputation* HloInstruction::to_apply() const {
if (has_to_apply()) {
CHECK_EQ(called_computations().size(), 1)
<< "Expected a to_apply computation for " << opcode();
return called_computations()[0];
}
LOG(FATAL) << "Invalid opcode for to_apply(): " << opcode();
}
void HloInstruction::set_to_apply(HloComputation* computation) {
if (has_to_apply()) {
CHECK_EQ(called_computations().size(), 1)
<< "Expected a to_apply computation for " << opcode();
rare_->called_computations[0] = computation;
return;
}
LOG(FATAL) << "Invalid opcode for to_apply(): " << opcode();
}
bool HloInstruction::has_to_apply() const {
switch (opcode_) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kCall:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSort:
return true;
case HloOpcode::kCustomCall:
return called_computations().size() == 1;
default:
return false;
}
}
HloComputation* HloInstruction::while_condition() const {
CHECK_EQ(HloOpcode::kWhile, opcode_);
return called_computations()[kConditionComputationIndex];
}
HloComputation* HloInstruction::while_body() const {
CHECK_EQ(HloOpcode::kWhile, opcode_);
return called_computations()[kBodyComputationIndex];
}
void HloInstruction::set_while_condition(HloComputation* computation) {
CHECK_EQ(HloOpcode::kWhile, opcode_);
rare_->called_computations[kConditionComputationIndex] = computation;
}
void HloInstruction::set_while_body(HloComputation* computation) {
CHECK_EQ(HloOpcode::kWhile, opcode_);
rare_->called_computations[kBodyComputationIndex] = computation;
}
HloInstruction* HloInstruction::while_init() const {
CHECK_EQ(HloOpcode::kWhile, opcode_);
return operands_[0];
}
HloComputation* HloInstruction::true_computation() const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_EQ(PRED, operand(0)->shape().element_type());
return called_computations()[kTrueComputationIndex];
}
HloComputation* HloInstruction::false_computation() const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_EQ(PRED, operand(0)->shape().element_type());
return called_computations()[kFalseComputationIndex];
}
const PtrVec<HloComputation*>& HloInstruction::branch_computations() const {
CHECK(HloOpcode::kConditional == opcode_);
return called_computations();
}
int32_t HloInstruction::branch_count() const {
CHECK(HloOpcode::kConditional == opcode_);
return called_computations().size();
}
HloComputation* HloInstruction::branch_computation(int32_t b) const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_GE(b, 0);
CHECK_LT(b, called_computations().size());
return called_computations()[b];
}
int32_t HloInstruction::branch_index(HloComputation* computation) const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_NE(computation, nullptr);
for (int32_t idx = 0; idx < branch_count(); idx++) {
if (branch_computation(idx) == computation) {
return idx;
}
}
LOG(FATAL) << absl::StrFormat("Conditional %s does not contain branch %s",
name(), computation->name());
}
void HloInstruction::set_branch_computation(int b,
HloComputation* computation) {
CHECK_EQ(HloOpcode::kConditional, opcode_);
rare_->called_computations[b] = computation;
}
std::string HloInstruction::SignatureString() const {
std::string operands =
StrJoin(operands_, ", ", [](std::string* out, HloInstruction* operand) {
StrAppend(out, ShapeUtil::HumanString(operand->shape()));
});
return StrCat("(", operands, ") -> ", ShapeUtil::HumanString(shape()));
}
absl::string_view PrintName(absl::string_view name, bool print_ids) {
if (print_ids) {
return name;
} else {
auto dot_position = name.find_first_of('.');
return name.substr(0, dot_position);
}
}
namespace {
using DFSStack = absl::InlinedVector<std::pair<int, HloInstruction*>, 16>;
void PrintNameInternal(Printer* printer, absl::string_view name,
const HloPrintOptions& options) {
if (options.print_percent()) {
printer->Append("%");
}
printer->Append(PrintName(name, options.print_ids()));
}
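// Given a DFS stack on which `child` was encountered a second time,
// searches the subgraph captured by the stack for a directed cycle through
// `child` and returns a printable description of it (or the empty string
// if no cycle is found).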
std::string PrintCycle(const HloInstruction* child, DFSStack* dfs_stack,
bool ignore_control_predecessors) {
absl::flat_hash_set<const HloInstruction*> subgraph;
while (!dfs_stack->empty() && dfs_stack->back().second != child) {
subgraph.insert(dfs_stack->back().second);
dfs_stack->pop_back();
}
absl::flat_hash_set<const HloInstruction*> visited;
absl::InlinedVector<const HloInstruction*, 16> dfs;
dfs.push_back(child);
std::string result;
while (!dfs.empty() && result.empty()) {
bool found_next_instr = false;
auto process_users_or_successors =
[&](const std::vector<HloInstruction*>& users_or_successors) {
for (const auto& user : users_or_successors) {
if (user == child) {
dfs.push_back(child);
result = "\n\nDirected cycle:\n " +
absl::StrJoin(
dfs, "\n ",
[](std::string* out, const HloInstruction* instr) {
absl::StrAppend(out, instr->name());
});
return;
}
if (!subgraph.contains(user) || visited.contains(user)) {
continue;
}
visited.insert(user);
dfs.push_back(user);
found_next_instr = true;
}
};
const HloInstruction* back = dfs.back();
process_users_or_successors(back->users());
if (!ignore_control_predecessors) {
process_users_or_successors(back->control_successors());
}
if (!found_next_instr) {
dfs.pop_back();
}
}
return result;
}
}  // namespace
void HloInstruction::Print(Printer* printer,
const HloPrintOptions& options) const {
CanonicalNameMap new_map;
PrintWithCanonicalNameMap(printer, options, &new_map);
}
std::string HloInstruction::ToString(const HloPrintOptions& options) const {
StringPrinter printer;
Print(&printer, options);
return std::move(printer).ToString();
}
std::string HloInstruction::ToString() const {
return ToString(HloPrintOptions::Default());
}
bool HloInstruction::IsOpElementwise(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kAbs:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kCeil:
case HloOpcode::kClz:
case HloOpcode::kConvert:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kReducePrecision:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
return true;
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kDivide:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert:
return true;
case HloOpcode::kSelect:
case HloOpcode::kClamp:
return true;
default:
return false;
}
}
bool HloInstruction::IsElementwiseImpl(
const std::optional<int64_t>& operand_idx) const {
if (opcode_ == HloOpcode::kDynamicUpdateSlice) {
return operand_idx.has_value() && operand_idx.value() == 0;
}
if (opcode_ == HloOpcode::kBitcastConvert &&
primitive_util::BitWidth(shape_.element_type()) !=
primitive_util::BitWidth(operands_[0]->shape().element_type())) {
return false;
}
return IsOpElementwise(opcode_);
}
bool HloInstruction::IsCrossModuleAllReduce() const {
if (opcode() == HloOpcode::kAllReduce ||
opcode() == HloOpcode::kAllReduceStart) {
return channel_id() != std::nullopt;
} else if (opcode() == HloOpcode::kAllReduceDone) {
CHECK_EQ(operand_count(), 1);
const HloInstruction* operand = this->operand(0);
CHECK_EQ(operand->opcode(), HloOpcode::kAllReduceStart);
return operand->channel_id() != std::nullopt;
}
return false;
}
bool HloInstruction::IsCrossReplicaAllReduce() const {
if (opcode() == HloOpcode::kAllReduce ||
opcode() == HloOpcode::kAllReduceStart) {
return channel_id() == std::nullopt;
} else if (opcode() == HloOpcode::kAllReduceDone) {
CHECK_EQ(operand_count(), 1);
const HloInstruction* operand = this->operand(0);
CHECK_EQ(operand->opcode(), HloOpcode::kAllReduceStart);
return operand->channel_id() == std::nullopt;
}
return false;
}
void HloInstruction::PrintWithCanonicalNameMap(
Printer* printer, const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
if (options.canonicalize_instruction_names()) {
if (options.is_in_nested_computation()) {
DCHECK(!options.print_percent());
printer->Append(canonical_name_map->LookupOrInsert(unique_id()));
printer->Append(" = ");
}
} else {
PrintNameInternal(printer, name(), options);
printer->Append(" = ");
}
if (options.print_result_shape()) {
if (options.include_layout_in_shapes()) {
ShapeUtil::PrintHumanStringWithLayout(printer, shape());
} else {
ShapeUtil::PrintHumanString(printer, shape());
}
printer->Append(" ");
}
if (options.syntax_sugar_async_ops() && HloOpcodeIsAsync(opcode()) &&
(async_wrapped_computation() &&
async_wrapped_computation()->CanExpandIntoSingleInstruction())) {
absl::string_view suffix = [&]() {
switch (opcode()) {
case HloOpcode::kAsyncStart:
return "-start";
case HloOpcode::kAsyncUpdate:
return "-update";
default:
CHECK(opcode() == HloOpcode::kAsyncDone)
<< "Unexpected async opcode: " << opcode();
return "-done";
}
}();
printer->Append(HloOpcodeString(async_wrapped_opcode()));
printer->Append(suffix);
} else {
printer->Append(HloOpcodeString(opcode()));
}
printer->Append("(");
PrintOperandsWithCanonicalNameMap(printer, options, canonical_name_map);
printer->Append(")");
AttributePrinter attr_printer([printer]() {
printer->Append(", ");
return printer;
});
PrintExtraAttributes(attr_printer, options);
if (original_value_) {
printer->Append(", origin={");
printer->Append(OriginalValueToString(*original_value()));
printer->Append("}");
}
if (options.print_metadata() &&
(!metadata_->op_type().empty() || !metadata_->op_name().empty() ||
!metadata_->source_file().empty() ||
!metadata_->scheduling_name().empty())) {
printer->Append(", metadata={");
printer->Append(xla::OpMetadataToString(
*metadata_, options.print_metadata_only_op_name()));
printer->Append("}");
}
if (options.print_backend_config() && !backend_config_.empty()) {
absl::string_view config = backend_config_.GetRawString();
std::string sorted_config;
if (options.sort_backend_config()) {
sorted_config = SortJson(config).value_or(std::string(config));
config = sorted_config;
}
printer->Append(", backend_config=");
if (LexesAsJsonDict(config)) {
printer->Append(config);
} else {
printer->Append("\"");
printer->Append(CEscape(config));
printer->Append("\"");
}
}
}
void HloInstruction::PrintOperandsWithCanonicalNameMap(
Printer* printer, const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
if (operands_.empty()) return;
absl::Span<HloInstruction* const> slice(operands_);
constexpr int64_t kMaxOperandsToShowIfCompact = 4;
if (options.compact_operands() &&
slice.size() > kMaxOperandsToShowIfCompact) {
slice.remove_suffix(slice.size() - kMaxOperandsToShowIfCompact);
}
auto print_one = [&](const HloInstruction* operand) {
if (operand == nullptr) {
printer->Append("null ");
return;
}
bool add_space = false;
if (options.print_operand_shape()) {
if (options.include_layout_in_shapes()) {
ShapeUtil::PrintHumanStringWithLayout(printer, operand->shape());
} else {
ShapeUtil::PrintHumanString(printer, operand->shape());
}
add_space = true;
}
if (options.canonicalize_instruction_names()) {
if (options.is_in_nested_computation()) {
DCHECK(!options.print_percent());
if (add_space) printer->Append(" ");
printer->Append(
canonical_name_map->LookupOrInsert(operand->unique_id()));
}
} else if (options.print_operand_names()) {
if (add_space) printer->Append(" ");
PrintNameInternal(printer, operand->name(), options);
}
};
print_one(slice[0]);
for (int64_t i = 1; i < slice.size(); ++i) {
if (options.print_operand_index_annotation_interval() != 0 &&
i % options.print_operand_index_annotation_interval() == 0) {
printer->Append(absl::StrFormat(", ", i));
} else {
printer->Append(", ");
}
print_one(slice[i]);
}
const int64_t remaining = operands_.size() - slice.size();
if (remaining > 0) {
printer->Append(", ...(+");
printer->Append(remaining);
printer->Append(")");
}
}
namespace {
bool IsSequentialCall(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kWhile:
return true;
default:
return false;
}
}
}  // namespace
void HloInstruction::PrintExtraAttributes(
AttributePrinter& printer, const HloPrintOptions& options) const {
if (options.print_extra_attributes()) {
PrintExtraAttributesImpl(printer, options);
}
const auto subcomputation_mode = options.print_subcomputation_mode();
if (subcomputation_mode ==
HloPrintOptions::PrintSubcomputationMode::kNameOnly) {
if (opcode() == HloOpcode::kWhile) {
printer.Next([this, &options](Printer* printer) {
printer->Append("condition=");
PrintNameInternal(printer, while_condition()->name(), options);
});
printer.Next([this, &options](Printer* printer) {
printer->Append("body=");
PrintNameInternal(printer, while_body()->name(), options);
});
} else if (opcode() == HloOpcode::kSelectAndScatter) {
printer.Next([this, &options](Printer* printer) {
printer->Append("select=");
PrintNameInternal(printer, select()->name(), options);
});
printer.Next([this, &options](Printer* printer) {
printer->Append("scatter=");
PrintNameInternal(printer, scatter()->name(), options);
});
} else if (opcode() == HloOpcode::kConditional) {
if (operand(0)->shape().element_type() == PRED) {
printer.Next([this, &options](Printer* printer) {
printer->Append("true_computation=");
PrintNameInternal(printer, true_computation()->name(), options);
});
printer.Next([this, &options](Printer* printer) {
printer->Append("false_computation=");
PrintNameInternal(printer, false_computation()->name(), options);
});
} else {
printer.Next([this, &options](Printer* printer) {
printer->Append("branch_computations={");
AppendJoin(printer, branch_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
PrintNameInternal(printer, computation->name(), options);
});
printer->Append("}");
});
}
} else if (opcode() == HloOpcode::kCall || opcode() == HloOpcode::kMap ||
opcode() == HloOpcode::kReduceWindow ||
opcode() == HloOpcode::kReduce ||
opcode() == HloOpcode::kAllReduce ||
opcode() == HloOpcode::kReduceScatter ||
opcode() == HloOpcode::kAllReduceStart ||
opcode() == HloOpcode::kScatter ||
opcode() == HloOpcode::kTopK || opcode() == HloOpcode::kSort) {
if (!called_computations().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("to_apply=");
PrintNameInternal(printer, to_apply()->name(), options);
});
}
if (opcode() == HloOpcode::kCall && is_composite()) {
printer.Next(
[](Printer* printer) { printer->Append("is_composite=true"); });
}
} else if (opcode() == HloOpcode::kCustomCall) {
if (!called_computations().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("called_computations={");
AppendJoin(printer, called_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
PrintNameInternal(printer, computation->name(), options);
});
printer->Append("}");
});
}
} else if (HloOpcodeIsAsync(opcode())) {
if (opcode() == HloOpcode::kAsyncStart &&
(!options.syntax_sugar_async_ops() ||
(async_wrapped_computation() &&
!async_wrapped_computation()->CanExpandIntoSingleInstruction()))) {
printer.Next([this, &options](Printer* printer) {
printer->Append("calls=");
PrintNameInternal(printer, async_wrapped_computation()->name(),
options);
});
}
} else if (!called_computations().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("calls=");
AppendJoin(printer, called_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
PrintNameInternal(printer, computation->name(), options);
});
});
}
} else if ((subcomputation_mode ==
HloPrintOptions::PrintSubcomputationMode::kFullBodies) ||
(subcomputation_mode == HloPrintOptions::PrintSubcomputationMode::
kNonSequentialBodies &&
!IsSequentialCall(opcode()))) {
HloPrintOptions new_options = options;
new_options.set_is_in_nested_computation(true);
switch (opcode()) {
case HloOpcode::kWhile:
printer.Next([this, &new_options](Printer* printer) {
printer->Append("condition=\n");
while_condition()->Print(printer, new_options);
});
printer.Next([this, &new_options](Printer* printer) {
printer->Append("body=\n");
while_body()->Print(printer, new_options);
});
break;
case HloOpcode::kSelectAndScatter:
printer.Next([this, &new_options](Printer* printer) {
printer->Append("select=\n");
select()->Print(printer, new_options);
});
printer.Next([this, &new_options](Printer* printer) {
printer->Append("scatter=\n");
scatter()->Print(printer, new_options);
});
break;
case HloOpcode::kConditional:
if (operand(0)->shape().element_type() == PRED) {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("true_computation=\n");
true_computation()->Print(printer, new_options);
});
printer.Next([this, &new_options](Printer* printer) {
printer->Append("false_computation=\n");
false_computation()->Print(printer, new_options);
});
} else {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("branch_computations={\n");
AppendJoin(
printer, branch_computations(), ",\n",
[&](Printer* printer, const HloComputation* computation) {
computation->Print(printer, new_options);
});
printer->Append("\n}");
});
}
break;
case HloOpcode::kCall:
case HloOpcode::kMap:
case HloOpcode::kReduceWindow:
case HloOpcode::kReduce:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kScatter:
case HloOpcode::kSort:
case HloOpcode::kTopK:
if (!called_computations().empty()) {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("to_apply=\n");
to_apply()->Print(printer, new_options);
});
}
if (opcode() == HloOpcode::kCall && is_composite()) {
printer.Next(
[](Printer* printer) { printer->Append("is_composite=true"); });
}
break;
default:
if (!called_computations().empty()) {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("calls=\n");
AppendJoin(
printer, called_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
computation->Print(printer, new_options);
});
});
}
break;
}
}
if (has_sharding()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("sharding=");
sharding().Print(printer, options.print_metadata());
});
}
if (!frontend_attributes().map().empty()) {
printer.Next([this](Printer* printer) {
AppendCat(printer, "frontend_attributes=",
FrontendAttributesToString(frontend_attributes()));
});
}
if (opcode() != HloOpcode::kCall) {
CHECK(!is_composite())
<< "Only kCall instructions should have is_composite set";
}
if (options.print_control_dependencies() && !control_predecessors().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("control-predecessors={");
AppendJoin(printer, control_predecessors(), ", ",
[&](Printer* printer, HloInstruction* pre) {
PrintNameInternal(printer, pre->name(), options);
});
printer->Append("}");
});
}
if (!statistics_viz().statistics().empty()) {
printer.Next([this](Printer* printer) {
AppendCat(printer,
"statistics=", StatisticsVizToString(statistics_viz()));
});
}
}
std::vector<std::string> HloInstruction::ExtraAttributesToString(
const HloPrintOptions& options) const {
class MultiStringPrinter : public Printer {
public:
void Append(const absl::AlphaNum& a) override {
if (strings_.empty()) {
strings_.push_back({});
}
absl::StrAppend(&strings_.back(), a);
}
void Next() { strings_.push_back({}); }
std::vector<std::string> ConsumeStrings() && { return std::move(strings_); }
private:
std::vector<std::string> strings_;
} multi_string_printer;
AttributePrinter attr_printer([&multi_string_printer] {
multi_string_printer.Next();
return &multi_string_printer;
});
PrintExtraAttributes(attr_printer, options);
return std::move(multi_string_printer).ConsumeStrings();
}
std::string FrontendAttributesToString(
const FrontendAttributes& frontend_attributes) {
std::vector<std::pair<std::string, std::string>> sorted_attributes(
frontend_attributes.map().begin(), frontend_attributes.map().end());
absl::c_sort(sorted_attributes);
const auto formatter = [](std::string* out,
const std::pair<std::string, std::string>& item) {
if (LexesAsJsonDict(item.second)) {
absl::StrAppend(out, item.first, "=", item.second);
} else {
absl::StrAppend(out, item.first, "=\"", item.second, "\"");
}
};
return absl::StrFormat("{%s}",
absl::StrJoin(sorted_attributes, ",", formatter));
}
std::string HloInstruction::ToShortString() const {
return StrCat("%", name(), " = ", HloOpcodeString(opcode()), "(",
StrJoin(operands_, ", ",
[](std::string* out, HloInstruction* operand) {
StrAppend(out, "%", operand->name());
}),
")");
}
HloInstructionProto HloInstruction::ToProto() const {
HloInstructionProto proto;
CHECK(unique_id_ != -1)
<< "This instruction does not have a valid id. Please make sure the "
"instruction is inside a module before dumping it.";
proto.set_id(unique_id_);
proto.set_name(name_);
*proto.mutable_opcode() = std::string(HloOpcodeString(opcode_));
*proto.mutable_shape() = shape_.ToProto();
for (const HloInstruction* operand : operands_) {
proto.add_operand_ids(operand->unique_id());
}
for (const HloInstruction* control : control_predecessors()) {
proto.add_control_predecessor_ids(control->unique_id());
}
*proto.mutable_metadata() = *metadata_;
proto.set_backend_config(backend_config_.GetRawString());
if (opcode() != HloOpcode::kFusion) {
for (const HloComputation* computation : called_computations()) {
proto.add_called_computation_ids(computation->unique_id());
}
}
if (has_sharding()) {
*proto.mutable_sharding() = sharding().ToProto();
}
*proto.mutable_frontend_attributes() = frontend_attributes();
proto.set_is_composite(is_composite());
*proto.mutable_statistics_viz() = statistics_viz();
if (original_value_) {
*proto.mutable_original_value() = OriginalValueToProto(*original_value_);
}
return proto;
}
std::string HloInstruction::ToCategory() const {
if (opcode() == HloOpcode::kTranspose || opcode() == HloOpcode::kCopy ||
opcode() == HloOpcode::kReshape ||
opcode() == HloOpcode::kDynamicReshape) {
return "data formatting";
}
if (IsElementwise()) {
return "non-fusion elementwise";
}
return std::string(HloOpcodeString(opcode()));
}
bool HloInstruction::IsFused() const {
return parent_ != nullptr && parent_->IsFusionComputation();
}
bool HloInstruction::IsCustomCall(absl::string_view target) const {
return opcode() == HloOpcode::kCustomCall && custom_call_target() == target;
}
bool HloInstruction::IsCustomCall(
absl::Span<const absl::string_view> targets) const {
return opcode() == HloOpcode::kCustomCall &&
absl::c_linear_search(targets, custom_call_target());
}
bool HloInstruction::IsInputFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kInput;
}
bool HloInstruction::IsLoopFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kLoop;
}
bool HloInstruction::IsOutputFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kOutput;
}
bool HloInstruction::IsCustomFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kCustom;
}
bool HloInstruction::IsFusible() const {
switch (opcode_) {
case HloOpcode::kDomain:
case HloOpcode::kParameter:
case HloOpcode::kWhile:
case HloOpcode::kConditional:
case HloOpcode::kCall:
return false;
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
return true;
case HloOpcode::kRng:
return user_count() <= 1;
default:
return !HasSideEffect();
}
}
HloInstruction::HloInstruction(HloOpcode opcode, const Shape& shape)
: unique_id_(-1),
index_in_parent_(~0u),
opcode_(opcode),
is_default_config_(false),
cleaned_up_(false),
marked_as_dead_(false),
is_root_(false),
shape_(shape),
name_(HloOpcodeString(opcode)) {
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(shape_));
}
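// Single dispatch point from opcode to the matching visitor handler.
// Explicitly instantiated below for both DfsHloVisitor and ConstDfsHloVisitor.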
template <typename HloInstructionPtr>
absl::Status HloInstruction::Visit(
DfsHloVisitorBase<HloInstructionPtr>* visitor) {
switch (opcode_) {
case HloOpcode::kAbs:
return visitor->HandleAbs(this);
case HloOpcode::kAtan2:
return visitor->HandleAtan2(this);
case HloOpcode::kRoundNearestAfz:
return visitor->HandleRound(this);
case HloOpcode::kRoundNearestEven:
return visitor->HandleRoundNearestEven(this);
case HloOpcode::kBatchNormTraining:
return visitor->HandleBatchNormTraining(this);
case HloOpcode::kBatchNormInference:
return visitor->HandleBatchNormInference(this);
case HloOpcode::kBatchNormGrad:
return visitor->HandleBatchNormGrad(this);
case HloOpcode::kErf:
return visitor->HandleErf(this);
case HloOpcode::kLogistic:
return visitor->HandleLogistic(this);
case HloOpcode::kSign:
return visitor->HandleSign(this);
case HloOpcode::kConstant:
return visitor->HandleConstant(this);
case HloOpcode::kGetTupleElement:
return visitor->HandleGetTupleElement(this);
case HloOpcode::kParameter:
return visitor->HandleParameter(this);
case HloOpcode::kCompare:
return visitor->HandleCompare(this);
case HloOpcode::kComplex:
return visitor->HandleComplex(this);
case HloOpcode::kAdd:
return visitor->HandleAdd(this);
case HloOpcode::kDivide:
return visitor->HandleDivide(this);
case HloOpcode::kSubtract:
return visitor->HandleSubtract(this);
case HloOpcode::kMaximum:
return visitor->HandleMaximum(this);
case HloOpcode::kMinimum:
return visitor->HandleMinimum(this);
case HloOpcode::kAnd:
return visitor->HandleAnd(this);
case HloOpcode::kOr:
return visitor->HandleOr(this);
case HloOpcode::kXor:
return visitor->HandleXor(this);
case HloOpcode::kShiftLeft:
return visitor->HandleShiftLeft(this);
case HloOpcode::kShiftRightArithmetic:
return visitor->HandleShiftRightArithmetic(this);
case HloOpcode::kShiftRightLogical:
return visitor->HandleShiftRightLogical(this);
case HloOpcode::kConcatenate:
return visitor->HandleConcatenate(this);
case HloOpcode::kConvert:
return visitor->HandleConvert(this);
case HloOpcode::kBitcastConvert:
return visitor->HandleBitcastConvert(this);
case HloOpcode::kStochasticConvert:
return visitor->HandleStochasticConvert(this);
case HloOpcode::kCopy:
return visitor->HandleCopy(this);
case HloOpcode::kMultiply:
return visitor->HandleMultiply(this);
case HloOpcode::kDot:
return visitor->HandleDot(this);
case HloOpcode::kPower:
return visitor->HandlePower(this);
case HloOpcode::kRemainder:
return visitor->HandleRemainder(this);
case HloOpcode::kSelect:
return visitor->HandleSelect(this);
case HloOpcode::kConvolution:
return visitor->HandleConvolution(this);
case HloOpcode::kFft:
return visitor->HandleFft(this);
case HloOpcode::kAllGather:
return visitor->HandleAllGather(this);
case HloOpcode::kAllGatherStart:
return visitor->HandleAllGatherStart(this);
case HloOpcode::kAllGatherDone:
return visitor->HandleAllGatherDone(this);
case HloOpcode::kAllReduce:
return visitor->HandleAllReduce(this);
case HloOpcode::kReduceScatter:
return visitor->HandleReduceScatter(this);
case HloOpcode::kAllReduceStart:
return visitor->HandleAllReduceStart(this);
case HloOpcode::kAllReduceDone:
return visitor->HandleAllReduceDone(this);
case HloOpcode::kAllToAll:
return visitor->HandleAllToAll(this);
case HloOpcode::kCollectiveBroadcast:
return visitor->HandleCollectiveBroadcast(this);
case HloOpcode::kCollectivePermute:
return visitor->HandleCollectivePermute(this);
case HloOpcode::kCollectivePermuteStart:
return visitor->HandleCollectivePermuteStart(this);
case HloOpcode::kCollectivePermuteDone:
return visitor->HandleCollectivePermuteDone(this);
case HloOpcode::kReplicaId:
return visitor->HandleReplicaId(this);
case HloOpcode::kPartitionId:
return visitor->HandlePartitionId(this);
case HloOpcode::kTuple:
return visitor->HandleTuple(this);
case HloOpcode::kMap:
return visitor->HandleMap(this);
case HloOpcode::kClamp:
return visitor->HandleClamp(this);
case HloOpcode::kReduce:
return visitor->HandleReduce(this);
case HloOpcode::kReduceWindow:
return visitor->HandleReduceWindow(this);
case HloOpcode::kSelectAndScatter:
return visitor->HandleSelectAndScatter(this);
case HloOpcode::kNegate:
return visitor->HandleNegate(this);
case HloOpcode::kExp:
return visitor->HandleExp(this);
case HloOpcode::kExpm1:
return visitor->HandleExpm1(this);
case HloOpcode::kFloor:
return visitor->HandleFloor(this);
case HloOpcode::kCeil:
return visitor->HandleCeil(this);
case HloOpcode::kClz:
return visitor->HandleClz(this);
case HloOpcode::kLog:
return visitor->HandleLog(this);
case HloOpcode::kLog1p:
return visitor->HandleLog1p(this);
case HloOpcode::kTan:
return visitor->HandleTan(this);
case HloOpcode::kTanh:
return visitor->HandleTanh(this);
case HloOpcode::kCos:
return visitor->HandleCos(this);
case HloOpcode::kSin:
return visitor->HandleSin(this);
case HloOpcode::kSqrt:
return visitor->HandleSqrt(this);
case HloOpcode::kCbrt:
return visitor->HandleCbrt(this);
case HloOpcode::kRsqrt:
return visitor->HandleRsqrt(this);
case HloOpcode::kReal:
return visitor->HandleReal(this);
case HloOpcode::kImag:
return visitor->HandleImag(this);
case HloOpcode::kIsFinite:
return visitor->HandleIsFinite(this);
case HloOpcode::kNot:
return visitor->HandleNot(this);
case HloOpcode::kPopulationCount:
return visitor->HandlePopulationCount(this);
case HloOpcode::kBitcast:
return visitor->HandleBitcast(this);
case HloOpcode::kBroadcast:
return visitor->HandleBroadcast(this);
case HloOpcode::kPad:
return visitor->HandlePad(this);
case HloOpcode::kReshape:
return visitor->HandleReshape(this);
case HloOpcode::kDynamicReshape:
return visitor->HandleDynamicReshape(this);
case HloOpcode::kTranspose:
return visitor->HandleTranspose(this);
case HloOpcode::kReverse:
return visitor->HandleReverse(this);
case HloOpcode::kReducePrecision:
return visitor->HandleReducePrecision(this);
case HloOpcode::kSlice:
return visitor->HandleSlice(this);
case HloOpcode::kDynamicSlice:
return visitor->HandleDynamicSlice(this);
case HloOpcode::kDynamicUpdateSlice:
return visitor->HandleDynamicUpdateSlice(this);
case HloOpcode::kSort:
return visitor->HandleSort(this);
case HloOpcode::kInfeed:
return visitor->HandleInfeed(this);
case HloOpcode::kOutfeed:
return visitor->HandleOutfeed(this);
case HloOpcode::kRng:
return visitor->HandleRng(this);
case HloOpcode::kRngBitGenerator:
return visitor->HandleRngBitGenerator(this);
case HloOpcode::kRngGetAndUpdateState:
return visitor->HandleRngGetAndUpdateState(this);
case HloOpcode::kWhile:
return visitor->HandleWhile(this);
case HloOpcode::kFusion:
return visitor->HandleFusion(this);
case HloOpcode::kCall:
return visitor->HandleCall(this);
case HloOpcode::kConditional:
return visitor->HandleConditional(this);
case HloOpcode::kCustomCall:
return visitor->HandleCustomCall(this);
case HloOpcode::kAsyncStart:
return visitor->HandleAsyncStart(this);
case HloOpcode::kAsyncUpdate:
return visitor->HandleAsyncUpdate(this);
case HloOpcode::kAsyncDone:
return visitor->HandleAsyncDone(this);
case HloOpcode::kCopyStart:
return visitor->HandleCopyStart(this);
case HloOpcode::kCopyDone:
return visitor->HandleCopyDone(this);
case HloOpcode::kRecv:
return visitor->HandleRecv(this);
case HloOpcode::kTopK:
return visitor->HandleTopK(this);
case HloOpcode::kRecvDone:
return visitor->HandleRecvDone(this);
case HloOpcode::kSend:
return visitor->HandleSend(this);
case HloOpcode::kSendDone:
return visitor->HandleSendDone(this);
case HloOpcode::kGather:
return visitor->HandleGather(this);
case HloOpcode::kScatter:
return visitor->HandleScatter(this);
case HloOpcode::kDomain:
return visitor->HandleDomain(this);
case HloOpcode::kAfterAll:
return visitor->HandleAfterAll(this);
case HloOpcode::kAddDependency:
return visitor->HandleAddDependency(this);
case HloOpcode::kIota:
return visitor->HandleIota(this);
case HloOpcode::kGetDimensionSize:
return visitor->HandleGetDimensionSize(this);
case HloOpcode::kSetDimensionSize:
return visitor->HandleSetDimensionSize(this);
case HloOpcode::kTriangularSolve:
return visitor->HandleTriangularSolve(this);
case HloOpcode::kCholesky:
return visitor->HandleCholesky(this);
case HloOpcode::kOptimizationBarrier:
return visitor->HandleOptimizationBarrier(this);
default:
return Internal(
"Unhandled HloOpcode for DfsHloVisitor: %s. This should not happen - "
"please file a bug for XLA.",
HloOpcodeString(opcode_));
}
}
template absl::Status HloInstruction::Visit(DfsHloVisitor* visitor);
template absl::Status HloInstruction::Visit(ConstDfsHloVisitor* visitor);
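// Pushes `child` onto the DFS stack unless it has already been visited.
// Returns false iff `child` is currently in state kVisiting, i.e. the edge
// being followed closes a cycle.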
template <typename Visitor>
inline bool PushDFSChild(Visitor* visitor, DFSStack* dfs_stack,
HloInstruction* child) {
CHECK(child != nullptr);
const int id = child->unique_id();
  CHECK_GE(id, 0) << "instruction has an invalid id; it is likely not part "
                     "of a computation";
switch (visitor->GetVisitState(id)) {
case Visitor::kVisiting:
return false;
case Visitor::kVisited:
return true;
case Visitor::kNotVisited:
dfs_stack->push_back(std::make_pair(id, child));
return true;
}
}
using InternalCompareFunction =
absl::FunctionRef<bool(std::pair<int, const HloInstruction*>,
std::pair<int, const HloInstruction*>)>;
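// Iterative two-phase post-order DFS. A node encountered for the first time
// is marked kVisiting and its operands (and, depending on the flags, control
// predecessors and called-computation roots) are pushed above it; when it
// surfaces on the stack again it is processed and marked kVisited. Reaching
// a node that is still kVisiting through an edge indicates a cycle.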
template <typename Visitor>
static absl::Status PostOrderDFS(
HloInstruction* root, Visitor* visitor,
std::optional<InternalCompareFunction> operand_order,
bool ignore_control_predecessors, bool cross_computation) {
visitor->ReserveVisitStates(root->parent()->instruction_count());
DFSStack dfs_stack;
dfs_stack.emplace_back(root->unique_id(), root);
do {
DCHECK(!dfs_stack.empty());
int current_id = dfs_stack.back().first;
HloInstruction* current_node = dfs_stack.back().second;
    CHECK_GE(current_id, 0) << current_id << ": " << current_node
                            << ": instruction has an invalid id; it is "
                               "likely not part of a computation";
typename Visitor::VisitState visit_state =
visitor->GetVisitState(current_id);
if (visit_state == Visitor::kVisited) {
dfs_stack.pop_back();
VLOG(3) << "Not visiting HLO (id = " << current_id
<< ") as it was already visited.";
continue;
}
if (visit_state == Visitor::kVisiting) {
dfs_stack.pop_back();
TF_RETURN_IF_ERROR(visitor->Preprocess(current_node));
VLOG(2) << "Visiting HLO %" << current_node->name();
TF_RETURN_IF_ERROR(current_node->Visit(visitor));
visitor->SetVisitState(current_id, Visitor::kVisited);
TF_RETURN_IF_ERROR(visitor->Postprocess(current_node));
continue;
}
visitor->SetVisitState(current_id, Visitor::kVisiting);
const size_t old_dfs_stack_size = dfs_stack.size();
for (HloInstruction* child : current_node->operands()) {
if (!ABSL_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s %s",
current_node->ToString(),
PrintCycle(child, &dfs_stack, ignore_control_predecessors));
}
}
if (!ignore_control_predecessors) {
for (HloInstruction* child : current_node->control_predecessors()) {
if (!ABSL_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s %s",
current_node->ToString(),
PrintCycle(child, &dfs_stack, ignore_control_predecessors));
}
}
}
if (cross_computation) {
for (const HloComputation* called_computation :
current_node->called_computations()) {
HloInstruction* root_instruction =
called_computation->root_instruction();
if (!ABSL_PREDICT_TRUE(
PushDFSChild(visitor, &dfs_stack, root_instruction))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s %s",
current_node->ToString(),
PrintCycle(root_instruction, &dfs_stack,
ignore_control_predecessors));
}
}
}
if (operand_order != std::nullopt) {
std::sort(dfs_stack.begin() + old_dfs_stack_size, dfs_stack.end(),
*operand_order);
}
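    // Reverse the newly pushed children so they are popped off the stack,
    // and hence visited, in their original (or sorted) order.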
std::reverse(dfs_stack.begin() + old_dfs_stack_size, dfs_stack.end());
} while (!dfs_stack.empty());
return absl::OkStatus();
}
template <typename HloInstructionPtr>
absl::Status HloInstruction::Accept(
DfsHloVisitorBase<HloInstructionPtr>* visitor, bool call_finish_visit,
bool ignore_control_predecessors, bool cross_computation) {
VLOG(3) << "HloInstruction::Accept(%" << name() << ")";
TF_RETURN_IF_ERROR(PostOrderDFS(this, visitor, std::nullopt,
ignore_control_predecessors,
cross_computation));
if (call_finish_visit) {
TF_RETURN_IF_ERROR(visitor->FinishVisit(this));
}
return absl::OkStatus();
}
template absl::Status HloInstruction::Accept(DfsHloVisitor*, bool, bool, bool);
template absl::Status HloInstruction::Accept(ConstDfsHloVisitor*, bool, bool,
bool);
absl::Status HloInstruction::AcceptWithOperandOrder(
DfsHloVisitor* visitor, CompareFunction operand_order,
bool call_finish_visit) {
VLOG(2) << "HloInstruction::AcceptWithOperandOrder(%" << name() << ")";
auto func = [operand_order](std::pair<int, const HloInstruction*> a,
std::pair<int, const HloInstruction*> b) {
return operand_order(a.second, b.second);
};
  TF_RETURN_IF_ERROR(PostOrderDFS(this, visitor, func,
                                  /*ignore_control_predecessors=*/false,
                                  /*cross_computation=*/false));
if (call_finish_visit) {
VLOG(3) << "HloInstruction::AcceptWithOperandOrder BEFORE FINISH VISIT";
TF_RETURN_IF_ERROR(visitor->FinishVisit(this));
VLOG(3) << "HloInstruction::AcceptWithOperandOrder AFTER FINISH VISIT";
}
VLOG(2) << "HloInstruction::AcceptWithOperandOrder EXIT";
return absl::OkStatus();
}
const Shape& HloInstruction::shape() const { return shape_; }
absl::InlinedVector<int64_t, 4> HloInstruction::OperandIndices(
const HloInstruction* operand) const {
absl::InlinedVector<int64_t, 4> result;
for (int64_t i = 0; i < operand_count(); ++i) {
if (this->operand(i) == operand) {
result.push_back(i);
}
}
return result;
}
bool HloInstruction::IsElementwiseBinary() const {
return IsElementwise() && operand_count() == 2;
}
bool HloInstruction::IsElementwise() const {
return IsElementwiseImpl(std::nullopt);
}
bool HloInstruction::IsElementwiseOnOperand(int64_t operand_idx) const {
return IsElementwiseImpl(operand_idx);
}
namespace {
enum class UseKind { kReuse = 0, kUse = 1, kNoUse = 2 };
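// Note: the enumerator order matters. kReuse < kUse < kNoUse, so taking a
// std::min over UseKind values selects the "strongest" kind of use.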
class FusionReusesParamElements {
public:
static UseKind Compute(int64_t i, const HloInstruction& hlo) {
absl::flat_hash_map<const HloInstruction*, UseKind> memoization_cache;
return ComputeInternal(i, hlo, &memoization_cache);
}
private:
static UseKind ComputeInternal(
int64_t outer_param_num, const HloInstruction& hlo,
absl::flat_hash_map<const HloInstruction*, UseKind>* cache);
};
}
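// Classifies how `instr` consumes elements of operand `operand_num`: kUse
// when, roughly, each operand element is read once, kReuse when elements may
// be read multiple times (e.g. both operands of a matrix-matrix dot).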
static UseKind OperandElementUse(const HloInstruction& instr,
int64_t operand_num) {
switch (instr.opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kGather:
return UseKind::kUse;
case HloOpcode::kPad:
return operand_num > 0 ? UseKind::kReuse : UseKind::kUse;
case HloOpcode::kReduce:
return operand_num >= Cast<HloReduceInstruction>(&instr)->input_count()
? UseKind::kReuse
: UseKind::kUse;
case HloOpcode::kFusion:
return FusionReusesParamElements::Compute(operand_num,
*instr.fused_expression_root());
case HloOpcode::kDot:
if (instr.shape().dimensions_size() <= 1) {
if ((operand_num == 0 && instr.operand(1)->shape().rank() <= 1) ||
(operand_num == 1 && instr.operand(0)->shape().rank() <= 1)) {
return UseKind::kUse;
}
}
return UseKind::kReuse;
case HloOpcode::kDynamicUpdateSlice:
if (operand_num == 0 || operand_num == 1) {
return UseKind::kUse;
}
return UseKind::kReuse;
default:
return instr.IsElementwise() ? UseKind::kUse : UseKind::kReuse;
}
}
UseKind FusionReusesParamElements::ComputeInternal(
int64_t outer_param_num, const HloInstruction& hlo,
absl::flat_hash_map<const HloInstruction*, UseKind>* cache) {
if (auto hlo_param = DynCast<HloParameterInstruction>(&hlo)) {
if (hlo_param->parameter_number() == outer_param_num) {
return UseKind::kUse;
}
}
auto p = cache->emplace(&hlo, UseKind::kNoUse);
auto value_it = p.first;
const bool key_is_new = p.second;
if (!key_is_new) {
return value_it->second;
}
for (int64_t operand_num = 0; operand_num < hlo.operands().size();
++operand_num) {
UseKind old_val = value_it->second;
UseKind new_val = [&] {
UseKind hlo_use = OperandElementUse(hlo, operand_num);
if (hlo_use == UseKind::kNoUse) {
return old_val;
}
UseKind operand_use =
ComputeInternal(outer_param_num, *hlo.operand(operand_num), cache);
if (operand_use == UseKind::kNoUse) {
return old_val;
}
return std::min({old_val, hlo_use, operand_use});
}();
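    // Re-look up the entry: the recursive ComputeInternal call above may have
    // inserted into the flat_hash_map and invalidated value_it.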
value_it = cache->find(&hlo);
value_it->second = new_val;
if (new_val == UseKind::kReuse) {
break;
}
}
return value_it->second;
}
bool HloInstruction::ReusesOperandElements(int64_t i) const {
return OperandElementUse(*this, i) == UseKind::kReuse;
}
std::optional<ShapeUtil::ShapeEqualityDescriptor>
HloInstruction::ReshapeMerelyInsertsOrDeletes1SizedDimensions() const {
if (HloOpcode::kReshape != opcode_) {
return std::nullopt;
}
return ShapeUtil::InsertedOrDeleted1SizedDimensions(operand(0)->shape_,
shape_);
}
absl::string_view ToString(HloInstruction::FusionKind kind) {
switch (kind) {
case HloInstruction::FusionKind::kLoop:
return "kLoop";
case HloInstruction::FusionKind::kInput:
return "kInput";
case HloInstruction::FusionKind::kOutput:
return "kOutput";
case HloInstruction::FusionKind::kCustom:
return "kCustom";
}
}
absl::StatusOr<HloInstruction::FusionKind> StringToFusionKind(
absl::string_view kind_name) {
if (kind_name == "kLoop") {
return HloInstruction::FusionKind::kLoop;
}
if (kind_name == "kInput") {
return HloInstruction::FusionKind::kInput;
}
if (kind_name == "kOutput") {
return HloInstruction::FusionKind::kOutput;
}
if (kind_name == "kCustom") {
return HloInstruction::FusionKind::kCustom;
}
return InvalidArgument("Unknown fusion kind: %s", kind_name);
}
std::string StatisticsVizToString(const StatisticsViz& statistics_viz) {
if (statistics_viz.statistics().empty()) return "{}";
std::vector<Statistic> all_statistics(statistics_viz.statistics().begin(),
statistics_viz.statistics().end());
const auto formatter = [](std::string* out, const Statistic& item) {
absl::StrAppend(out, item.stat_name(), "=", item.stat_val());
};
return absl::StrFormat("{%s,%s}",
absl::StrCat("visualizing_index=",
statistics_viz.stat_index_to_visualize()),
absl::StrJoin(all_statistics, ",", formatter));
}
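// Formats each dimension as "low_high" (or "low_high_interior" when any
// dimension carries interior padding), with dimensions joined by 'x'.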
std::string PaddingConfigToString(const PaddingConfig& padding) {
bool has_interior_padding =
absl::c_any_of(padding.dimensions(),
[](const PaddingConfig::PaddingConfigDimension& dim) {
return dim.interior_padding() != 0;
});
return StrJoin(
padding.dimensions(), "x",
[&](std::string* out, const PaddingConfig::PaddingConfigDimension& dim) {
StrAppend(
out, dim.edge_padding_low(), "_", dim.edge_padding_high(),
has_interior_padding ? StrCat("_", dim.interior_padding()) : "");
});
}
std::string RandomDistributionToString(const RandomDistribution& distribution) {
return absl::AsciiStrToLower(RandomDistribution_Name(distribution));
}
std::string RandomAlgorithmToString(const RandomAlgorithm& algorithm) {
return absl::AsciiStrToLower(RandomAlgorithm_Name(algorithm));
}
std::string PrecisionToString(const PrecisionConfig::Precision& precision) {
return absl::AsciiStrToLower(PrecisionConfig::Precision_Name(precision));
}
std::string AlgorithmToString(const PrecisionConfig::Algorithm& algorithm) {
constexpr absl::string_view kPrefix = "ALG_";
const std::string& name = PrecisionConfig::Algorithm_Name(algorithm);
DCHECK(absl::StartsWith(name, kPrefix));
return absl::AsciiStrToLower(name.substr(kPrefix.size()));
}
static std::string CustomCallScheduleToString(
const CustomCallSchedule& schedule) {
return absl::AsciiStrToLower(CustomCallSchedule_Name(schedule));
}
static std::string CustomCallApiVersionToString(
const CustomCallApiVersion& schedule) {
return absl::AsciiStrToLower(CustomCallApiVersion_Name(schedule));
}
std::string DotDimensionNumbersToString(const DotDimensionNumbers& dnums) {
std::vector<std::string> result;
if (!dnums.lhs_batch_dimensions().empty()) {
result.push_back(StrCat("lhs_batch_dims={",
StrJoin(dnums.lhs_batch_dimensions(), ","), "}"));
}
result.push_back(StrCat("lhs_contracting_dims={",
StrJoin(dnums.lhs_contracting_dimensions(), ","),
"}"));
if (!dnums.rhs_batch_dimensions().empty()) {
result.push_back(StrCat("rhs_batch_dims={",
StrJoin(dnums.rhs_batch_dimensions(), ","), "}"));
}
result.push_back(StrCat("rhs_contracting_dims={",
StrJoin(dnums.rhs_contracting_dimensions(), ","),
"}"));
return StrJoin(result, ", ");
}
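// Renders the dimension numbers in the usual textual convolution layout,
// e.g. "b0f_0io->b0f": 'b'/'f' mark the batch/feature dimensions, 'i'/'o'
// the kernel input/output feature dimensions, digits the spatial dimensions,
// and '?' any slot not assigned a role.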
std::string ConvolutionDimensionNumbersToString(
const ConvolutionDimensionNumbers& dnums) {
auto len_required = [](int64_t a, int64_t b, absl::Span<const int64_t> cs) {
return std::max({a, b, cs.empty() ? 0 : *absl::c_max_element(cs)}) + 1;
};
std::vector<std::string> lhs_dims(
len_required(dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()),
"?");
lhs_dims[dnums.input_batch_dimension()] = 'b';
lhs_dims[dnums.input_feature_dimension()] = 'f';
for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) {
lhs_dims[dnums.input_spatial_dimensions(i)] = StrCat(i);
}
std::vector<std::string> rhs_dims(
len_required(dnums.kernel_input_feature_dimension(),
dnums.kernel_output_feature_dimension(),
dnums.kernel_spatial_dimensions()),
"?");
rhs_dims[dnums.kernel_input_feature_dimension()] = "i";
rhs_dims[dnums.kernel_output_feature_dimension()] = "o";
for (int64_t i = 0; i < dnums.kernel_spatial_dimensions().size(); ++i) {
rhs_dims[dnums.kernel_spatial_dimensions(i)] = StrCat(i);
}
std::vector<std::string> output_dims(
len_required(dnums.output_batch_dimension(),
dnums.output_feature_dimension(),
dnums.output_spatial_dimensions()),
"?");
output_dims[dnums.output_batch_dimension()] = 'b';
output_dims[dnums.output_feature_dimension()] = 'f';
for (int64_t i = 0; i < dnums.output_spatial_dimensions().size(); ++i) {
output_dims[dnums.output_spatial_dimensions(i)] = StrCat(i);
}
return StrCat(StrJoin(lhs_dims, ""), "_", StrJoin(rhs_dims, ""), "->",
StrJoin(output_dims, ""));
}
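// The StringTo* parsers below lazily build reverse lookup tables from the
// proto-generated enum names. The tables are heap-allocated and never freed,
// the usual pattern for process-lifetime statics.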
absl::StatusOr<RandomAlgorithm> StringToRandomAlgorithm(
const std::string& name) {
static absl::flat_hash_map<std::string, RandomAlgorithm>* map = [] {
static auto* map = new absl::flat_hash_map<std::string, RandomAlgorithm>;
for (int i = 0; i < RandomAlgorithm_ARRAYSIZE; i++) {
if (RandomAlgorithm_IsValid(i)) {
auto value = static_cast<RandomAlgorithm>(i);
(*map)[RandomAlgorithmToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
    return InvalidArgument("Unknown algorithm: %s", name);
}
return found->second;
}
absl::StatusOr<RandomDistribution> StringToRandomDistribution(
const std::string& name) {
static absl::flat_hash_map<std::string, RandomDistribution>* map = [] {
static auto* map = new absl::flat_hash_map<std::string, RandomDistribution>;
for (int i = 0; i < RandomDistribution_ARRAYSIZE; i++) {
if (RandomDistribution_IsValid(i)) {
auto value = static_cast<RandomDistribution>(i);
(*map)[RandomDistributionToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
    return InvalidArgument("Unknown distribution: %s", name);
}
return found->second;
}
absl::StatusOr<PrecisionConfig::Precision> StringToPrecision(
const std::string& name) {
static absl::flat_hash_map<std::string, PrecisionConfig::Precision>* map =
[] {
static auto* map =
new absl::flat_hash_map<std::string, PrecisionConfig::Precision>;
for (int i = 0; i < PrecisionConfig::Precision_ARRAYSIZE; i++) {
if (PrecisionConfig::Precision_IsValid(i)) {
auto value = static_cast<PrecisionConfig::Precision>(i);
(*map)[PrecisionToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
    return InvalidArgument("Unknown precision: %s", name);
}
return found->second;
}
absl::StatusOr<PrecisionConfig::Algorithm> StringToAlgorithm(
const std::string& name) {
static absl::flat_hash_map<std::string, PrecisionConfig::Algorithm>* map =
[] {
static auto* map =
new absl::flat_hash_map<std::string, PrecisionConfig::Algorithm>;
for (int i = 0; i < PrecisionConfig::Algorithm_ARRAYSIZE; i++) {
if (PrecisionConfig::Algorithm_IsValid(i)) {
auto value = static_cast<PrecisionConfig::Algorithm>(i);
(*map)[AlgorithmToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
    return InvalidArgument("Unknown algorithm: %s", name);
}
return found->second;
}
absl::StatusOr<CustomCallSchedule> StringToCustomCallSchedule(
absl::string_view name) {
static const absl::flat_hash_map<std::string, CustomCallSchedule>* map = [] {
static auto* map = new absl::flat_hash_map<std::string, CustomCallSchedule>;
for (int i = 0; i < CustomCallSchedule_ARRAYSIZE; i++) {
if (CustomCallSchedule_IsValid(i)) {
auto value = static_cast<CustomCallSchedule>(i);
(*map)[CustomCallScheduleToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
    return InvalidArgument("Unknown custom-call schedule: %s", name);
}
return found->second;
}
absl::StatusOr<CustomCallApiVersion> StringToCustomCallApiVersion(
absl::string_view name) {
static const absl::flat_hash_map<std::string, CustomCallApiVersion>* map =
[] {
static auto* map =
new absl::flat_hash_map<std::string, CustomCallApiVersion>;
for (int i = 0; i < CustomCallApiVersion_ARRAYSIZE; i++) {
if (CustomCallApiVersion_IsValid(i)) {
auto value = static_cast<CustomCallApiVersion>(i);
(*map)[CustomCallApiVersionToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
    return InvalidArgument("Unknown custom-call API version: %s", name);
}
return found->second;
}
std::ostream& operator<<(std::ostream& os, HloInstruction::FusionKind kind) {
return os << ToString(kind);
}
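// Deterministic, pointer-value-independent ordering for instructions: null
// sorts first, then by the owning module's unique id, then by the
// instruction's own unique id. This is what makes the comparator suitable
// for, e.g., std::map<HloInstruction*, T, HloPtrComparator> with stable
// iteration order across runs.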
bool HloPtrComparator::operator()(const HloInstruction* const& lhs,
const HloInstruction* const& rhs) const {
if (rhs == nullptr) {
return false;
}
if (lhs == nullptr) {
return true;
}
auto lhs_module = lhs->GetModule();
auto rhs_module = rhs->GetModule();
CHECK((lhs_module == nullptr && rhs_module == nullptr) ||
(lhs_module != nullptr && rhs_module != nullptr));
if (lhs_module != nullptr &&
lhs_module->unique_id() != rhs_module->unique_id()) {
return lhs_module->unique_id() < rhs_module->unique_id();
}
return lhs->unique_id() < rhs->unique_id();
}
const PrecisionConfig& HloInstruction::precision_config() const {
if (auto* convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->precision_config();
}
if (auto* dot = DynCast<HloDotInstruction>(this)) {
return dot->precision_config();
}
if (auto* custom_call = DynCast<HloCustomCallInstruction>(this)) {
return custom_call->precision_config();
}
LOG(FATAL) << "Unimplemented method.";
}
PrecisionConfig* HloInstruction::mutable_precision_config() {
if (auto* convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->mutable_precision_config();
}
if (auto* dot = DynCast<HloDotInstruction>(this)) {
return dot->mutable_precision_config();
}
if (auto* custom_call = DynCast<HloCustomCallInstruction>(this)) {
return custom_call->mutable_precision_config();
}
LOG(FATAL) << "Unimplemented method.";
}
HloModule* HloInstruction::GetModule() const {
if (parent_) {
return parent_->parent();
}
return nullptr;
}
void HloInstruction::UniquifyName(NameUniquer* name_uniquer) {
name_ = name_uniquer->GetUniqueName(name_);
}
void HloInstruction::UniquifyName(HloModule* module) {
UniquifyName(&module->instruction_name_uniquer());
}
void HloInstruction::UniquifyId(HloModule* module) {
SetUniqueId(module->NewUniqueInstructionId());
}
void HloInstruction::SortInstructionUsersAndControlLists(
const MappedPtrContainerSorter<HloInstruction>::MapPtrFn& map_fn,
const HloInstruction& sorted_instruction) {
using Sorter = MappedPtrContainerSorter<HloInstruction>;
users_.SortInstructionUsers(map_fn, sorted_instruction.users_);
absl::Status status;
if (has_rare()) {
status = Sorter::Sort(map_fn, Sorter::IndexAfterMappedElementsFn(),
sorted_instruction.control_predecessors(),
mutable_rare()->control_predecessors);
}
if (!status.ok()) {
LOG(ERROR) << "Failed to sort instruction control predecessors for "
<< name() << "; " << status;
}
if (has_rare()) {
status = Sorter::Sort(map_fn, Sorter::IndexAfterMappedElementsFn(),
sorted_instruction.control_successors(),
mutable_rare()->control_successors);
}
if (!status.ok()) {
LOG(ERROR) << "Failed to sort instruction control successors for " << name()
<< "; " << status;
}
}
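// The accessors below forward to the concrete HloInstruction subclass via
// Cast/DynCast; invoking one on an instruction of the wrong opcode trips a
// CHECK inside Cast.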
int64_t HloInstruction::feature_index() const {
return Cast<HloBatchNormInstruction>(this)->feature_index();
}
float HloInstruction::epsilon() const {
return Cast<HloBatchNormInstruction>(this)->epsilon();
}
FftType HloInstruction::fft_type() const {
return Cast<HloFftInstruction>(this)->fft_type();
}
const std::vector<int64_t>& HloInstruction::fft_length() const {
return Cast<HloFftInstruction>(this)->fft_length();
}
int64_t HloInstruction::concatenate_dimension() const {
return Cast<HloConcatenateInstruction>(this)->concatenate_dimension();
}
int64_t HloInstruction::dimension() const {
if (auto set_size = DynCast<HloSetDimensionSizeInstruction>(this)) {
return set_size->dimension();
}
return Cast<HloGetDimensionSizeInstruction>(this)->dimension();
}
int64_t HloInstruction::inferred_dimension() const {
return Cast<HloReshapeInstruction>(this)->inferred_dimension();
}
bool HloInstruction::IsRank2Transpose() const {
auto transpose = DynCast<HloTransposeInstruction>(this);
return transpose != nullptr && transpose->IsRank2Transpose();
}
int64_t HloInstruction::slice_starts(int64_t dimension) const {
return Cast<HloSliceInstruction>(this)->slice_starts(dimension);
}
const std::vector<int64_t>& HloInstruction::slice_starts() const {
return Cast<HloSliceInstruction>(this)->slice_starts();
}
std::vector<int64_t>* HloInstruction::mutable_slice_starts() {
return Cast<HloSliceInstruction>(this)->mutable_slice_starts();
}
int64_t HloInstruction::slice_limits(int64_t dimension) const {
return Cast<HloSliceInstruction>(this)->slice_limits(dimension);
}
const std::vector<int64_t>& HloInstruction::slice_limits() const {
return Cast<HloSliceInstruction>(this)->slice_limits();
}
std::vector<int64_t>* HloInstruction::mutable_slice_limits() {
return Cast<HloSliceInstruction>(this)->mutable_slice_limits();
}
int64_t HloInstruction::slice_strides(int64_t dimension) const {
return Cast<HloSliceInstruction>(this)->slice_strides(dimension);
}
const std::vector<int64_t>& HloInstruction::slice_strides() const {
return Cast<HloSliceInstruction>(this)->slice_strides();
}
std::vector<int64_t>* HloInstruction::mutable_slice_strides() {
return Cast<HloSliceInstruction>(this)->mutable_slice_strides();
}
const Literal& HloInstruction::literal() const {
return Cast<HloConstantInstruction>(this)->literal();
}
bool HloInstruction::IsConstant() const {
return DynCast<HloConstantInstruction>(this) != nullptr;
}
void HloInstruction::RelayoutConstant(const Layout& new_layout,
const ShapeIndex& shape_index) {
Cast<HloConstantInstruction>(this)->RelayoutConstant(new_layout, shape_index);
}
HloInstruction* HloInstruction::AppendInstructionIntoCalledComputation(
HloInstruction* instruction_to_append, bool add_output) {
return Cast<HloCallableInstruction>(this)
->AppendInstructionIntoCalledComputation(instruction_to_append,
add_output);
}
HloInstruction* HloInstruction::AddFusionOperand(HloInstruction* new_operand) {
return Cast<HloFusionInstruction>(this)->AddFusionOperand(new_operand);
}
void HloInstruction::MergeFusionInstruction(
HloInstruction* instruction_to_merge) {
return Cast<HloFusionInstruction>(this)->MergeFusionInstruction(
Cast<HloFusionInstruction>(instruction_to_merge));
}
void HloInstruction::MergeFusionInstructionIntoMultiOutput(
HloInstruction* instruction_to_merge) {
return Cast<HloFusionInstruction>(this)
->MergeFusionInstructionIntoMultiOutput(
Cast<HloFusionInstruction>(instruction_to_merge));
}
HloInstruction* HloInstruction::FuseInstruction(
HloInstruction* instruction_to_fuse) {
return Cast<HloFusionInstruction>(this)->FuseInstruction(instruction_to_fuse);
}
HloInstruction* HloInstruction::FuseInstructionIntoMultiOutput(
HloInstruction* instruction_to_fuse) {
return Cast<HloFusionInstruction>(this)->FuseInstructionIntoMultiOutput(
instruction_to_fuse);
}
HloComputation* HloInstruction::fused_instructions_computation() const {
return Cast<HloFusionInstruction>(this)->fused_instructions_computation();
}
HloInstruction* HloInstruction::fused_expression_root() const {
return Cast<HloFusionInstruction>(this)->fused_expression_root();
}
tsl::gtl::iterator_range<HloInstructionUnwrappingConstIterator>
HloInstruction::fused_instructions() const {
return Cast<HloFusionInstruction>(this)->fused_instructions();
}
tsl::gtl::iterator_range<HloInstructionUnwrappingIterator>
HloInstruction::fused_instructions() {
return Cast<HloFusionInstruction>(this)->fused_instructions();
}
int64_t HloInstruction::fused_instruction_count() const {
return Cast<HloFusionInstruction>(this)->fused_instruction_count();
}
HloInstruction* HloInstruction::fused_parameter(
int64_t parameter_number) const {
return Cast<HloFusionInstruction>(this)->fused_parameter(parameter_number);
}
const HloInstruction::InstructionVector& HloInstruction::fused_parameters()
const {
return Cast<HloFusionInstruction>(this)->fused_parameters();
}
bool HloInstruction::IsMultiOutputFusion() const {
const HloFusionInstruction* fusion = DynCast<HloFusionInstruction>(this);
return fusion != nullptr && fusion->IsMultiOutputFusion();
}
HloInstruction::FusionKind HloInstruction::fusion_kind() const {
return Cast<HloFusionInstruction>(this)->fusion_kind();
}
void HloInstruction::set_fusion_kind(FusionKind kind) {
return Cast<HloFusionInstruction>(this)->set_fusion_kind(kind);
}
RandomDistribution HloInstruction::random_distribution() const {
return Cast<HloRngInstruction>(this)->random_distribution();
}
int64_t HloInstruction::parameter_number() const {
return Cast<HloParameterInstruction>(this)->parameter_number();
}
void HloInstruction::set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool> parameter_replicated_at_leaf_buffers) {
return Cast<HloParameterInstruction>(this)
->set_parameter_replicated_at_leaf_buffers(
parameter_replicated_at_leaf_buffers);
}
void HloInstruction::set_parameter_replicated_at_leaf_buffers(
const std::vector<bool>& parameter_replicated_at_leaf_buffers) {
return Cast<HloParameterInstruction>(this)
->set_parameter_replicated_at_leaf_buffers(
parameter_replicated_at_leaf_buffers);
}
const std::optional<std::vector<bool>>&
HloInstruction::parameter_replicated_at_leaf_buffers() const {
return Cast<HloParameterInstruction>(this)
->parameter_replicated_at_leaf_buffers();
}
int64_t HloInstruction::tuple_index() const {
return Cast<HloGetTupleElementInstruction>(this)->tuple_index();
}
void HloInstruction::set_tuple_index(int64_t new_tuple_index) {
return Cast<HloGetTupleElementInstruction>(this)->set_tuple_index(
new_tuple_index);
}
int32_t HloInstruction::exponent_bits() const {
return Cast<HloReducePrecisionInstruction>(this)->exponent_bits();
}
int32_t HloInstruction::mantissa_bits() const {
return Cast<HloReducePrecisionInstruction>(this)->mantissa_bits();
}
std::string HloInstruction::infeed_config() const {
return Cast<HloInfeedInstruction>(this)->infeed_config();
}
void HloInstruction::set_infeed_config(const std::string& config) {
return Cast<HloInfeedInstruction>(this)->set_infeed_config(config);
}
const Shape& HloInstruction::outfeed_shape() const {
return Cast<HloOutfeedInstruction>(this)->outfeed_shape();
}
Shape* HloInstruction::mutable_outfeed_shape() {
return Cast<HloOutfeedInstruction>(this)->mutable_outfeed_shape();
}
const std::string& HloInstruction::outfeed_config() const {
return Cast<HloOutfeedInstruction>(this)->outfeed_config();
}
void HloInstruction::set_outfeed_config(const std::string& config) {
return Cast<HloOutfeedInstruction>(this)->set_outfeed_config(config);
}
const std::vector<ReplicaGroup>& HloInstruction::replica_groups() const {
return Cast<HloCollectiveInstruction>(this)->replica_groups();
}
const CollectiveDeviceList& HloInstruction::device_list() const {
return Cast<HloCollectiveInstruction>(this)->device_list();
}
const std::vector<std::pair<int64_t, int64_t>>&
HloInstruction::source_target_pairs() const {
return Cast<HloCollectivePermuteInstruction>(this)->source_target_pairs();
}
std::optional<int64_t> HloInstruction::channel_id() const {
return Cast<HloChannelInstruction>(this)->channel_id();
}
void HloInstruction::set_channel_id(const std::optional<int64_t>& channel_id) {
return Cast<HloChannelInstruction>(this)->set_channel_id(channel_id);
}
const ConvolutionDimensionNumbers&
HloInstruction::convolution_dimension_numbers() const {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->convolution_dimension_numbers();
}
if (auto custom_call = DynCast<HloCustomCallInstruction>(this)) {
return custom_call->convolution_dimension_numbers();
}
LOG(FATAL) << "Unimplemented method.";
}
void HloInstruction::set_convolution_dimension_numbers(
const ConvolutionDimensionNumbers& dnums) {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
convolution->set_convolution_dimension_numbers(dnums);
} else if (auto custom_call = DynCast<HloCustomCallInstruction>(this)) {
custom_call->set_convolution_dimension_numbers(dnums);
} else {
LOG(FATAL) << "Unimplemented method.";
}
}
int64_t HloInstruction::feature_group_count() const {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->feature_group_count();
}
return Cast<HloCustomCallInstruction>(this)->feature_group_count();
}
void HloInstruction::set_feature_group_count(int64_t feature_group_count) {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->set_feature_group_count(feature_group_count);
}
Cast<HloCustomCallInstruction>(this)->set_feature_group_count(
feature_group_count);
}
int64_t HloInstruction::batch_group_count() const {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->batch_group_count();
}
return Cast<HloCustomCallInstruction>(this)->batch_group_count();
}
void HloInstruction::set_batch_group_count(int64_t batch_group_count) {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->set_batch_group_count(batch_group_count);
}
Cast<HloCustomCallInstruction>(this)->set_batch_group_count(
batch_group_count);
}
HloComputation* HloInstruction::select() const {
return Cast<HloSelectAndScatterInstruction>(this)->select();
}
HloComputation* HloInstruction::scatter() const {
return Cast<HloSelectAndScatterInstruction>(this)->scatter();
}
void HloInstruction::set_select(HloComputation* computation) {
return Cast<HloSelectAndScatterInstruction>(this)->set_select(computation);
}
void HloInstruction::set_scatter(HloComputation* computation) {
return Cast<HloSelectAndScatterInstruction>(this)->set_scatter(computation);
}
const std::string& HloInstruction::custom_call_target() const {
return Cast<HloCustomCallInstruction>(this)->custom_call_target();
}
void HloInstruction::set_custom_call_target(absl::string_view target) {
Cast<HloCustomCallInstruction>(this)->set_custom_call_target(target);
}
const PaddingConfig& HloInstruction::padding_config() const {
return Cast<HloPadInstruction>(this)->padding_config();
}
PaddingType HloInstruction::padding_type() const {
return Cast<HloCustomCallInstruction>(this)->padding_type();
}
PaddingConfig* HloInstruction::mutable_padding_config() {
return Cast<HloPadInstruction>(this)->mutable_padding_config();
}
int64_t HloInstruction::slice_sizes(int64_t dimension) const {
return Cast<HloDynamicSliceInstruction>(this)->slice_sizes(dimension);
}
const std::vector<int64_t>& HloInstruction::dynamic_slice_sizes() const {
return Cast<HloDynamicSliceInstruction>(this)->dynamic_slice_sizes();
}
const std::vector<std::vector<int64_t>>&
HloInstruction::dynamic_slice_sizes_list() const {
return Cast<HloCollectivePermuteInstruction>(this)
->dynamic_slice_sizes_list();
}
const GatherDimensionNumbers& HloInstruction::gather_dimension_numbers() const {
return Cast<HloGatherInstruction>(this)->gather_dimension_numbers();
}
absl::Span<const int64_t> HloInstruction::gather_slice_sizes() const {
return Cast<HloGatherInstruction>(this)->gather_slice_sizes();
}
const ScatterDimensionNumbers& HloInstruction::scatter_dimension_numbers()
const {
return Cast<HloScatterInstruction>(this)->scatter_dimension_numbers();
}
const DotDimensionNumbers& HloInstruction::dot_dimension_numbers() const {
return Cast<HloDotInstruction>(this)->dot_dimension_numbers();
}
const DomainMetadata& HloInstruction::operand_side_metadata() const {
return Cast<HloDomainInstruction>(this)->operand_side_metadata();
}
const DomainMetadata& HloInstruction::user_side_metadata() const {
return Cast<HloDomainInstruction>(this)->user_side_metadata();
}
bool HloInstruction::IsAsynchronous() const {
return HloOpcodeIsAsync(opcode());
}
HloInstruction* HloInstruction::async_chain_start() const {
return Cast<HloAsyncInstruction>(this)->async_chain_start();
}
HloInstruction* HloInstruction::async_chain_done() const {
return Cast<HloAsyncInstruction>(this)->async_chain_done();
}
HloComputation* HloInstruction::async_wrapped_computation() const {
return Cast<HloAsyncInstruction>(this)->async_wrapped_computation();
}
HloInstruction* HloInstruction::async_wrapped_instruction() const {
return Cast<HloAsyncInstruction>(this)->async_wrapped_instruction();
}
HloOpcode HloInstruction::async_wrapped_opcode() const {
return Cast<HloAsyncInstruction>(this)->async_wrapped_opcode();
}
absl::string_view HloInstruction::async_execution_thread() const {
return Cast<HloAsyncInstruction>(this)->async_execution_thread();
}
void HloInstruction::set_async_execution_thread(
absl::string_view async_execution_thread) {
Cast<HloAsyncInstruction>(this)->set_async_execution_thread(
async_execution_thread);
}
void HloInstruction::set_called_computations_execution_thread(
absl::string_view async_execution_thread,
bool skip_async_execution_thread_overwrite) {
Cast<HloCallableInstruction>(this)->RecursivelySetComputationsThreadName(
async_execution_thread, skip_async_execution_thread_overwrite);
}
std::optional<int> HloInstruction::cross_program_prefetch_index() const {
return Cast<HloCopyStartInstruction>(this)->cross_program_prefetch_index();
}
ComparisonDirection HloInstruction::comparison_direction() const {
return Cast<HloCompareInstruction>(this)->direction();
}
ComparisonOrder HloInstruction::comparison_order() const {
return Cast<HloCompareInstruction>(this)->order();
}
const TriangularSolveOptions& HloInstruction::triangular_solve_options() const {
return Cast<HloTriangularSolveInstruction>(this)->triangular_solve_options();
}
const CholeskyOptions& HloInstruction::cholesky_options() const {
return Cast<HloCholeskyInstruction>(this)->cholesky_options();
}
const std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>&
HloInstruction::output_operand_aliasing() const {
return Cast<HloCallableInstruction>(this)->output_to_operand_aliasing();
}
void HloInstruction::set_output_to_operand_aliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
aliasing) {
Cast<HloCallableInstruction>(this)->set_output_to_operand_aliasing(
std::move(aliasing));
}
std::shared_ptr<OriginalValue> HloInstruction::original_value() const {
return original_value_;
}
void HloInstruction::set_original_value(
std::shared_ptr<OriginalValue> original_value) {
original_value_ = original_value;
}
} | #include "xla/hlo/ir/hlo_instruction.h"
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
class HloInstructionTest : public HloTestBase {
protected:
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
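// Test visitor that records each node's operand/user counts and checks the
// post-order contract: every handler expects its operands to have been
// visited already and the node itself not yet.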
class OpAndUserCollectingVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("not implemented %s",
HloOpcodeString(hlo_instruction->opcode()));
}
absl::Status HandleParameter(HloInstruction* parameter) override {
EXPECT_FALSE(count_.contains(parameter));
count_[parameter] = GetCountsForNode(parameter);
return absl::OkStatus();
}
absl::Status HandleConstant(HloInstruction* constant) override {
EXPECT_FALSE(count_.contains(constant));
count_[constant] = GetCountsForNode(constant);
return absl::OkStatus();
}
absl::Status HandleAdd(HloInstruction* add) override {
auto lhs = add->operand(0);
auto rhs = add->operand(1);
EXPECT_FALSE(count_.contains(add));
EXPECT_TRUE(count_.contains(lhs));
EXPECT_TRUE(count_.contains(rhs));
count_[add] = GetCountsForNode(add);
return absl::OkStatus();
}
absl::Status HandleNegate(HloInstruction* negate) override {
auto operand = negate->operand(0);
EXPECT_FALSE(count_.contains(negate));
EXPECT_TRUE(count_.contains(operand));
count_[negate] = GetCountsForNode(negate);
return absl::OkStatus();
}
absl::Status HandleMap(HloInstruction* map) override {
EXPECT_FALSE(count_.contains(map));
for (HloInstruction* arg : map->operands()) {
EXPECT_TRUE(count_.contains(arg));
}
count_[map] = GetCountsForNode(map);
return absl::OkStatus();
}
absl::Status HandleReduce(HloInstruction* reduce) override {
auto arg = reduce->operand(0);
auto init_value = reduce->operand(1);
EXPECT_FALSE(count_.contains(reduce));
EXPECT_TRUE(count_.contains(arg));
EXPECT_TRUE(count_.contains(init_value));
count_[reduce] = GetCountsForNode(reduce);
return absl::OkStatus();
}
int64_t NumOperands(const HloInstruction* node) {
auto count_iterator = count_.find(node);
EXPECT_NE(count_.end(), count_iterator);
return count_iterator->second.operand_count;
}
int64_t NumUsers(const HloInstruction* node) {
auto count_iterator = count_.find(node);
EXPECT_NE(count_.end(), count_iterator);
return count_iterator->second.user_count;
}
private:
struct NumOpsAndUsers {
int64_t operand_count;
int64_t user_count;
};
NumOpsAndUsers GetCountsForNode(const HloInstruction* node) {
NumOpsAndUsers counts{node->operand_count(), node->user_count()};
return counts;
}
absl::flat_hash_map<const HloInstruction*, NumOpsAndUsers> count_;
};
TEST_F(HloInstructionTest, BasicProperties) {
auto parameter = HloInstruction::CreateParameter(1, r0f32_, "foo");
EXPECT_EQ(HloOpcode::kParameter, parameter->opcode());
EXPECT_TRUE(ShapeUtil::IsScalarWithElementType(parameter->shape(), F32));
EXPECT_FALSE(ShapeUtil::IsScalarWithElementType(parameter->shape(), S32));
  EXPECT_EQ(0, parameter->operand_count());
}
TEST_F(HloInstructionTest, UserWithTwoOperands) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(add->operands(), UnorderedElementsAre(foo, bar));
EXPECT_THAT(foo->users(), UnorderedElementsAre(add));
EXPECT_THAT(bar->users(), UnorderedElementsAre(add));
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(2, visitor.NumOperands(add));
EXPECT_EQ(0, visitor.NumUsers(add));
EXPECT_EQ(1, visitor.NumUsers(foo));
EXPECT_EQ(1, visitor.NumUsers(bar));
}
TEST_F(HloInstructionTest, MultipleUsers) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, foo->user_count());
EXPECT_EQ(1, bar->user_count());
EXPECT_EQ(0, exp1->user_count());
EXPECT_EQ(0, exp2->user_count());
EXPECT_EQ(0, add->user_count());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(2, visitor.NumOperands(add));
EXPECT_EQ(3, visitor.NumUsers(foo));
}
TEST_F(HloInstructionTest, RepeatedUser) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(1, foo->user_count());
EXPECT_EQ(2, add->operand_count());
}
TEST_F(HloInstructionTest, MultipleUsersAndOperands) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, c0));
auto addright = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c0, param1));
auto addtotal = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, addleft, addright));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(addtotal->Accept(&visitor));
EXPECT_EQ(2, visitor.NumUsers(c0));
EXPECT_EQ(2, visitor.NumOperands(addleft));
EXPECT_EQ(2, visitor.NumOperands(addright));
EXPECT_EQ(2, visitor.NumOperands(addtotal));
}
TEST_F(HloInstructionTest, MultipleUsersAndOperandsWithUnaryOps) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto neg1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, c0));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, neg1));
auto addright = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, neg1, param1));
auto addtotal = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, addleft, addright));
auto neg2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, addtotal));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(neg2->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(c0));
EXPECT_EQ(2, visitor.NumUsers(neg1));
EXPECT_EQ(2, visitor.NumOperands(addleft));
EXPECT_EQ(2, visitor.NumOperands(addright));
EXPECT_EQ(2, visitor.NumOperands(addtotal));
EXPECT_EQ(1, visitor.NumOperands(neg2));
EXPECT_EQ(0, visitor.NumUsers(neg2));
}
TEST_F(HloInstructionTest, TrivialMap) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape f32a100x10 = ShapeUtil::MakeShape(F32, {100, 10});
auto module = CreateNewVerifiedModule();
auto embedded_builder = HloComputation::Builder("f32+1");
auto param = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto value = embedded_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, value));
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10, {param0}, add_f32));
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(map->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(param0));
EXPECT_EQ(0, visitor.NumUsers(map));
EXPECT_EQ(1, visitor.NumOperands(map));
}
TEST_F(HloInstructionTest, TrivialReduce) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape f32v100 = ShapeUtil::MakeShape(F32, {100});
Shape f32a100x10 = ShapeUtil::MakeShape(F32, {100, 10});
auto embedded_builder = HloComputation::Builder("f32+f32");
auto paramx = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto paramy = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "y"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, paramx, paramy));
auto module = CreateNewVerifiedModule();
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
  auto reduce = builder.AddInstruction(
      HloInstruction::CreateReduce(f32v100, param0, const0,
                                   /*dimensions_to_reduce=*/{1}, add_f32));
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(reduce->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(param0));
EXPECT_EQ(1, visitor.NumUsers(const0));
EXPECT_EQ(0, visitor.NumUsers(reduce));
EXPECT_EQ(2, visitor.NumOperands(reduce));
}
TEST_F(HloInstructionTest, ReplaceUseInBinaryOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto add_foofoo = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
add_foobar, add_foofoo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_EQ(1, bar->user_count());
ASSERT_IS_OK(foo->ReplaceUseWith(add_foofoo, bar));
EXPECT_EQ(1, foo->user_count());
EXPECT_EQ(2, bar->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(add_foobar));
EXPECT_THAT(add_foobar->operands(), ElementsAre(foo, bar));
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, add_foofoo));
EXPECT_THAT(add_foobar->operands(), ElementsAre(foo, bar));
EXPECT_THAT(add_foofoo->operands(), ElementsAre(bar, bar));
}
TEST_F(HloInstructionTest, ReplaceUseInVariadicOp) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto baz =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0f32_, "baz"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({foo, bar, baz, foo}));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(tuple, add_foobar));
ASSERT_IS_OK(foo->ReplaceUseWith(tuple, bar));
EXPECT_THAT(foo->users(), UnorderedElementsAre(add_foobar));
EXPECT_THAT(tuple->operands(), ElementsAre(bar, bar, baz, bar));
}
TEST_F(HloInstructionTest, ReplaceUseInUnaryOp) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kLog, foo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(exp, log));
EXPECT_EQ(0, bar->user_count());
ASSERT_IS_OK(foo->ReplaceUseWith(exp, bar));
EXPECT_EQ(1, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(log));
EXPECT_THAT(log->operands(), ElementsAre(foo));
EXPECT_EQ(1, bar->user_count());
EXPECT_EQ(*bar->users().begin(), exp);
EXPECT_EQ(1, exp->operands().size());
EXPECT_EQ(*exp->operands().begin(), bar);
}
TEST_F(HloInstructionTest, ReplaceAllUsesWithInBinaryOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto add_foofoo = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
add_foobar, add_foofoo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_EQ(1, bar->user_count());
ASSERT_IS_OK(foo->ReplaceAllUsesWith(bar));
EXPECT_EQ(0, foo->user_count());
EXPECT_EQ(2, bar->user_count());
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, add_foofoo));
}
TEST_F(HloInstructionTest, ReplaceAllUsesInMultipleOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({foo, bar}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, foo->user_count());
EXPECT_EQ(2, bar->user_count());
ASSERT_IS_OK(foo->ReplaceAllUsesWith(bar));
EXPECT_EQ(0, foo->user_count());
EXPECT_EQ(3, bar->user_count());
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, exp, tuple));
}
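// Visitor that records both the visit order and the post-process order so
// tests can check that Postprocess runs exactly once per visited node.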
class NodeCollectorAndPostProcessor : public DfsHloVisitorWithDefault {
public:
NodeCollectorAndPostProcessor() {}
absl::Status Postprocess(HloInstruction* hlo) override {
post_processed_nodes_.push_back(hlo);
return absl::OkStatus();
}
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
visited_nodes_.push_back(hlo_instruction);
return absl::OkStatus();
}
const std::vector<const HloInstruction*>& visited_nodes() {
return visited_nodes_;
}
const std::vector<const HloInstruction*>& post_processed_nodes() {
return post_processed_nodes_;
}
private:
std::vector<const HloInstruction*> visited_nodes_;
std::vector<const HloInstruction*> post_processed_nodes_;
};
bool Distinct(const std::vector<const HloInstruction*>& vec) {
std::set<const HloInstruction*> distinct_nodes(vec.begin(), vec.end());
return distinct_nodes.size() == vec.size();
}
TEST_F(HloInstructionTest, PostProcessAllVisitedNodes) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kLog, foo));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, exp, log));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
NodeCollectorAndPostProcessor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(visitor.visited_nodes(), visitor.post_processed_nodes());
EXPECT_TRUE(Distinct(visitor.visited_nodes()));
}
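// Visiting across computations (cross_computation=true) must also
// post-process the nodes of the called computation exactly once.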
TEST_F(HloInstructionTest, PostProcessAllVisitedNodesMultiComputation) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
c.3 = f32[] add(c.1, c.2)
c.4 = f32[] constant(4)
ROOT ret = f32[] multiply(c.4, c.3)
}
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* add1 = FindInstruction(module.get(), "add.1");
EXPECT_EQ(add1, module->entry_computation()->root_instruction());
NodeCollectorAndPostProcessor visitor;
  ASSERT_IS_OK(add1->Accept(&visitor, /*call_finish_visit=*/true,
                            /*ignore_control_predecessors=*/false,
                            /*cross_computation=*/true));
EXPECT_EQ(visitor.visited_nodes(), visitor.post_processed_nodes());
EXPECT_TRUE(Distinct(visitor.visited_nodes()));
}
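// Fusing a single instruction splices the fusion in place of it, taking
// over its operands and users.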
TEST_F(HloInstructionTest, SingletonFusionOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, BinaryFusionOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{add}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(fusion));
EXPECT_THAT(constant2->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, ChainFusionOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp3, exp2, exp1}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, PreserveMetadataInFusionAndClone) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
OpMetadata metadata;
metadata.set_op_name("tf_op");
exp1->set_metadata(metadata);
exp2->set_metadata(metadata);
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp2, exp1}, HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(protobuf_util::ProtobufEquals(metadata, fusion->metadata()));
EXPECT_TRUE(protobuf_util::ProtobufEquals(
metadata, fusion->fused_expression_root()->metadata()));
EXPECT_TRUE(protobuf_util::ProtobufEquals(
metadata, fusion->fused_expression_root()->operand(0)->metadata()));
std::string new_name = "foobarfoo";
auto cloned = fusion->CloneWithNewOperands(fusion->shape(), {}, new_name);
EXPECT_TRUE(protobuf_util::ProtobufEquals(metadata, fusion->metadata()));
size_t index = cloned->name().rfind(new_name);
EXPECT_TRUE(index != std::string::npos);
}
TEST_F(HloInstructionTest, BinaryCallOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({add});
EXPECT_THAT(call->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(call));
EXPECT_THAT(constant2->users(), ElementsAre(call));
}
TEST_F(HloInstructionTest, ChainCallOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({exp3, exp2, exp1});
EXPECT_THAT(call->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(call));
}
TEST_F(HloInstructionTest, MultiOutputCallOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto exp4 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, exp3, exp4));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({exp3, exp2, exp1});
  call->AppendInstructionIntoCalledComputation(exp4, /*add_output=*/true);
EXPECT_THAT(call->operands(), ElementsAre(constant));
EXPECT_EQ(add->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_THAT(add->operand(0)->operands(), ElementsAre(call));
EXPECT_EQ(add->operand(1)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_THAT(add->operand(1)->operands(), ElementsAre(call));
}
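// CreateAsyncInstructions wraps the add in an async-start/async-done pair
// whose wrapped computation runs on the requested execution thread.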
TEST_F(HloInstructionTest, AsyncOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK_AND_ASSIGN(
      auto* async_done,
      computation->CreateAsyncInstructions(
          add, /*context_shapes=*/{ShapeUtil::MakeScalarShape(U32)},
          /*async_execution_thread=*/"parallel_thread"));
auto* async_start = async_done->operand(0);
EXPECT_EQ(async_start->shape().tuple_shapes_size(), 3);
EXPECT_EQ(async_start->async_execution_thread(), "parallel_thread");
EXPECT_EQ(async_done->async_execution_thread(), "parallel_thread");
EXPECT_TRUE(ShapeUtil::Equal(async_start->shape().tuple_shapes(2),
ShapeUtil::MakeScalarShape(U32)));
EXPECT_EQ(async_start->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_EQ(async_done->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_THAT(async_start->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(async_start));
EXPECT_THAT(constant2->users(), ElementsAre(async_start));
EXPECT_EQ(computation->root_instruction(), async_done);
}
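// Control dependencies of the wrapped instruction must migrate to the
// async-start (predecessors) and async-done (successors).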
TEST_F(HloInstructionTest, AsyncOpWithDeps) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant3, constant4));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
TF_ASSERT_OK(add1->AddControlDependencyTo(add));
TF_ASSERT_OK(add->AddControlDependencyTo(add2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
  TF_ASSERT_OK_AND_ASSIGN(
      auto* async_done,
      computation->CreateAsyncInstructions(
          add, /*context_shapes=*/{ShapeUtil::MakeScalarShape(U32)},
          /*async_execution_thread=*/"parallel_thread"));
auto* async_start = async_done->operand(0);
EXPECT_EQ(async_start->control_predecessors().size(), 1);
EXPECT_EQ(async_start->control_predecessors()[0], add1);
EXPECT_EQ(async_done->control_successors().size(), 1);
EXPECT_EQ(async_done->control_successors()[0], add2);
EXPECT_EQ(async_start->shape().tuple_shapes_size(), 3);
EXPECT_EQ(async_start->async_execution_thread(), "parallel_thread");
EXPECT_EQ(async_done->async_execution_thread(), "parallel_thread");
EXPECT_TRUE(ShapeUtil::Equal(async_start->shape().tuple_shapes(2),
ShapeUtil::MakeScalarShape(U32)));
EXPECT_EQ(async_start->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_EQ(async_done->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_THAT(async_start->operands(), ElementsAre(constant1, constant2));
}
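// Cloning an outfeed must preserve its outfeed_shape, including its layout,
// which can differ from the operand's layout.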
TEST_F(HloInstructionTest, PreserveOutfeedShapeThroughClone) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto shape10 = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0});
auto shape01 = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1});
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto outfeed10 = builder.AddInstruction(
HloInstruction::CreateOutfeed(shape10, constant, token, ""));
auto outfeed01 = builder.AddInstruction(
HloInstruction::CreateOutfeed(shape01, constant, token, ""));
auto clone01 = builder.AddInstruction(outfeed01->Clone());
auto clone10 = builder.AddInstruction(outfeed10->Clone());
EXPECT_TRUE(ShapeUtil::Equal(clone01->outfeed_shape(), shape01));
EXPECT_TRUE(ShapeUtil::Equal(clone10->outfeed_shape(), shape10));
}
TEST_F(HloInstructionTest, PreserveTupleShapeThroughClone) {
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
*ShapeUtil::GetMutableSubshape(tuple->mutable_shape(), {0})
->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
*ShapeUtil::GetMutableSubshape(tuple->mutable_shape(), {1})
->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple_clone = tuple->Clone();
EXPECT_TRUE(ShapeUtil::Equal(tuple_clone->shape(), tuple->shape()));
}
TEST_F(HloInstructionTest, PreserveShardingThroughCompatibleClone) {
HloSharding sharding = HloSharding::AssignDevice(5);
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
HloSharding tuple_sharding =
HloSharding::SingleTuple(tuple->shape(), sharding);
tuple->set_sharding(tuple_sharding);
auto clone_shape = ShapeUtil::MakeShape(F32, {3, 3});
clone_shape = ShapeUtil::MakeTupleShape({clone_shape, clone_shape});
auto tuple_clone = tuple->CloneWithNewOperands(clone_shape, {});
EXPECT_EQ(tuple_clone->sharding(), tuple_sharding);
}
TEST_F(HloInstructionTest,
DoNotPreserveShardingThroughTupleTreeIncompatibleClone) {
HloSharding sharding = HloSharding::AssignDevice(5);
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
tuple->set_sharding(HloSharding::SingleTuple(tuple->shape(), sharding));
auto clone_shape = ShapeUtil::MakeShape(F32, {2, 2});
clone_shape =
ShapeUtil::MakeTupleShape({clone_shape, clone_shape, clone_shape});
auto tuple_clone = tuple->CloneWithNewOperands(clone_shape, {});
EXPECT_FALSE(tuple_clone->has_sharding());
}
TEST_F(HloInstructionTest,
DoNotPreserveShardingThroughLeafRankIncompatibleClone) {
HloSharding sharding = HloSharding::AssignDevice(5);
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
tuple->set_sharding(HloSharding::SingleTuple(tuple->shape(), sharding));
auto clone_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
clone_shape = ShapeUtil::MakeTupleShape({clone_shape, clone_shape});
auto tuple_clone = tuple->CloneWithNewOperands(clone_shape, {});
EXPECT_FALSE(tuple_clone->has_sharding());
}
TEST_F(HloInstructionTest, FusionOpWithCalledComputations) {
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto module = CreateNewVerifiedModule();
auto make_map_computation = [&]() {
auto builder = HloComputation::Builder("FusionMap");
builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
return module->AddEmbeddedComputation(builder.Build());
};
HloComputation* computation_x = make_map_computation();
HloComputation* computation_y = make_map_computation();
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto map_1_x = builder.AddInstruction(
HloInstruction::CreateMap(scalar_shape, {constant}, computation_x));
auto map_2_x = builder.AddInstruction(
HloInstruction::CreateMap(scalar_shape, {map_1_x}, computation_x));
auto map_3_y = builder.AddInstruction(
HloInstruction::CreateMap(scalar_shape, {map_2_x}, computation_y));
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{map_3_y}, HloInstruction::FusionKind::kLoop);
auto* fused_computation = fusion->fused_instructions_computation();
EXPECT_THAT(fusion->called_computations(), ElementsAre(fused_computation));
fusion->FuseInstruction(map_2_x);
EXPECT_THAT(fusion->called_computations(), ElementsAre(fused_computation));
fusion->FuseInstruction(map_1_x);
EXPECT_THAT(fusion->called_computations(), ElementsAre(fused_computation));
}
TEST_F(HloInstructionTest, ComplexFusionOp) {
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto c2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.1f)));
auto c3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(9.0f)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c1, c2));
auto clamp = builder.AddInstruction(
HloInstruction::CreateTernary(r0f32_, HloOpcode::kClamp, c2, add, add));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, add));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kMultiply, exp, c3));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kSubtract, mul, clamp));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({sub, sub, mul, c1}));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{tuple, sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(c1, c3, c2));
EXPECT_THAT(c1->users(), ElementsAre(fusion));
}
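// Checks that Identical is reflexive and symmetric before returning whether
// the two instructions compare equal.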
static bool Identical(const HloInstruction& instruction1,
const HloInstruction& instruction2) {
EXPECT_TRUE(instruction1.Identical(instruction1));
EXPECT_TRUE(instruction2.Identical(instruction2));
bool is_equal = instruction1.Identical(instruction2);
EXPECT_EQ(is_equal, instruction2.Identical(instruction1));
return is_equal;
}
static bool StructuralEqual(const HloInstruction& instruction1,
const HloInstruction& instruction2) {
auto eq_operand_shapes = [](const HloInstruction* a,
const HloInstruction* b) {
return ShapeUtil::Equal(a->shape(), b->shape());
};
auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
return *a == *b;
};
EXPECT_TRUE(
instruction1.Identical(instruction1, eq_operand_shapes, eq_computations));
EXPECT_TRUE(
instruction2.Identical(instruction2, eq_operand_shapes, eq_computations));
bool is_equal =
instruction1.Identical(instruction2, eq_operand_shapes, eq_computations);
EXPECT_EQ(is_equal, instruction2.Identical(instruction1, eq_operand_shapes,
eq_computations));
return is_equal;
}
TEST_F(HloInstructionTest, IdenticalInstructions) {
auto operand1 = HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
auto operand2 = HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}}));
auto vector_operand = HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({42.0, 123.0}));
Shape shape = operand1->shape();
HloInstruction* op1 = operand1.get();
HloInstruction* op2 = operand2.get();
EXPECT_TRUE(
Identical(*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1),
*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1)));
EXPECT_FALSE(
Identical(*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1),
*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op2)));
EXPECT_FALSE(
Identical(*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1),
*HloInstruction::CreateUnary(shape, HloOpcode::kNegate, op1)));
EXPECT_TRUE(Identical(*HloInstruction::CreateTuple({op1, op2}),
*HloInstruction::CreateTuple({op1, op2})));
EXPECT_FALSE(Identical(*HloInstruction::CreateTuple({op1, op2}),
*HloInstruction::CreateTuple({op2, op1})));
EXPECT_TRUE(Identical(*HloInstruction::CreateBroadcast(shape, op1, {0, 1}),
*HloInstruction::CreateBroadcast(shape, op1, {0, 1})));
EXPECT_FALSE(Identical(*HloInstruction::CreateBroadcast(shape, op1, {0, 1}),
*HloInstruction::CreateBroadcast(shape, op1, {1, 0})));
Shape bcast_shape1 = ShapeUtil::MakeShape(F32, {2, 2, 42});
Shape bcast_shape2 = ShapeUtil::MakeShape(F32, {2, 2, 123});
EXPECT_FALSE(
Identical(*HloInstruction::CreateBroadcast(bcast_shape1, op1, {0, 1}),
*HloInstruction::CreateBroadcast(bcast_shape2, op1, {0, 1})));
EXPECT_TRUE(Identical(
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2),
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2)));
EXPECT_FALSE(Identical(
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2),
*HloInstruction::CreateBinary(shape, HloOpcode::kDivide, op2, op1)));
EXPECT_FALSE(Identical(
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2),
*HloInstruction::CreateBinary(shape, HloOpcode::kDivide, op1, op2)));
}
TEST_F(HloInstructionTest, IdenticalCallInstructions) {
const char* const hlo_string = R"(
HloModule Module
subcomp1 (x: f32[]) -> f32[] {
x = f32[] parameter(0)
ROOT n = f32[] sine(x)
}
subcomp2 (x: f32[]) -> f32[] {
x = f32[] parameter(0)
ROOT n = f32[] cosine(x)
}
ENTRY entry (param: f32[]) -> (f32[], f32[], f32[]) {
p = f32[] parameter(0)
t1 = f32[] call(p), to_apply=subcomp1
t2 = f32[] call(p), to_apply=subcomp1
t3 = f32[] call(p), to_apply=subcomp2
ROOT t = (f32[], f32[], f32[]) tuple(t1, t2, t3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto* root = module->entry_computation()->root_instruction();
auto* t1 = root->operand(0);
auto* t2 = root->operand(1);
auto* t3 = root->operand(2);
EXPECT_TRUE(StructuralEqual(*t1, *t2));
EXPECT_FALSE(StructuralEqual(*t1, *t3));
}
TEST_F(HloInstructionTest, FunctionVisitor) {
const Shape f32 = ShapeUtil::MakeShape(F32, {});
HloComputation::Builder builder(TestName());
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, f32, "0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32, HloOpcode::kNegate, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(f32, HloOpcode::kExp, param));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32, HloOpcode::kAdd, negate, exp));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
int visit_num = 0;
absl::flat_hash_map<HloInstruction*, int> visit_order;
FunctionVisitor visitor([&visit_num, &visit_order](HloInstruction* inst) {
EXPECT_FALSE(visit_order.contains(inst));
visit_order[inst] = visit_num;
visit_num++;
return absl::OkStatus();
});
EXPECT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(0, visit_order.at(param));
EXPECT_TRUE(visit_order.at(exp) == 1 || visit_order.at(exp) == 2);
EXPECT_TRUE(visit_order.at(negate) == 1 || visit_order.at(negate) == 2);
EXPECT_NE(visit_order.at(exp), visit_order.at(negate));
EXPECT_EQ(3, visit_order.at(add));
}
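// An add of two same-shaped arrays is elementwise on every operand.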
TEST_F(HloInstructionTest, FullyElementwise) {
const Shape r1f32 = ShapeUtil::MakeShape(F32, {5});
HloComputation::Builder builder(TestName());
auto x =
builder.AddInstruction(HloInstruction::CreateParameter(0, r1f32, "x"));
auto y =
builder.AddInstruction(HloInstruction::CreateParameter(1, r1f32, "y"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, x, y));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(add->IsElementwise());
for (int i = 0; i < add->operand_count(); ++i) {
EXPECT_TRUE(add->IsElementwiseOnOperand(i));
}
}
TEST_F(HloInstructionTest, MapIsElementwise) {
auto module = CreateNewVerifiedModule();
const Shape r2f32 =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10}, {1, 0});
HloComputation::Builder builder(TestName());
HloComputation::Builder map_builder("id");
map_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0"));
auto map_computation = module->AddEmbeddedComputation(map_builder.Build());
auto x =
builder.AddInstruction(HloInstruction::CreateParameter(0, r2f32, "x"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(r2f32, {x}, map_computation));
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(map->IsElementwise());
}
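// A fused broadcast makes the fusion non-elementwise on the broadcast
// operand while staying elementwise on the others.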
TEST_F(HloInstructionTest, PartiallyElementwise) {
const Shape r1f32 = ShapeUtil::MakeShape(F32, {5});
const Shape r2f32 = ShapeUtil::MakeShape(F32, {3, 5});
HloComputation::Builder builder("PartiallyElementwise");
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, r2f32, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, r2f32, "p1"));
HloInstruction* p2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, r2f32, "p2"));
HloInstruction* p3 =
builder.AddInstruction(HloInstruction::CreateParameter(3, r1f32, "p3"));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, p0, p1));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kDivide, mul, p2));
HloInstruction* broadcast =
builder.AddInstruction(HloInstruction::CreateBroadcast(r2f32, p3, {1}));
HloInstruction* max = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMaximum, div, broadcast));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{max, broadcast, div, mul}, HloInstruction::FusionKind::kLoop);
EXPECT_FALSE(fusion->IsElementwise());
for (int64_t operand_idx = 0; operand_idx < fusion->operand_count();
++operand_idx) {
const HloInstruction* operand = fusion->operand(operand_idx);
if (operand == p3) {
EXPECT_FALSE(fusion->IsElementwiseOnOperand(operand_idx));
} else {
EXPECT_TRUE(fusion->IsElementwiseOnOperand(operand_idx));
}
}
}
TEST_F(HloInstructionTest, PartiallyElementwiseWithReuse) {
const Shape r0f32 = ShapeUtil::MakeShape(F32, {});
const Shape r1f32 = ShapeUtil::MakeShape(F32, {5});
HloComputation::Builder builder("PartiallyElementwiseWithReuse");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, r1f32, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "y"));
HloInstruction* broadcast =
builder.AddInstruction(HloInstruction::CreateBroadcast(r1f32, y, {}));
HloInstruction* min = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kMinimum, x, broadcast));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
r1f32, HloOpcode::kSubtract, min, broadcast));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{sub, broadcast, min}, HloInstruction::FusionKind::kLoop);
EXPECT_FALSE(fusion->IsElementwise());
for (int64_t operand_idx = 0; operand_idx < fusion->operand_count();
++operand_idx) {
if (fusion->operand(operand_idx) == y) {
EXPECT_FALSE(fusion->IsElementwiseOnOperand(operand_idx));
} else {
EXPECT_TRUE(fusion->IsElementwiseOnOperand(operand_idx));
}
}
}
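// Cloning a fusion must reproduce the shapes throughout the fused
// expression tree, not just the fusion's own shape.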
TEST_F(HloInstructionTest, CloneOfFusionPreservesShape) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{dot, reshape}, HloInstruction::FusionKind::kLoop);
auto fusion2 = fusion->Clone();
const HloInstruction* root = fusion->fused_expression_root();
const HloInstruction* root2 = fusion2->fused_expression_root();
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), root2->shape()));
EXPECT_TRUE(
ShapeUtil::Equal(root->operand(0)->shape(), root2->operand(0)->shape()));
EXPECT_TRUE(
ShapeUtil::Equal(root->operand(1)->shape(), root2->operand(1)->shape()));
EXPECT_TRUE(ShapeUtil::Equal(root->operand(1)->operand(0)->shape(),
root2->operand(1)->operand(0)->shape()));
EXPECT_TRUE(StructuralEqual(*fusion, *fusion2));
}
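// FuseInstruction clones the op into the fusion computation but leaves the
// original instruction alive in the parent computation.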
TEST_F(HloInstructionTest, FuseInstructionKeepsInstruction) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT add = f32[32,32]{1,0} add(p0, p1)
}
ENTRY reduce {
p2 = f32[32,32]{1,0} parameter(0)
p3 = f32[32,32]{1,0} parameter(1)
c1 = f32[] constant(1)
broadcast = f32[32,32]{1,0} broadcast(c1), dimensions={}
mul = f32[32,32]{1,0} multiply(p2, p3)
ROOT add = f32[32,32]{1,0} fusion(mul, broadcast), kind=kLoop, calls=fused_add
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* fused_add = module->entry_computation()->root_instruction();
HloInstruction* mul = fused_add->mutable_operand(0);
EXPECT_EQ(1, mul->user_count());
fused_add->FuseInstruction(mul);
EXPECT_EQ(0, mul->user_count());
EXPECT_EQ(fused_add->parent(), mul->parent());
}
TEST_F(HloInstructionTest, FuseInstructionIntoMultiOutputKeepsInstruction) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT add = f32[32,32]{1,0} add(p0, p1)
}
ENTRY reduce {
p2 = f32[32,32]{1,0} parameter(0)
p3 = f32[32,32]{1,0} parameter(1)
c1 = f32[] constant(1)
mul = f32[32,32]{1,0} multiply(p2, p3)
broadcast = f32[32,32]{1,0} broadcast(c1), dimensions={}
add = f32[32,32]{1,0} fusion(mul, broadcast), kind=kLoop, calls=fused_add
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(mul, add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* root = module->entry_computation()->root_instruction();
HloInstruction* mul = root->mutable_operand(0);
HloInstruction* fused_add = root->mutable_operand(1);
EXPECT_EQ(2, mul->user_count());
fused_add->FuseInstructionIntoMultiOutput(mul);
EXPECT_EQ(0, mul->user_count());
EXPECT_EQ(root->parent(), mul->parent());
}
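// After replacing x with y, the fusion should deduplicate its operands down
// to a single parameter.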
TEST_F(HloInstructionTest, NoRedundantFusionOperandsAfterReplacingUse) {
const Shape s = ShapeUtil::MakeShape(F32, {10, 10});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
s, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{dot, reshape}, HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(x->ReplaceAllUsesWith(y).ok());
EXPECT_THAT(fusion->operands(), UnorderedElementsAre(y));
EXPECT_EQ(fusion->fused_instructions_computation()->num_parameters(), 1);
}
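// Structural equality distinguishes fusions with different fused bodies but
// holds between a fusion and its clone.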
TEST_F(HloInstructionTest, FusionEquality) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
auto parameter =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, parameter));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, parameter));
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp}, HloInstruction::FusionKind::kLoop);
auto* fusion2 = computation->CreateFusionInstruction(
{neg}, HloInstruction::FusionKind::kLoop);
EXPECT_FALSE(StructuralEqual(*fusion, *fusion2));
auto clone = fusion->Clone();
EXPECT_TRUE(StructuralEqual(*fusion, *clone));
}
TEST_F(HloInstructionTest, NestedFusionEquality) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto a = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
auto b = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto b_t = builder.AddInstruction(
HloInstruction::CreateTranspose(data_shape, b, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
data_shape, a, b_t, dot_dnums, DefaultPrecisionConfig(2)));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {}));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kAdd, dot, add_operand));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kSubtract, dot, add_operand));
builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kMultiply, add, sub));
auto computation = module->AddEntryComputation(builder.Build());
auto nested_fusion = computation->CreateFusionInstruction(
{dot, b_t}, HloInstruction::FusionKind::kLoop);
auto fusion = computation->CreateFusionInstruction(
{add, nested_fusion}, HloInstruction::FusionKind::kOutput);
auto fusion2 = computation->CreateFusionInstruction(
{sub, nested_fusion}, HloInstruction::FusionKind::kOutput);
auto clone = fusion->Clone();
EXPECT_TRUE(StructuralEqual(*fusion, *clone));
EXPECT_FALSE(StructuralEqual(*fusion, *fusion2));
}
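// Clone name mangling: ".clone" suffixes are numbered on repeated cloning,
// and an existing numeric suffix is incremented rather than re-suffixed.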
TEST_F(HloInstructionTest, CloneSuffixNames) {
auto foo =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "foo");
EXPECT_EQ(foo->Clone()->name(), "foo.clone");
EXPECT_EQ(foo->Clone()->Clone()->name(), "foo.clone2");
EXPECT_EQ(foo->Clone()->Clone()->Clone()->name(), "foo.clone3");
EXPECT_EQ(foo->Clone("bar")->name(), "foo.bar");
EXPECT_EQ(foo->Clone("bar")->Clone("bar")->name(), "foo.bar2");
EXPECT_EQ(foo->Clone("bar")->Clone("bar")->Clone()->name(), "foo.bar2.clone");
auto foo_baz = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.baz");
EXPECT_EQ(foo_baz->Clone()->name(), "foo.baz.clone");
auto foo_clone234 = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.clone234");
EXPECT_EQ(foo_clone234->Clone()->name(), "foo.clone235");
auto foo_clonexyz = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.clonexyz");
EXPECT_EQ(foo_clonexyz->Clone()->name(), "foo.clonexyz.clone");
auto foo_clone_clone3 = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.clone.clone3");
EXPECT_EQ(foo_clone_clone3->Clone()->name(), "foo.clone.clone4");
}
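// ToString for dot, and how HloPrintOptions toggles metadata, operand
// shapes, percent signs, and layouts in the output.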
TEST_F(HloInstructionTest, StringifyDot) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto options = HloPrintOptions().set_print_metadata(false);
EXPECT_EQ(dot->ToString(options),
"%dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} "
"%transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}");
auto options2 = HloPrintOptions()
.set_print_metadata(false)
.set_print_operand_shape(false)
.set_print_percent(false)
.set_include_layout_in_shapes(false);
EXPECT_EQ(dot->ToString(options2),
"dot = f32[5,20] dot(x, transpose), "
"lhs_contracting_dims={1}, rhs_contracting_dims={0}");
}
TEST_F(HloInstructionTest, StringifySparseDot) {
HloComputation::Builder builder("SparseDot");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 16}), "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {32, 20}), "y"));
  HloInstruction* meta = builder.AddInstruction(HloInstruction::CreateParameter(
      2, ShapeUtil::MakeShape(U16, {5, 2}), "meta"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
SparsityDescriptor sparsity_descriptor;
sparsity_descriptor.set_type(SparsityType::SPARSITY_STRUCTURED_N_M);
sparsity_descriptor.set_n(2);
sparsity_descriptor.set_m(4);
sparsity_descriptor.set_index(0);
sparsity_descriptor.set_dimension(1);
std::vector<HloInstruction*> meta_operands = {meta};
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
ShapeUtil::MakeShape(F32, {5, 20}), x, y, dot_dnums,
DefaultPrecisionConfig(2), {sparsity_descriptor}, meta_operands));
EXPECT_EQ(dot->ToString(),
"%dot = f32[5,20]{1,0} dot(f32[5,16]{1,0} %x, f32[32,20]{1,0} %y, "
"u16[5,2]{1,0} %meta), lhs_contracting_dims={1}, "
"rhs_contracting_dims={0}, sparsity=L.1@2:4");
}
TEST_F(HloInstructionTest, StringifyConditional) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
builder.AddInstruction(HloInstruction::CreateDot(sout, x, reshape, dot_dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto options = HloPrintOptions().set_print_metadata(false);
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
sout, pred, x, computation, x, computation));
EXPECT_EQ(conditional->ToString(options),
"%conditional = f32[5,20]{1,0} conditional(pred[] %constant, "
"f32[5,10]{1,0} %x, f32[5,10]{1,0} %x), "
"true_computation=%TransposeDot, false_computation=%TransposeDot");
}
TEST_F(HloInstructionTest, StringifyWhile) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
builder.AddInstruction(HloInstruction::CreateDot(sout, x, reshape, dot_dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto options = HloPrintOptions().set_print_metadata(false);
HloInstruction* loop = builder.AddInstruction(
HloInstruction::CreateWhile(sout, computation, computation, x));
EXPECT_EQ(loop->ToString(options),
"%while = f32[5,20]{1,0} while(f32[5,10]{1,0} %x), "
"condition=%TransposeDot, body=%TransposeDot");
}
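// StatisticsViz accessors: has_statistics() becomes true only once a
// statistic is added, and stat_index_to_visualize selects which one to show.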
TEST_F(HloInstructionTest, GetSetStatisticsViz) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 10});
HloComputation::Builder builder(TestName());
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
StatisticsViz statistics_viz;
statistics_viz.set_stat_index_to_visualize(-1);
x->set_statistics_viz(statistics_viz);
EXPECT_FALSE(x->has_statistics());
EXPECT_EQ(x->statistics_viz().stat_index_to_visualize(), -1);
Statistic statistic;
statistic.set_stat_name("stat-1");
statistic.set_stat_val(30.0);
x->add_single_statistic(statistic);
x->set_stat_index_to_visualize(0);
EXPECT_TRUE(x->has_statistics());
EXPECT_TRUE(
protobuf_util::ProtobufEquals(x->statistic_to_visualize(), statistic));
statistic.set_stat_val(40.0);
*statistics_viz.add_statistics() = statistic;
x->set_statistics_viz(statistics_viz);
EXPECT_TRUE(
protobuf_util::ProtobufEquals(x->statistics_viz(), statistics_viz));
}
TEST_F(HloInstructionTest, StringifyStatisticsViz) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 10});
HloComputation::Builder builder(TestName());
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "y"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, x, y));
add->set_statistics_viz({});
EXPECT_EQ(add->ToString(),
"%add = f32[5,10]{1,0} add(f32[5,10]{1,0} %x, f32[5,10]{1,0} %y)");
auto CreateStatisticsVizWithStatistics =
[](int64_t stat_index_to_visualize,
std::initializer_list<std::pair<absl::string_view, double>> statistics)
-> StatisticsViz {
StatisticsViz statistics_viz;
statistics_viz.set_stat_index_to_visualize(stat_index_to_visualize);
auto create_statistic = [](absl::string_view statistic_name,
double statistic_value) {
Statistic statistic;
statistic.set_stat_name(std::string(statistic_name));
statistic.set_stat_val(statistic_value);
return statistic;
};
for (const auto& [statistic_name, statistic_value] : statistics) {
*statistics_viz.add_statistics() =
create_statistic(statistic_name, statistic_value);
}
return statistics_viz;
};
add->set_statistics_viz(CreateStatisticsVizWithStatistics(
1, {{"stat-1", 33.0}, {"stat-2", 44.0}}));
EXPECT_EQ(add->ToString(),
"%add = f32[5,10]{1,0} add(f32[5,10]{1,0} %x, f32[5,10]{1,0} %y), "
"statistics={visualizing_index=1,stat-1=33,stat-2=44}");
}
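// Gather stringification spells out the dimension numbers and slice sizes.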
TEST_F(HloInstructionTest, StringifyGather_0) {
Shape input_tensor_shape = ShapeUtil::MakeShape(F32, {50, 49, 48, 47, 46});
Shape start_indices_tensor_shape =
ShapeUtil::MakeShape(S64, {10, 9, 8, 7, 5});
Shape gather_result_shape =
ShapeUtil::MakeShape(F32, {10, 9, 8, 7, 30, 29, 28, 27, 26});
HloComputation::Builder builder("Gather");
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_tensor_shape, "input_tensor"));
HloInstruction* start_indices =
builder.AddInstruction(HloInstruction::CreateParameter(
1, start_indices_tensor_shape, "start_indices"));
  HloInstruction* gather_instruction = builder.AddInstruction(
      HloInstruction::CreateGather(gather_result_shape, input, start_indices,
                                   HloGatherInstruction::MakeGatherDimNumbers(
                                       /*offset_dims=*/{4, 5, 6, 7, 8},
                                       /*collapsed_slice_dims=*/{},
                                       /*start_index_map=*/{0, 1, 2, 3, 4},
                                       /*index_vector_dim=*/4),
                                   /*slice_sizes=*/{30, 29, 28, 27, 26},
                                   /*indices_are_sorted=*/false));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(gather_instruction->ToString(),
"%gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} "
"gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, "
"s64[10,9,8,7,5]{4,3,2,1,0} %start_indices), "
"offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, "
"start_index_map={0,1,2,3,4}, "
"index_vector_dim=4, slice_sizes={30,29,28,27,26}");
}
TEST_F(HloInstructionTest, StringifyGather_1) {
Shape input_tensor_shape = ShapeUtil::MakeShape(F32, {50, 49, 48, 47, 46});
Shape start_indices_tensor_shape =
ShapeUtil::MakeShape(S64, {10, 9, 5, 7, 6});
Shape gather_result_shape =
ShapeUtil::MakeShape(F32, {10, 9, 7, 6, 30, 29, 28, 27, 26});
HloComputation::Builder builder("Gather");
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_tensor_shape, "input_tensor"));
HloInstruction* start_indices =
builder.AddInstruction(HloInstruction::CreateParameter(
1, start_indices_tensor_shape, "start_indices"));
  HloInstruction* gather_instruction = builder.AddInstruction(
      HloInstruction::CreateGather(gather_result_shape, input, start_indices,
                                   HloGatherInstruction::MakeGatherDimNumbers(
                                       /*offset_dims=*/{4, 5, 6, 7, 8},
                                       /*collapsed_slice_dims=*/{},
                                       /*start_index_map=*/{0, 1, 2, 3, 4},
                                       /*index_vector_dim=*/2),
                                   /*slice_sizes=*/{30, 29, 28, 27, 26},
                                   /*indices_are_sorted=*/false));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(gather_instruction->ToString(),
"%gather = f32[10,9,7,6,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} "
"gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, "
"s64[10,9,5,7,6]{4,3,2,1,0} %start_indices), "
"offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, "
"start_index_map={0,1,2,3,4}, "
"index_vector_dim=2, slice_sizes={30,29,28,27,26}");
}
TEST_F(HloInstructionTest, StringifyScatter) {
Shape input_tensor_shape = ShapeUtil::MakeShape(F32, {50, 49, 48, 47, 46});
Shape scatter_indices_tensor_shape =
ShapeUtil::MakeShape(S64, {10, 9, 5, 7, 6});
Shape scatter_updates_shape =
ShapeUtil::MakeShape(F32, {10, 9, 7, 6, 30, 29, 28, 27, 26});
HloComputation::Builder builder("Scatter");
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_tensor_shape, "input_tensor"));
HloInstruction* scatter_indices =
builder.AddInstruction(HloInstruction::CreateParameter(
1, scatter_indices_tensor_shape, "scatter_indices"));
HloInstruction* scatter_updates =
builder.AddInstruction(HloInstruction::CreateParameter(
2, scatter_updates_shape, "scatter_updates"));
HloComputation::Builder update_builder("Scatter.update");
update_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p1"));
update_builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "p2"));
auto module = CreateNewVerifiedModule();
auto* update_computation =
module->AddEmbeddedComputation(update_builder.Build());
  HloInstruction* scatter_instruction =
      builder.AddInstruction(HloInstruction::CreateScatter(
          input_tensor_shape, input, scatter_indices, scatter_updates,
          update_computation,
          HloScatterInstruction::MakeScatterDimNumbers(
              /*update_window_dims=*/{4, 5, 6, 7, 8},
              /*inserted_window_dims=*/{},
              /*scatter_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
              /*index_vector_dim=*/2),
          /*indices_are_sorted=*/false,
          /*unique_indices=*/false));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(
scatter_instruction->ToString(),
"%scatter = f32[50,49,48,47,46]{4,3,2,1,0} "
"scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, "
"s64[10,9,5,7,6]{4,3,2,1,0} %scatter_indices, "
"f32[10,9,7,6,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %scatter_updates), "
"update_window_dims={4,5,6,7,8}, inserted_window_dims={}, "
"scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=2, "
"to_apply=%Scatter.update");
}
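// Async custom-calls print with start/update/done syntax sugar by default;
// set_syntax_sugar_async_ops(false) prints the wrapped computation instead.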
TEST_F(HloInstructionTest, StringifyAsyncOps) {
const Shape s1 = ShapeUtil::MakeShape(F32, {10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20});
const Shape s_tuple = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({s1}), s2, ShapeUtil::MakeShape(S32, {})});
HloComputation::Builder async_builder("AsyncOp");
HloInstruction* param = async_builder.AddInstruction(
HloInstruction::CreateParameter(0, s1, "p0"));
  async_builder.AddInstruction(
      HloInstruction::CreateCustomCall(s2, {param},
                                       /*custom_call_target=*/"foo"));
std::unique_ptr<HloComputation> async_computation = async_builder.Build();
HloComputation::Builder entry_builder("Entry");
HloInstruction* entry_param = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, s1, "p0"));
  HloInstruction* async_start =
      entry_builder.AddInstruction(HloInstruction::CreateAsyncStart(
          s_tuple, {entry_param}, async_computation.get(),
          /*async_execution_thread=*/"parallel_thread"));
HloInstruction* async_update = entry_builder.AddInstruction(
HloInstruction::CreateAsyncUpdate(s_tuple, async_start));
entry_builder.AddInstruction(
HloInstruction::CreateAsyncDone(s2, async_update));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(entry_builder.Build());
module->AddEmbeddedComputation(std::move(async_computation));
const std::string expected_with_syntax_sugar =
R"(HloModule StringifyAsyncOps, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%custom-call-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", custom_call_target="foo"
%custom-call-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-start)
ROOT %custom-call-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-update)
}
)";
EXPECT_EQ(module->ToString(), expected_with_syntax_sugar);
const std::string expected_without_syntax_sugar =
R"(HloModule StringifyAsyncOps, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
%AsyncOp (p0.1: f32[10]) -> f32[20] {
%p0.1 = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %p0.1), custom_call_target="foo"
}, execution_thread="parallel_thread"
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%custom-call-start = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", calls=%AsyncOp
%custom-call-update = ((f32[10]{0}), f32[20]{0}, s32[]) async-update(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-start)
ROOT %custom-call-done = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-update)
}
)";
auto options = HloPrintOptions().set_syntax_sugar_async_ops(false);
EXPECT_EQ(module->ToString(options), expected_without_syntax_sugar);
}
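// The same sugar/no-sugar round trip for an async-wrapped reduce-scatter.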
TEST_F(HloInstructionTest, StringifyAsyncOpsWithReduceScatter) {
const Shape rs_input_shape = ShapeUtil::MakeShape(F32, {20});
const Shape rs_output_shape = ShapeUtil::MakeShape(F32, {10});
std::unique_ptr<HloComputation> add_computation;
{
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder add_builder("add");
HloInstruction* param0 = add_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* param1 = add_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "p1"));
add_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param0, param1));
add_computation = add_builder.Build();
}
std::unique_ptr<HloComputation> async_computation;
{
HloComputation::Builder async_builder("AsyncOp");
HloInstruction* param = async_builder.AddInstruction(
HloInstruction::CreateParameter(0, rs_input_shape, "pasync"));
    async_builder.AddInstruction(HloInstruction::CreateReduceScatter(
        rs_output_shape, {param}, add_computation.get(),
        CollectiveDeviceList(), /*constrain_layout=*/false,
        /*channel_id=*/std::nullopt, /*use_global_device_ids=*/false,
        /*scatter_dimension=*/0));
async_computation = async_builder.Build();
}
const Shape async_start_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({rs_input_shape}), rs_output_shape});
HloComputation::Builder entry_builder("Entry");
HloInstruction* entry_param = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, rs_input_shape, "pentry"));
  HloInstruction* async_start =
      entry_builder.AddInstruction(HloInstruction::CreateAsyncStart(
          async_start_shape, {entry_param}, async_computation.get(),
          /*async_execution_thread=*/"parallel_thread"));
HloInstruction* async_update = entry_builder.AddInstruction(
HloInstruction::CreateAsyncUpdate(async_start_shape, async_start));
entry_builder.AddInstruction(
HloInstruction::CreateAsyncDone(rs_output_shape, async_update));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(entry_builder.Build());
module->AddEmbeddedComputation(std::move(async_computation));
module->AddEmbeddedComputation(std::move(add_computation));
const std::string expected_with_syntax_sugar =
R"(HloModule StringifyAsyncOpsWithReduceScatter, entry_computation_layout={(f32[20]{0})->f32[10]{0}}
%add (p0: f32[], p1: f32[]) -> f32[] {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %p0, f32[] %p1)
}, execution_thread="parallel_thread"
ENTRY %Entry (pentry: f32[20]) -> f32[10] {
%pentry = f32[20]{0} parameter(0)
%reduce-scatter-start = ((f32[20]{0}), f32[10]{0}) reduce-scatter-start(f32[20]{0} %pentry), async_execution_thread="parallel_thread", replica_groups={}, dimensions={0}, to_apply=%add
%reduce-scatter-update = ((f32[20]{0}), f32[10]{0}) reduce-scatter-update(((f32[20]{0}), f32[10]{0}) %reduce-scatter-start)
ROOT %reduce-scatter-done = f32[10]{0} reduce-scatter-done(((f32[20]{0}), f32[10]{0}) %reduce-scatter-update)
}
)";
EXPECT_EQ(module->ToString(), expected_with_syntax_sugar);
const std::string expected_without_syntax_sugar =
R"(HloModule StringifyAsyncOpsWithReduceScatter, entry_computation_layout={(f32[20]{0})->f32[10]{0}}
%add (p0: f32[], p1: f32[]) -> f32[] {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %p0, f32[] %p1)
}, execution_thread="parallel_thread"
%AsyncOp (pasync: f32[20]) -> f32[10] {
%pasync = f32[20]{0} parameter(0)
ROOT %reduce-scatter = f32[10]{0} reduce-scatter(f32[20]{0} %pasync), replica_groups={}, dimensions={0}, to_apply=%add
}, execution_thread="parallel_thread"
ENTRY %Entry (pentry: f32[20]) -> f32[10] {
%pentry = f32[20]{0} parameter(0)
%reduce-scatter-start = ((f32[20]{0}), f32[10]{0}) async-start(f32[20]{0} %pentry), async_execution_thread="parallel_thread", calls=%AsyncOp
%reduce-scatter-update = ((f32[20]{0}), f32[10]{0}) async-update(((f32[20]{0}), f32[10]{0}) %reduce-scatter-start)
ROOT %reduce-scatter-done = f32[10]{0} async-done(((f32[20]{0}), f32[10]{0}) %reduce-scatter-update)
}
)";
auto options = HloPrintOptions().set_syntax_sugar_async_ops(false);
EXPECT_EQ(module->ToString(options), expected_without_syntax_sugar);
}
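// Canonical printing renames values to tmp_N and inlines called
// computations, so structurally identical HLO stringifies identically.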
TEST_F(HloInstructionTest, CanonicalStringificationFusion) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
  HloInstruction* transpose =
      builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
  HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
      sout, x, transpose, dot_dnums, DefaultPrecisionConfig(2)));
auto options = HloPrintOptions().Canonical();
EXPECT_EQ(dot->ToString(options),
"f32[5,20]{1,0} dot(f32[5,10]{1,0}, f32[10,20]{1,0}), "
"lhs_contracting_dims={1}, rhs_contracting_dims={0}");
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
constexpr char kParallelThreadName[] = "parallel_thread";
computation->SetExecutionThread(kParallelThreadName);
  HloInstruction* fusion = computation->CreateFusionInstruction(
      {dot, transpose}, HloInstruction::FusionKind::kLoop);
  fusion->set_called_computations_execution_thread(
      kParallelThreadName,
      /*skip_async_execution_thread_overwrite=*/false);
const std::string expected_fusion =
R"(f32[5,20]{1,0} fusion(f32[5,10]{1,0}, f32[20,10]{1,0}), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="parallel_thread")";
EXPECT_EQ(fusion->ToString(options), expected_fusion);
}
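// Nested called computations (here a fusion inside both the while condition
// and body) are printed inline, one nesting level per block.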
TEST_F(HloInstructionTest, CanonicalStringificationWhile) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
  HloInstruction* transpose =
      builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
  HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
      sout, x, transpose, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
  computation->CreateFusionInstruction({dot, transpose},
                                       HloInstruction::FusionKind::kLoop);
HloInstruction* loop = builder.AddInstruction(
HloInstruction::CreateWhile(sout, computation, computation, x));
auto options = HloPrintOptions().Canonical();
const std::string expected_loop =
R"(f32[5,20]{1,0} while(f32[5,10]{1,0}), condition=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
}, body=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
})";
EXPECT_EQ(loop->ToString(options), expected_loop);
}
TEST_F(HloInstructionTest, CanonicalStringificationConditional) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
  HloInstruction* transpose =
      builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
  HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
      sout, x, transpose, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
  computation->CreateFusionInstruction({dot, transpose},
                                       HloInstruction::FusionKind::kLoop);
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
sout, pred, x, computation, x, computation));
auto options = HloPrintOptions().Canonical();
const std::string expected_conditional =
R"(f32[5,20]{1,0} conditional(pred[], f32[5,10]{1,0}, f32[5,10]{1,0}), true_computation=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
}, false_computation=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
})";
EXPECT_EQ(conditional->ToString(options), expected_conditional);
}
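// Clone() must deep-clone every computation and rewire parent/module
// back-pointers so nothing in the clone still points at the original module.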
TEST_F(HloInstructionTest, CheckDeepClone) {
const char* const hlo_string = R"(
HloModule Module
addy (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT zadd = s32[] add(lhs, rhs)
}
calla (x: s32[]) -> s32[] {
x = s32[] parameter(0)
reduce = s32[] reduce-window(x, x), to_apply=addy
ROOT xadd = s32[] add(x, reduce)
}
body (bparam: s32[]) -> s32[] {
constant = s32[] constant(1)
bparam = s32[] parameter(0)
v = s32[] call(bparam), to_apply=calla
ROOT add = s32[] add(constant, bparam)
}
condition (cparam: s32[]) -> pred[] {
xconstant = s32[] constant(5)
cparam = s32[] parameter(0)
ROOT greater-than = pred[] compare(xconstant, cparam), direction=GT
}
ENTRY entry (param: s32[]) -> s32[] {
eparam = s32[] parameter(0)
ROOT while = s32[] while(eparam), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> clone = module->Clone();
for (HloComputation* computation : clone->computations()) {
EXPECT_EQ(computation->parent(), clone.get());
for (HloInstruction* instruction : computation->instructions()) {
EXPECT_EQ(instruction->GetModule(), clone.get());
}
}
}
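// Identical() must distinguish instructions that differ only in backend
// config or in custom-call attributes (window, dimension numbers,
// side-effect flag); the next few tests cover each attribute in turn.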
TEST_F(HloInstructionTest, IdenticalAccountsForBackendConfig) {
const Shape shape = ShapeUtil::MakeShape(F32, {42});
HloComputation::Builder builder("test");
HloInstruction* p =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p, p));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p, p));
EXPECT_TRUE(add1->Identical(*add2));
add1->set_raw_backend_config_string("abc");
EXPECT_FALSE(add1->Identical(*add2));
}
TEST_F(HloInstructionTest, IdenticalAccountsForCustomCallWindow) {
  auto instr1 = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                 /*operands=*/{},
                                                 /*custom_call_target=*/"foo");
auto instr2 = instr1->Clone();
EXPECT_TRUE(instr1->Identical(*instr2));
Window w = window_util::MakeWindow({1, 2, 3});
instr1->set_window(w);
EXPECT_FALSE(instr1->Identical(*instr2));
}
TEST_F(HloInstructionTest, IdenticalAccountsForCustomCallDnums) {
  auto instr1 = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                 /*operands=*/{},
                                                 /*custom_call_target=*/"foo");
auto instr2 = instr1->Clone();
EXPECT_TRUE(instr1->Identical(*instr2));
ConvolutionDimensionNumbers dnums;
dnums.set_output_batch_dimension(42);
instr1->set_convolution_dimension_numbers(dnums);
EXPECT_FALSE(instr1->Identical(*instr2));
}
TEST_F(HloInstructionTest, IdenticalAccountsForCustomCallHasSideEffect) {
  auto instr1 = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                 /*operands=*/{},
                                                 /*custom_call_target=*/"foo");
auto instr2 = instr1->Clone();
EXPECT_TRUE(instr1->Identical(*instr2));
auto custom_call_instr1 = Cast<HloCustomCallInstruction>(instr1.get());
custom_call_instr1->set_custom_call_has_side_effect(true);
EXPECT_FALSE(instr1->Identical(*instr2));
}
TEST_F(HloInstructionTest, CloneWindowOnCustomCall) {
  auto instr = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                /*operands=*/{},
                                                /*custom_call_target=*/"foo");
Window w = window_util::MakeWindow({1, 2, 3});
instr->set_window(w);
auto clone = instr->Clone();
EXPECT_TRUE(protobuf_util::ProtobufEquals(clone->window(), w))
<< clone->window().DebugString();
}
TEST_F(HloInstructionTest, CloneDnumsOnCustomCall) {
  auto instr = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                /*operands=*/{},
                                                /*custom_call_target=*/"foo");
ConvolutionDimensionNumbers dnums;
dnums.set_output_batch_dimension(42);
instr->set_convolution_dimension_numbers(dnums);
auto clone = instr->Clone();
EXPECT_TRUE(protobuf_util::ProtobufEquals(
clone->convolution_dimension_numbers(), dnums))
<< clone->convolution_dimension_numbers().DebugString();
}
TEST_F(HloInstructionTest, CloneHasSideEffectOnCustomCall) {
  auto instr = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                /*operands=*/{},
                                                /*custom_call_target=*/"foo");
auto custom_call_instr = Cast<HloCustomCallInstruction>(instr.get());
EXPECT_FALSE(custom_call_instr->custom_call_has_side_effect());
custom_call_instr->set_custom_call_has_side_effect(true);
EXPECT_TRUE(custom_call_instr->custom_call_has_side_effect());
auto clone = instr->Clone();
auto custom_call_clone = Cast<HloCustomCallInstruction>(clone.get());
EXPECT_TRUE(custom_call_clone->custom_call_has_side_effect());
}
TEST_F(HloInstructionTest, CustomCallHasSideEffect) {
  auto instr = HloInstruction::CreateCustomCall(ShapeUtil::MakeShape(F32, {}),
                                                /*operands=*/{},
                                                /*custom_call_target=*/"foo");
auto custom_call_instr = Cast<HloCustomCallInstruction>(instr.get());
EXPECT_FALSE(instr->HasSideEffect());
custom_call_instr->set_custom_call_has_side_effect(true);
EXPECT_TRUE(instr->HasSideEffect());
}
TEST_F(HloInstructionTest, PreserveOperandPrecisionOnCloneConv) {
constexpr char kHloString[] = R"(
HloModule test_module
ENTRY test {
arg0 = f32[1,2,1] parameter(0)
arg1 = f32[1,1,1] parameter(1)
ROOT conv = f32[1,2,1] convolution(arg0, arg1), window={size=1},
dim_labels=b0f_0io->b0f, operand_precision={high,default}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
auto* conv = module->entry_computation()->root_instruction();
auto clone = conv->Clone();
EXPECT_THAT(
clone->precision_config().operand_precision(),
::testing::ElementsAre(PrecisionConfig::HIGH, PrecisionConfig::DEFAULT));
}
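// ReusesOperandElements: a fusion that reads its parameter through a single
// reshape does not reuse elements; reading it through two different reshapes
// does.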
TEST_F(HloInstructionTest, ReuseReshapeOfFusionParameter) {
constexpr char kHloString[] = R"(
HloModule test_module
f {
p = f32[3,2] parameter(0)
r = f32[2,3] reshape(p)
x = f32[2,3] multiply(r, r)
y = f32[2,3] add(r, r)
ROOT sum = f32[2,3] add(x, y)
}
ENTRY test {
p = f32[3,2] parameter(0)
ROOT fusion = f32[2,3] fusion(p), calls=f, kind=kLoop
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(root->ReusesOperandElements(0));
}
TEST_F(HloInstructionTest, ReuseMultipleReshapesOfFusionParameter) {
constexpr char kHloString[] = R"(
HloModule test_module
f {
p = f32[3,2] parameter(0)
r1 = f32[2,3] reshape(p)
r2 = f32[6,1] reshape(p)
ROOT result = (f32[2,3], f32[6,1]) tuple(r1, r2)
}
ENTRY test {
p = f32[3,2] parameter(0)
ROOT fusion = (f32[2,3], f32[6,1]) fusion(p), calls=f, kind=kLoop
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(root->ReusesOperandElements(0));
}
TEST_F(HloInstructionTest, BitcastDoesNotReuseElements) {
constexpr char kHloString[] = R"(
HloModule test_module
ENTRY test {
p = f32[3,2]{0,1} parameter(0)
ROOT bitcast = f32[6] bitcast(p)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(root->ReusesOperandElements(0));
}
TEST_F(HloInstructionTest, GatherDoesNotReuseElements) {
constexpr char kHloString[] = R"(
HloModule test_module
ENTRY test {
input = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
idx = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}
gather(input, idx), offset_dims={4,5,6,7,8}, collapsed_slice_dims={},
start_index_map={0,1,2,3,4}, index_vector_dim=4,
slice_sizes={30,29,28,27,26}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(root->ReusesOperandElements(0));
EXPECT_FALSE(root->ReusesOperandElements(1));
}
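// Backend configs round-trip through a serialized string representation;
// non-finite doubles (infinity, NaN) must survive the set/get round trip.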
TEST_F(HloInstructionTest, BackendConfigCanContainNonFiniteFloats) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = b.AddInstruction(HloInstruction::CreateDot(
shape, p0, p0, dot_dnums, DefaultPrecisionConfig(2)));
gpu::GpuBackendConfig gpu_config;
gpu::GemmBackendConfig& orig_config =
*gpu_config.mutable_gemm_backend_config();
orig_config.set_alpha_real(std::numeric_limits<double>::infinity());
orig_config.set_alpha_imag(std::numeric_limits<double>::quiet_NaN());
TF_ASSERT_OK(dot->set_backend_config(gpu_config));
TF_ASSERT_OK_AND_ASSIGN(auto new_gpu_config,
dot->backend_config<gpu::GpuBackendConfig>());
EXPECT_GT(new_gpu_config.gemm_backend_config().alpha_real(),
std::numeric_limits<double>::max());
EXPECT_NE(new_gpu_config.gemm_backend_config().alpha_imag(),
new_gpu_config.gemm_backend_config().alpha_imag());
}
TEST_F(HloInstructionTest, VerifyToApplyRegionPointsToReduceScatter) {
const Shape rs_input_shape = ShapeUtil::MakeShape(F32, {20});
const Shape rs_output_shape = ShapeUtil::MakeShape(F32, {10});
std::unique_ptr<HloComputation> add_computation;
{
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder add_builder("add");
HloInstruction* param0 = add_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* param1 = add_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "p1"));
add_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param0, param1));
add_computation = add_builder.Build();
}
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(0, rs_input_shape, "input"));
  main_builder.AddInstruction(HloInstruction::CreateReduceScatter(
      rs_output_shape, {param}, add_computation.get(), CollectiveDeviceList(),
      /*constrain_layout=*/false, /*channel_id=*/std::nullopt,
      /*use_global_device_ids=*/false, /*scatter_dimension=*/0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(main_builder.Build());
module->AddEmbeddedComputation(std::move(add_computation));
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (!comp->IsEntryComputation()) {
EXPECT_TRUE(comp->IsCollectiveCalledComputation());
EXPECT_EQ(comp->CollectiveCallInstruction(),
module->entry_computation()->root_instruction());
}
}
}
TEST_F(HloInstructionTest, VerifyToApplyRegionPointsToAllReduce) {
const Shape ar_input_shape = ShapeUtil::MakeShape(F32, {20});
std::unique_ptr<HloComputation> add_computation;
{
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder add_builder("add");
HloInstruction* param0 = add_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* param1 = add_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "p1"));
add_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param0, param1));
add_computation = add_builder.Build();
}
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(0, ar_input_shape, "input"));
  main_builder.AddInstruction(HloInstruction::CreateAllReduce(
      ar_input_shape, {param}, add_computation.get(), CollectiveDeviceList(),
      /*constrain_layout=*/false, /*channel_id=*/std::nullopt,
      /*use_global_device_ids=*/false));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(main_builder.Build());
module->AddEmbeddedComputation(std::move(add_computation));
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (!comp->IsEntryComputation()) {
EXPECT_TRUE(comp->IsCollectiveCalledComputation());
EXPECT_EQ(comp->CollectiveCallInstruction(),
module->entry_computation()->root_instruction());
}
}
}
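// A control-dependency cycle must surface as an error that names the cycle
// (recv -> send -> send-done -> recv) instead of hanging or crashing the
// visitor.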
TEST_F(HloInstructionTest, PrintCycle) {
constexpr char kHloString[] = R"(
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}, control-predecessors={recv}
send-done = token[] send-done(send), channel_id=2
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2
ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* recv = FindInstruction(module.get(), "recv");
HloInstruction* send_done = FindInstruction(module.get(), "send-done");
ASSERT_IS_OK(send_done->AddControlDependencyTo(recv));
HloInstruction* root = FindInstruction(module.get(), "recv-data");
NodeCollectorAndPostProcessor visitor;
auto status = root->Accept(&visitor);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("recv\n send\n send-done\n recv"));
ASSERT_IS_OK(send_done->DropAllControlDeps());
}
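// Computations called from while/conditional instructions keep a back-pointer
// to the calling instruction; the next tests verify it is set and consistent.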
TEST_F(HloInstructionTest, VerifyBodyComputationPointsToWhile) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder cond_builder("cond");
{
HloInstruction* param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1024.0)));
cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,
constant, ComparisonDirection::kLt));
}
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
HloComputation::Builder body_builder("body");
{
HloInstruction* param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, param, param));
}
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "input"));
main_builder.AddInstruction(HloInstruction::CreateWhile(
scalar_shape, cond_computation, body_computation, param));
module->AddEntryComputation(main_builder.Build());
int num_while_body_comp = 0;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (comp->IsWhileBodyComputation()) {
num_while_body_comp += 1;
EXPECT_EQ(comp->WhileCallInstruction(),
module->entry_computation()->root_instruction());
}
}
EXPECT_EQ(num_while_body_comp, 1);
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
HloComputation* while_body = instruction->while_body();
EXPECT_TRUE(while_body->IsWhileBodyComputation());
HloInstruction* while_back_ref = while_body->WhileCallInstruction();
EXPECT_EQ(while_back_ref->while_body(), while_body);
}
}
}
TEST_F(HloInstructionTest,
VerifyBranchComputationPointsToConditonal_TrueFalseConstructor) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder branch_0_builder("branch_0");
{
HloInstruction* param = branch_0_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* constant = branch_0_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1024.0)));
branch_0_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param, constant));
}
auto branch_0_computation =
module->AddEmbeddedComputation(branch_0_builder.Build());
HloComputation::Builder branch_1_builder("branch_1");
{
HloInstruction* param = branch_1_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
branch_1_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, param, param));
}
auto branch_1_computation =
module->AddEmbeddedComputation(branch_1_builder.Build());
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* pred_param =
main_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(PRED, {}), "pred_param"));
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "input"));
  main_builder.AddInstruction(HloInstruction::CreateConditional(
      scalar_shape, pred_param, /*true_computation_arg=*/param,
      /*true_computation=*/branch_0_computation,
      /*false_computation_arg=*/param,
      /*false_computation=*/branch_1_computation));
module->AddEntryComputation(main_builder.Build());
int num_conditional_branch_comp = 0;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (comp->IsConditionalBranchComputation()) {
num_conditional_branch_comp += 1;
EXPECT_EQ(comp->ConditionalCallInstruction(),
module->entry_computation()->root_instruction());
}
}
EXPECT_EQ(num_conditional_branch_comp, 2);
}
TEST_F(HloInstructionTest,
VerifyBranchComputationPointsToConditonal_BranchIndexConstructor) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
std::vector<HloComputation*> branch_computations;
{
HloComputation::Builder builder("branch_0");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1024.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param, constant));
branch_computations.push_back(
module->AddEmbeddedComputation(builder.Build()));
}
{
HloComputation::Builder builder("branch_1");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, param, param));
branch_computations.push_back(
module->AddEmbeddedComputation(builder.Build()));
}
{
HloComputation::Builder builder("branch_2");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kLog, param));
branch_computations.push_back(
module->AddEmbeddedComputation(builder.Build()));
}
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* branch_index =
main_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(S32), "branch_index_param"));
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "input"));
std::vector<HloInstruction*> branch_computation_args(
branch_computations.size(), param);
main_builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape, branch_index, branch_computations,
branch_computation_args));
module->AddEntryComputation(main_builder.Build());
int num_conditional_branch_comp = 0;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (comp->IsConditionalBranchComputation()) {
num_conditional_branch_comp += 1;
EXPECT_EQ(comp->ConditionalCallInstruction(),
module->entry_computation()->root_instruction());
}
}
EXPECT_EQ(num_conditional_branch_comp, branch_computations.size());
}
TEST_F(HloInstructionTest, BackendConfigCopiedToDerived) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p1"));
auto add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
gpu::GpuBackendConfig gpu_config;
gpu_config.set_operation_queue_id(2);
TF_ASSERT_OK(add->set_backend_config(gpu_config));
auto add2 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
add->SetupDerivedInstruction(add2);
auto backend_config = add2->backend_config<gpu::GpuBackendConfig>();
EXPECT_TRUE(backend_config.ok());
EXPECT_EQ(backend_config->operation_queue_id(), 2);
}
TEST_F(HloInstructionTest, BackendConfigNotCopiedToDerivedWithDiffOpcode) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p1"));
auto or1 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kOr, p0, p1));
gpu::GpuBackendConfig gpu_config;
gpu_config.set_operation_queue_id(2);
TF_ASSERT_OK(or1->set_backend_config(gpu_config));
auto add2 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
or1->SetupDerivedInstruction(add2);
EXPECT_FALSE(add2->has_backend_config());
}
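// MergeFusionInstructionIntoMultiOutput folds a multi-output producer fusion
// into its consumer, re-emitting any producer outputs that still have
// external users as extra fusion roots.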
TEST_F(HloInstructionTest,
MergeMultiOutputProducerFusionIntoMultiOutputFusion) {
const std::string& hlo_string = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(param1, add, sub, param0)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
param2.0 = f32[10]{0} parameter(2)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param2.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
producer = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_producer
gte0 = f32[10]{0} get-tuple-element(producer), index=0
gte1 = f32[10]{0} get-tuple-element(producer), index=1
gte2 = f32[10]{0} get-tuple-element(producer), index=2
gte3 = f32[10]{0} get-tuple-element(producer), index=3
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2, gte3), kind=kLoop, calls=mof_consumer
gte4 = f32[10]{0} get-tuple-element(consumer), index=0
gte5 = f32[10]{0} get-tuple-element(consumer), index=1
gte6 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(gte0, gte1, gte3, gte4, gte5, gte6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* producer = FindInstruction(module.get(), "producer");
HloInstruction* consumer = FindInstruction(module.get(), "consumer");
consumer->MergeFusionInstructionIntoMultiOutput(producer);
HloInstruction* fusion = nullptr;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Parameter(1), m::GetTupleElement(m::Fusion(&fusion), 3),
m::Parameter(0), m::GetTupleElement(m::Fusion(), 0),
m::GetTupleElement(m::Fusion(), 1),
m::GetTupleElement(m::Fusion(), 2))));
EXPECT_THAT(fusion->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloInstructionTest,
MergeMultiOutputProducerFusionIntoMultiOutputFusionAvoidDuplicateRoots) {
const std::string& hlo_string = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(add, sub)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param0.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
producer = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_producer
gte1 = f32[10]{0} get-tuple-element(producer), index=0
gte2 = f32[10]{0} get-tuple-element(producer), index=1
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2), kind=kLoop, calls=mof_consumer
gte3 = f32[10]{0} get-tuple-element(consumer), index=0
gte4 = f32[10]{0} get-tuple-element(consumer), index=1
gte5 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(gte1, gte3, gte4, gte5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* producer = FindInstruction(module.get(), "producer");
HloInstruction* consumer = FindInstruction(module.get(), "consumer");
consumer->MergeFusionInstructionIntoMultiOutput(producer);
HloInstruction* fusion = nullptr;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion), 2),
m::GetTupleElement(m::Fusion(), 0),
m::GetTupleElement(m::Fusion(), 1),
m::GetTupleElement(m::Fusion(), 2))));
EXPECT_THAT(fusion->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloInstructionTest,
MergeMultiOutputSiblingFusionsAvoidDuplicateFusionParameters) {
const std::string& hlo_string = R"(
HloModule mof
mof_sibling1 {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add)
}
mof_sibling2 {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
mul = f32[10]{0} multiply(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(mul, param1.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
sibling1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_sibling1
gte0 = f32[10]{0} get-tuple-element(sibling1), index=0
gte1 = f32[10]{0} get-tuple-element(sibling1), index=1
sibling2 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_sibling2
gte2 = f32[10]{0} get-tuple-element(sibling2), index=0
gte3 = f32[10]{0} get-tuple-element(sibling2), index=1
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(gte0, gte1, gte2, gte3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* sibling1 = FindInstruction(module.get(), "sibling1");
HloInstruction* sibling2 = FindInstruction(module.get(), "sibling2");
sibling2->MergeFusionInstructionIntoMultiOutput(sibling1);
HloInstruction* fusion = nullptr;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Parameter(1),
m::GetTupleElement(m::Fusion(&fusion), 2),
m::GetTupleElement(m::Fusion(), 0),
m::GetTupleElement(m::Fusion(), 1))));
EXPECT_THAT(fusion->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Multiply(m::Parameter(0), m::Parameter(1)),
m::Parameter(1),
m::Add(m::Parameter(0), m::Parameter(1)))));
}
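// UnfuseInstruction hoists one fused instruction out of the fusion; it
// succeeds only when the instruction's operands are fusion parameters (or
// constants), as the follow-up tests demonstrate.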
TEST_F(HloInstructionTest, UnfuseInstruction) {
const std::string& hlo_string = R"(
HloModule mof
fusion_comp {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
fusion.1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=fusion_comp
gte0 = f32[10]{0} get-tuple-element(fusion.1), index=0
gte1 = f32[10]{0} get-tuple-element(fusion.1), index=1
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
HloInstruction* add = fusion->fused_instructions_computation()
->root_instruction()
->mutable_operand(1);
TF_ASSERT_OK_AND_ASSIGN(auto unfused, fusion->UnfuseInstruction(add));
EXPECT_THAT(unfused, GmockMatch(m::Add(m::Parameter(0), m::Parameter(1))));
}
TEST_F(HloInstructionTest, UnfuseInstruction2) {
const std::string& hlo_string = R"(
HloModule mof
fusion_comp {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
add2 = f32[10]{0} add(add, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add2)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
fusion.1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=fusion_comp
gte0 = f32[10]{0} get-tuple-element(fusion.1), index=0
gte1 = f32[10]{0} get-tuple-element(fusion.1), index=1
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
HloInstruction* add2 = fusion->fused_instructions_computation()
->root_instruction()
->mutable_operand(1);
HloInstruction* add = add2->mutable_operand(0);
EXPECT_FALSE(fusion->UnfuseInstruction(add2).ok());
TF_ASSERT_OK_AND_ASSIGN(auto unfused, fusion->UnfuseInstruction(add));
EXPECT_THAT(unfused, GmockMatch(m::Add(m::Parameter(0), m::Parameter(1))));
}
TEST_F(HloInstructionTest, UnfuseInstructionWithConstantOperand) {
const std::string& hlo_string = R"(
HloModule mof
fusion_comp {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
const = f32[] constant(1.0)
broadcast = f32[10]{0} broadcast(const), dimensions={}
add = f32[10]{0} add(param0, broadcast)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
fusion.1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=fusion_comp
gte0 = f32[10]{0} get-tuple-element(fusion.1), index=0
gte1 = f32[10]{0} get-tuple-element(fusion.1), index=1
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
HloInstruction* add = fusion->fused_instructions_computation()
->root_instruction()
->mutable_operand(1);
TF_ASSERT_OK_AND_ASSIGN(auto unfused, fusion->UnfuseInstruction(add));
EXPECT_THAT(unfused,
GmockMatch(m::Add(m::Parameter(0), m::Broadcast(m::Constant()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_instruction.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_instruction_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b385adf9-728d-43f0-a409-fe53a52414c6 | cpp | tensorflow/tensorflow | hlo_input_output_alias_config | third_party/xla/xla/hlo/ir/hlo_input_output_alias_config.cc | third_party/xla/xla/service/hlo_input_output_alias_config_test.cc | #include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
bool HloInputOutputAliasConfig::OutputHasAlias(
const ShapeIndex& output_index) const {
return alias_.element(output_index).has_value();
}
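// Registers an alias between an output index and a parameter index. Fails if
// the output index is invalid for the result shape or is already aliased.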
absl::Status HloInputOutputAliasConfig::SetUpAlias(
const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
HloInputOutputAliasConfig::AliasKind must_alias) {
TF_RET_CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index))
<< "Trying to set up alias at " << output_index.ToString()
<< " which is an invalid index for shape "
<< ShapeUtil::HumanString(alias_.shape());
TF_RET_CHECK(param_number >= 0) << param_number;
TF_RET_CHECK(!alias_.element(output_index)) << absl::StrFormat(
"Trying to set up output alias for param %lld at %s but failed: output "
"index %s is already aliased with param %lld at %s",
param_number, param_index.ToString(), output_index.ToString(),
alias_.element(output_index)->parameter_number,
alias_.element(output_index)->parameter_index.ToString());
(*alias_.mutable_element(output_index)) =
Alias(param_number, param_index, must_alias);
VLOG(4) << "Set up alias between output index " << output_index.ToString()
<< " and parameter " << param_number << " at index "
<< param_index.ToString();
return absl::OkStatus();
}
HloInputOutputAliasProto HloInputOutputAliasConfig::ToProto() const {
HloInputOutputAliasProto result;
alias_.ForEachElement(
[&](const ShapeIndex& index, const std::optional<Alias>& data) {
if (data) {
HloInputOutputAliasProto::AliasEntryProto entry;
for (int64_t i : index) {
entry.add_output_shape_index(i);
}
entry.set_parameter_number(data->parameter_number);
for (int64_t i : data->parameter_index) {
entry.add_parameter_shape_index(i);
}
if (data->must_alias()) {
entry.set_kind(Kind::MUST_ALIAS);
} else {
entry.set_kind(Kind::MAY_ALIAS);
}
result.add_entries()->Swap(&entry);
}
});
return result;
}
absl::StatusOr<HloInputOutputAliasConfig>
HloInputOutputAliasConfig::CreateFromProto(
Shape output_shape, const HloInputOutputAliasProto& proto) {
HloInputOutputAliasConfig result(std::move(output_shape));
for (const HloInputOutputAliasProto::AliasEntryProto& entry :
proto.entries()) {
ShapeIndex output_index(entry.output_shape_index().begin(),
entry.output_shape_index().end());
int64_t param_number = entry.parameter_number();
ShapeIndex param_index(entry.parameter_shape_index().begin(),
entry.parameter_shape_index().end());
AliasKind kind = entry.kind() == Kind::MAY_ALIAS ? kMayAlias : kMustAlias;
TF_RETURN_IF_ERROR(
result.SetUpAlias(output_index, param_number, param_index, kind));
}
return result;
}
const Shape& HloInputOutputAliasConfig::shape() const { return alias_.shape(); }
std::string HloInputOutputAliasConfig::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloInputOutputAliasConfig");
pieces.push_back(
absl::StrFormat(" Output shape: %s", alias_.shape().ToString()));
ForEachAlias([&](const ShapeIndex& output_index, const Alias& alias) {
pieces.push_back(absl::StrFormat(
" OutputIndex %s is %saliased with parameter %lld at %s:",
output_index.ToString(), alias.kind == kMustAlias ? "must-" : "may-",
alias.parameter_number, alias.parameter_index.ToString()));
});
return absl::StrJoin(pieces, "\n");
}
std::string HloInputOutputAliasConfig::ToShortString() const {
std::vector<std::string> pieces;
for (const auto& p : alias_) {
const ShapeIndex& index = p.first;
if (std::optional<Alias> alias = p.second) {
pieces.push_back(
absl::StrFormat("%s: %s", index.ToString(), alias->ToString()));
}
}
return absl::StrJoin(pieces, ", ");
}
bool HloInputOutputAliasConfig::ParameterMustAlias(
int64_t param_number, const ShapeIndex& param_index) const {
bool result = false;
alias_.ForEachElement(
[&](const xla::ShapeIndex&, std::optional<Alias> alias) {
if (alias && alias->parameter_number == param_number &&
alias->parameter_index == param_index && alias->must_alias()) {
result = true;
}
});
return result;
}
std::optional<ShapeIndex> HloInputOutputAliasConfig::GetAliasedOutput(
int64_t param_number, const ShapeIndex& param_index) const {
for (auto it = alias_.rbegin(); it != alias_.rend(); ++it) {
if (it->second.has_value() &&
it->second->parameter_number == param_number &&
it->second->parameter_index == param_index) {
return it->first;
}
}
return std::nullopt;
}
std::optional<HloInputOutputAliasConfig::Alias>
HloInputOutputAliasConfig::GetAliasedParameter(
const ShapeIndex& output_index) const {
CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index))
<< ToString() << " " << alias_.shape().ToString() << " " << output_index;
return alias_.element(output_index);
}
void HloInputOutputAliasConfig::ForEachAlias(AliasFn fn) const {
alias_.ForEachElement(
[&](const ShapeIndex& output_index, std::optional<Alias> aliased) {
if (aliased) {
fn(output_index, *aliased);
}
});
}
absl::Status HloInputOutputAliasConfig::ForEachAliasWithStatus(
AliasFnWithStatus fn) const {
return alias_.ForEachElementWithStatus(
[&](const ShapeIndex& output_index, std::optional<Alias> aliased) {
if (aliased) {
TF_RETURN_IF_ERROR(fn(output_index, *aliased));
}
return absl::OkStatus();
});
}
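// Validates every alias against the entry computation layout: indices must be
// valid, both sub-shapes must be dense arrays of equal byte size (per
// size_func), and no parameter element may be aliased more than once.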
absl::Status HloInputOutputAliasConfig::Verify(
const HloModule& module,
absl::FunctionRef<int64_t(const Shape&)> size_func) const {
std::vector<ShapeTree<bool>> param_has_seen;
const HloComputation* entry = module.entry_computation();
for (int64_t i = 0; i < entry->num_parameters(); ++i) {
HloInstruction* param = entry->parameter_instruction(i);
param_has_seen.emplace_back(param->shape());
}
return ForEachAliasWithStatus([&](const ShapeIndex& output_index,
const Alias& alias) -> absl::Status {
TF_RET_CHECK(0 <= alias.parameter_number);
TF_RET_CHECK(entry->num_parameters() > alias.parameter_number);
const Shape& param_shape =
module.entry_computation_layout().parameter_shape(
alias.parameter_number);
const Shape& output_shape =
module.entry_computation_layout().result_shape();
TF_RET_CHECK(ShapeUtil::IndexIsValid(param_shape, alias.parameter_index));
TF_RET_CHECK(ShapeUtil::IndexIsValid(output_shape, output_index));
const Shape& param_subshape =
ShapeUtil::GetSubshape(param_shape, alias.parameter_index);
const Shape& output_subshape =
ShapeUtil::GetSubshape(output_shape, output_index);
TF_RET_CHECK(LayoutUtil::IsDenseArray(param_subshape));
TF_RET_CHECK(LayoutUtil::IsDenseArray(output_subshape));
if (size_func(param_subshape) != size_func(output_subshape)) {
return Internal(
"Expected aliased input %lld at index %s and output at index %s to "
"have the same size. Input sub-shape is %s with size %lld, output "
"sub-shape is %s with size %lld",
alias.parameter_number, alias.parameter_index.ToString(),
output_index.ToString(),
ShapeUtil::HumanStringWithLayout(param_subshape),
size_func(param_subshape),
ShapeUtil::HumanStringWithLayout(output_subshape),
size_func(output_subshape));
}
TF_RET_CHECK(param_has_seen[alias.parameter_number].element(
alias.parameter_index) == false);
*(param_has_seen[alias.parameter_number].mutable_element(
alias.parameter_index)) = true;
return absl::OkStatus();
});
}
std::ostream& operator<<(std::ostream& out,
const HloInputOutputAliasConfig& config) {
out << config.ToString();
return out;
}
absl::Status HloBufferDonorConfig::AddBufferDonor(
int64_t param_number, const ShapeIndex& param_index) {
TF_RET_CHECK(param_number >= 0) << param_number;
VLOG(4) << "Register the parameter " << param_number << " at index "
<< param_index.ToString() << " as a buffer donor.";
buffer_donor_.emplace(BufferDonor(param_number, param_index));
return absl::OkStatus();
}
absl::Status HloBufferDonorConfig::RemoveBufferDonor(
int64_t param_number, const ShapeIndex& param_index) {
TF_RET_CHECK(param_number >= 0) << param_number;
buffer_donor_.erase(BufferDonor(param_number, param_index));
return absl::OkStatus();
}
HloBufferDonorProto HloBufferDonorConfig::ToProto() const {
HloBufferDonorProto result;
for (const auto& donor : buffer_donor_) {
HloBufferDonorProto::BufferDonorEntryProto entry;
entry.set_parameter_number(donor.param_number);
for (int64_t i : donor.param_index) {
entry.add_parameter_shape_index(i);
}
result.add_entries()->Swap(&entry);
}
return result;
}
absl::StatusOr<HloBufferDonorConfig> HloBufferDonorConfig::CreateFromProto(
const HloBufferDonorProto& proto) {
HloBufferDonorConfig result;
for (const HloBufferDonorProto::BufferDonorEntryProto& entry :
proto.entries()) {
int64_t param_number = entry.parameter_number();
ShapeIndex param_index(entry.parameter_shape_index().begin(),
entry.parameter_shape_index().end());
TF_RETURN_IF_ERROR(result.AddBufferDonor(param_number, param_index));
}
return result;
}
std::string HloBufferDonorConfig::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloBufferDonorConfig");
for (const auto& donor : buffer_donor_) {
pieces.push_back(absl::StrFormat(
" Parameter %lld at %s is registered as a buffer donor.",
donor.param_number, donor.param_index.ToString()));
}
return absl::StrJoin(pieces, "\n");
}
std::string HloBufferDonorConfig::ToShortString() const {
std::vector<std::string> pieces;
pieces.reserve(buffer_donor_.size());
for (const auto& donor : buffer_donor_) {
pieces.push_back(absl::StrFormat("(%lld, %s)", donor.param_number,
donor.param_index.ToString()));
}
return absl::StrJoin(pieces, ", ");
}
bool HloBufferDonorConfig::ParameterIsBufferDonor(
int64_t param_number, const ShapeIndex& param_index) const {
auto it = buffer_donor_.find(BufferDonor(param_number, param_index));
return it != buffer_donor_.end();
}
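// A parameter element may be a buffer donor or participate in input/output
// aliasing, but never both at once.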
absl::Status HloBufferDonorConfig::Verify(const HloModule& module) const {
const HloComputation* entry = module.entry_computation();
const auto& alias_config = module.input_output_alias_config();
for (const auto& donor : buffer_donor_) {
TF_RET_CHECK(donor.param_number >= 0);
TF_RET_CHECK(donor.param_number < entry->num_parameters());
const Shape& param_shape =
module.entry_computation_layout().parameter_shape(donor.param_number);
TF_RET_CHECK(ShapeUtil::IndexIsValid(param_shape, donor.param_index));
const Shape& param_subshape =
ShapeUtil::GetSubshape(param_shape, donor.param_index);
TF_RET_CHECK(LayoutUtil::IsDenseArray(param_subshape));
if (alias_config.ParameterHasAlias(donor.param_number, donor.param_index)) {
return Internal(
"Input %lld at index %s is registered as a buffer donor. However, it "
"is also in the input output alias config.",
donor.param_number, donor.param_index.ToString());
}
}
return absl::OkStatus();
}
std::ostream& operator<<(std::ostream& out,
const HloBufferDonorConfig& config) {
out << config.ToString();
return out;
}
} | #include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloInputOutputAliasConfigTest : public HloTestBase {
protected:
void expect_aliased(const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
const HloInputOutputAliasConfig& config) {
std::optional<ShapeIndex> aliased_output =
config.GetAliasedOutput(param_number, param_index);
EXPECT_TRUE(aliased_output);
EXPECT_EQ(aliased_output.value(), output_index);
std::optional<HloInputOutputAliasConfig::Alias> aliased_param =
config.GetAliasedParameter(output_index);
EXPECT_TRUE(aliased_param);
EXPECT_EQ(aliased_param->parameter_number, param_number);
EXPECT_EQ(aliased_param->parameter_index, param_index);
}
void expect_not_aliased(const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
const HloInputOutputAliasConfig& config) {
std::optional<ShapeIndex> aliased_output =
config.GetAliasedOutput(param_number, param_index);
EXPECT_FALSE(aliased_output && aliased_output == output_index);
std::optional<HloInputOutputAliasConfig::Alias> aliased_param =
config.GetAliasedParameter(output_index);
EXPECT_FALSE(aliased_param &&
aliased_param->parameter_number == param_number &&
aliased_param->parameter_index == param_index);
}
};
TEST_F(HloInputOutputAliasConfigTest, SimpleAliasing) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/1,
      /*param_index=*/{}));
  expect_aliased(/*output_index=*/{0}, /*param_number=*/1,
                 /*param_index=*/{}, config);
  expect_not_aliased(/*output_index=*/{1}, /*param_number=*/1,
                     /*param_index=*/{}, config);
  expect_not_aliased(/*output_index=*/{0}, /*param_number=*/0,
                     /*param_index=*/{}, config);
}
TEST_F(HloInputOutputAliasConfigTest, SimpleAliasingWithTupleInput) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0,
      /*param_index=*/{0}));
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0,
      /*param_index=*/{1}));
  expect_aliased(/*output_index=*/{0}, /*param_number=*/0,
                 /*param_index=*/{0}, config);
  expect_aliased(/*output_index=*/{1}, /*param_number=*/0,
                 /*param_index=*/{1}, config);
  expect_not_aliased(/*output_index=*/{1}, /*param_number=*/1,
                     /*param_index=*/{}, config);
  expect_not_aliased(/*output_index=*/{0}, /*param_number=*/0,
                     /*param_index=*/{}, config);
}
TEST_F(HloInputOutputAliasConfigTest, InputDoNotAliasTwice) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0,
      /*param_index=*/{}));
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0,
      /*param_index=*/{}));
ASSERT_IS_NOT_OK(config.Verify(*module, [](const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape);
}));
}
TEST_F(HloInputOutputAliasConfigTest, SizesMustMatch) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[4096] parameter(1)
ROOT root = (f32[], f32[4096]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{1}, /*param_number=*/0,
      /*param_index=*/{}));
ASSERT_IS_NOT_OK(config.Verify(*module, [](const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape);
}));
}
TEST_F(HloInputOutputAliasConfigTest, OutputDoNotAliasTwice) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
  TF_ASSERT_OK(config.SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/0,
      /*param_index=*/{}));
  ASSERT_IS_NOT_OK(config.SetUpAlias(
      /*output_index=*/{0}, /*param_number=*/1,
      /*param_index=*/{}));
}
class HloBufferDonorConfigTest : public HloTestBase {};
TEST_F(HloBufferDonorConfigTest, SimpleBufferDonor) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloBufferDonorConfig config;
TF_ASSERT_OK(config.AddBufferDonor(0, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.AddBufferDonor(1, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.RemoveBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.Verify(*module));
TF_ASSERT_OK(config.AddBufferDonor(2, {}));
ASSERT_IS_NOT_OK(config.Verify(*module));
}
TEST_F(HloBufferDonorConfigTest, SimpleBufferDonorWithTupleInput) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloBufferDonorConfig config;
TF_ASSERT_OK(config.AddBufferDonor(0, {0}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {0}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {1}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.AddBufferDonor(0, {1}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {0}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {1}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.Verify(*module));
TF_ASSERT_OK(config.AddBufferDonor(0, {2}));
ASSERT_IS_NOT_OK(config.Verify(*module));
}
TEST_F(HloBufferDonorConfigTest, BufferDonorInputOutputAliasOverlap) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloBufferDonorConfig config;
TF_ASSERT_OK(config.AddBufferDonor(0, {0}));
TF_ASSERT_OK(config.Verify(*module));
TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
ASSERT_IS_NOT_OK(config.Verify(*module));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_input_output_alias_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_input_output_alias_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3a95f6c-7a3e-41ce-a6b7-c77b05f1b7d2 | cpp | tensorflow/tensorflow | hlo_module | third_party/xla/xla/hlo/ir/hlo_module.cc | third_party/xla/xla/service/hlo_module_test.cc | #include "xla/hlo/ir/hlo_module.h"
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/map_util.h"
#include "xla/printer.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/computation_layout.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/mapped_ptr_container_sorter.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
HloModule::HloModule(const std::string& name, HloModuleConfig config)
: HloModule(name, std::move(config),
std::make_unique<CompilationEnvironments>()) {}
HloModule::HloModule(const std::string& name, HloModuleConfig config,
std::unique_ptr<CompilationEnvironments> comp_envs)
: HloModule(name, std::make_unique<HloModuleConfig>(std::move(config)),
std::move(comp_envs)) {}
HloModule::HloModule(const std::string& name,
std::variant<std::unique_ptr<HloModuleConfig>,
std::shared_ptr<const HloModuleConfig>>
config,
std::unique_ptr<CompilationEnvironments> comp_envs)
: name_(NameUniquer::GetSanitizedName(name)),
config_(std::move(config)),
unique_id_(next_unique_module_id_++),
metadata_(tsl::Env::Default()),
autofdo_fingerprint_(""),
comp_envs_(std::move(comp_envs)) {
metadata_.set_canonical_module_id(unique_id_);
}
absl::Status HloModule::set_schedule(HloSchedule schedule) {
TF_RET_CHECK(schedule.module() == this);
TF_RETURN_IF_ERROR(schedule.Verify());
schedule_ = std::move(schedule);
return absl::OkStatus();
}
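// Swaps the entry pointer and rebuilds the derived metadata (default
// computation layout, aliasing, buffer donors); the replacement is
// presumably already owned by this module, since nothing is added here.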
void HloModule::ReplaceEntryComputation(HloComputation* entry_computation) {
entry_computation_ = entry_computation;
config_.get_mutable().SetDefaultComputationLayout(
entry_computation_->ComputeProgramShape());
input_output_alias_config_ = HloInputOutputAliasConfig(
entry_computation_->root_instruction()->shape());
buffer_donor_config_ = HloBufferDonorConfig();
}
HloModule::StackFrame HloModule::get_stack_frame(int id) const {
HloModule::StackFrame stack_frame;
if (!stack_frame_index_.has_value() || id < 1 ||
id > stack_frame_index_->stack_frames().size()) {
return stack_frame;
}
auto& frame = stack_frame_index_->stack_frames(id - 1);
auto& file_location =
stack_frame_index_->file_locations(frame.file_location_id() - 1);
stack_frame.file_name =
stack_frame_index_->file_names(file_location.file_name_id() - 1);
stack_frame.function_name =
stack_frame_index_->function_names(file_location.function_name_id() - 1);
stack_frame.line = file_location.line();
stack_frame.column = file_location.column();
stack_frame.parent_frame_id = frame.parent_frame_id();
return stack_frame;
}
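// Shared helper behind all Add*Computation entry points. With
// uniquify_identifiers set, fresh names and instruction ids are assigned;
// otherwise existing ids are trusted and next_unique_id_ is bumped past
// them so future ids cannot collide.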
HloComputation* HloModule::AddComputationInternal(
std::unique_ptr<HloComputation> computation, bool is_entry,
bool uniquify_identifiers, bool preserve_entry_layouts) {
if (is_entry) {
CHECK_EQ(nullptr, entry_computation_);
entry_computation_ = computation.get();
if (preserve_entry_layouts) {
config_.get_mutable().SetComputationLayoutIfExists(
entry_computation_->ComputeProgramShape());
} else if (!config_.get().has_entry_computation_layout()) {
config_.get_mutable().SetDefaultComputationLayout(
entry_computation_->ComputeProgramShape());
}
input_output_alias_config_ = HloInputOutputAliasConfig(
entry_computation_->root_instruction()->shape());
buffer_donor_config_ = HloBufferDonorConfig();
}
if (uniquify_identifiers) {
computation->UniquifyName(&computation_name_uniquer_);
for (auto* instruction : computation->instructions()) {
instruction->UniquifyName(&instruction_name_uniquer_);
}
for (auto* instruction : computation->instructions()) {
instruction->SetUniqueId(NewUniqueInstructionId());
}
CHECK_NE(computation->root_instruction()->unique_id(), -1)
<< "Root has no valid id: " << computation->ToString();
computation->SetUniqueId(computation->root_instruction()->unique_id());
} else {
computation_name_uniquer_.GetUniqueName(computation->name());
for (auto* instruction : computation->instructions()) {
instruction_name_uniquer_.GetUniqueName(instruction->name());
next_unique_id_ = std::max(next_unique_id_, instruction->unique_id() + 1);
}
if (next_unique_id_ < computation->unique_id() + 1) {
next_unique_id_ = computation->unique_id() + 1;
}
}
computation->set_parent(this);
computations_.push_back(std::move(computation));
return computations_.back().get();
}
HloComputation* HloModule::AddEntryComputation(
std::unique_ptr<HloComputation> computation) {
return AddComputationInternal(std::move(computation), true,
true,
false);
}
HloComputation* HloModule::AddEntryComputationWithLayouts(
std::unique_ptr<HloComputation> computation) {
return AddComputationInternal(std::move(computation), true,
true,
true);
}
absl::Status HloModule::RemoveEmbeddedComputation(HloComputation* to_remove) {
if (has_schedule()) {
schedule_->remove_computation(to_remove);
}
auto it = absl::c_find_if(
computations_, [&to_remove](const std::unique_ptr<HloComputation>& comp) {
return comp.get() == to_remove;
});
TF_RET_CHECK(it != computations_.end());
TF_RET_CHECK(it->get() == to_remove);
computations_.erase(it);
return absl::OkStatus();
}
HloComputation* HloModule::AddEmbeddedComputation(
std::unique_ptr<HloComputation> computation) {
return AddComputationInternal(std::move(computation), false,
true,
false);
}
void HloModule::MarkFusionDuplications(
const absl::flat_hash_map<HloComputation*, HloComputation*>& replacements) {
for (std::unique_ptr<HloComputation>& computation : computations_) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kFusion) {
auto rep =
replacements.find(instruction->fused_instructions_computation());
if (rep != replacements.end()) {
xla::HloComputation* new_comp = rep->second;
if (new_comp->IsFusionComputation()) {
auto dedup_name = new_comp->FusionInstruction()->name();
new_comp->FusionInstruction()->set_metadata_deduplicated_name(
std::string(dedup_name));
instruction->set_metadata_deduplicated_name(
std::string(dedup_name));
}
}
}
}
}
}
void HloModule::MoveComputationsFrom(HloModule* module,
bool make_names_unique) {
for (size_t i = 0; i < module->computation_count(); ++i) {
for (auto* instruction : module->computations_[i]->instructions()) {
instruction->ClearUniqueIdInternal();
}
module->computations_[i]->ClearUniqueIdInternal();
auto computation_raw_ptr = module->computations_[i].get();
if (computation_raw_ptr->IsEntryComputation()) {
this->entry_computation_ = nullptr;
}
this->AddComputationInternal(
std::move(module->computations_[i]),
computation_raw_ptr->IsEntryComputation(),
false,
false);
if (make_names_unique) {
computation_raw_ptr->UniquifyName(&computation_name_uniquer_);
for (auto* instruction : computation_raw_ptr->instructions()) {
instruction->UniquifyName(&instruction_name_uniquer_);
}
}
for (auto* instruction : computation_raw_ptr->instructions()) {
instruction->SetUniqueId(NewUniqueInstructionId());
}
CHECK_NE(computation_raw_ptr->root_instruction()->unique_id(), -1)
<< "Root has no valid id: " << computation_raw_ptr->ToString();
computation_raw_ptr->SetUniqueId(
computation_raw_ptr->root_instruction()->unique_id());
}
module->computations_.clear();
}
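// Rewrites every call site (to_apply, while condition/body, conditional
// branches, select-and-scatter) to the mapped computation, then drops the
// replaced computations from the module's list.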
void HloModule::ReplaceComputations(
const absl::flat_hash_map<HloComputation*, HloComputation*>& replacements) {
std::vector<std::unique_ptr<HloComputation>> new_computations;
new_computations.reserve(computations_.size());
for (std::unique_ptr<HloComputation>& computation : computations_) {
for (auto* instruction : computation->instructions()) {
if (instruction->has_to_apply()) {
HloComputation* new_arg = tsl::gtl::FindWithDefault(
replacements, instruction->to_apply(), nullptr);
if (new_arg != nullptr) {
instruction->set_to_apply(new_arg);
}
continue;
}
switch (instruction->opcode()) {
case HloOpcode::kWhile: {
HloComputation* new_condition = tsl::gtl::FindWithDefault(
replacements, instruction->while_condition(), nullptr);
if (new_condition != nullptr) {
instruction->set_while_condition(new_condition);
}
HloComputation* new_body = tsl::gtl::FindWithDefault(
replacements, instruction->while_body(), nullptr);
if (new_body != nullptr) {
instruction->set_while_body(new_body);
}
break;
}
case HloOpcode::kConditional: {
for (int b = 0; b < instruction->branch_count(); ++b) {
HloComputation* new_computation = tsl::gtl::FindWithDefault(
replacements, instruction->branch_computation(b), nullptr);
if (new_computation != nullptr) {
instruction->set_branch_computation(b, new_computation);
}
}
break;
}
case HloOpcode::kSelectAndScatter: {
HloComputation* new_select = tsl::gtl::FindWithDefault(
replacements, instruction->select(), nullptr);
if (new_select != nullptr) {
instruction->set_select(new_select);
}
HloComputation* new_scatter = tsl::gtl::FindWithDefault(
replacements, instruction->scatter(), nullptr);
if (new_scatter != nullptr) {
instruction->set_scatter(new_scatter);
}
break;
}
default:
break;
}
}
if (replacements.find(computation.get()) == replacements.end()) {
new_computations.push_back(std::move(computation));
}
}
entry_computation_ = tsl::gtl::FindWithDefault(
replacements, entry_computation_, entry_computation_);
computations_ = std::move(new_computations);
}
void HloModule::Print(Printer* printer, const HloPrintOptions& options) const {
printer->Append("HloModule ");
if (options.print_ids()) {
printer->Append(name());
}
if (has_schedule()) {
TF_CHECK_OK(schedule().Verify());
printer->Append(", is_scheduled=true");
}
std::string serialized_aliasing = input_output_alias_config().ToShortString();
if (!serialized_aliasing.empty()) {
printer->Append(", input_output_alias={ ");
printer->Append(std::move(serialized_aliasing));
printer->Append(" }");
}
std::string serialized_buffer_donor = buffer_donor_config().ToShortString();
if (!serialized_buffer_donor.empty()) {
printer->Append(", buffer_donor={ ");
printer->Append(std::move(serialized_buffer_donor));
printer->Append(" }");
}
const auto& config = config_.get();
if (config.alias_passthrough_params()) {
printer->Append(", alias_passthrough_params=true");
}
if (config.has_entry_computation_layout()) {
printer->Append(", entry_computation_layout={");
entry_computation_layout().Print(printer);
printer->Append("}");
}
if (config.allow_spmd_sharding_propagation_to_parameters().size() != 1 ||
config.allow_spmd_sharding_propagation_to_parameters().back()) {
printer->Append(", allow_spmd_sharding_propagation_to_parameters={");
AppendJoin(printer, config.allow_spmd_sharding_propagation_to_parameters(),
",", [](Printer* printer, bool i) {
printer->Append(i ? "true" : "false");
});
printer->Append("}");
}
if (config.allow_spmd_sharding_propagation_to_output().size() != 1 ||
config.allow_spmd_sharding_propagation_to_output().back()) {
printer->Append(", allow_spmd_sharding_propagation_to_output={");
AppendJoin(printer, config.allow_spmd_sharding_propagation_to_output(), ",",
[](Printer* printer, bool i) {
printer->Append(i ? "true" : "false");
});
printer->Append("}");
}
if (config.replica_count() != 1) {
printer->Append(", replica_count=");
printer->Append(config.replica_count());
}
if (config.num_partitions() != 1) {
printer->Append(", num_partitions=");
printer->Append(config.num_partitions());
}
if (!frontend_attributes_.map().empty()) {
AppendCat(printer, ", frontend_attributes=",
FrontendAttributesToString(frontend_attributes_));
}
printer->Append("\n\n");
const auto& computations = options.canonicalize_computations()
? MakeComputationSorted()
: MakeComputationPostOrder();
for (const HloComputation* computation : computations) {
if (options.syntax_sugar_async_ops() && computation->IsAsyncComputation() &&
computation->CanExpandIntoSingleInstruction()) {
continue;
}
if (computation == entry_computation()) {
printer->Append("ENTRY ");
}
if (has_schedule() && schedule().is_computation_scheduled(computation)) {
computation->Print(printer, options,
schedule().sequence(computation).instructions());
} else {
computation->Print(printer, options);
}
printer->Append("\n\n");
}
}
std::string HloModule::ToString(const HloPrintOptions& options) const {
StringPrinter printer;
Print(&printer, options);
return std::move(printer).ToString();
}
absl::Cord HloModule::ToCord(const HloPrintOptions& options) const {
CordPrinter printer;
Print(&printer, options);
return std::move(printer).ToCord();
}
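// Proto serialization; computations are emitted in post order, so callees
// always precede their callers in the resulting HloModuleProto.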
HloModuleProto HloModule::ToProto() const {
HloModuleProto proto;
proto.set_id(unique_id_);
proto.set_name(name_);
if (entry_computation_) {
*proto.mutable_entry_computation_name() =
std::string(entry_computation_->name());
proto.set_entry_computation_id(entry_computation_->unique_id());
*proto.mutable_host_program_shape() =
entry_computation_layout().ComputeProgramShape().ToProto();
}
for (const HloComputation* computation : MakeComputationPostOrder()) {
HloComputationProto computation_proto = computation->ToProto();
proto.add_computations()->Swap(&computation_proto);
}
if (has_schedule()) {
*proto.mutable_schedule() = schedule().ToProto().value();
}
*proto.mutable_input_output_alias() = input_output_alias_config().ToProto();
*proto.mutable_buffer_donor() = buffer_donor_config().ToProto();
for (const auto& [parameter, indices, alt_memory_offset] :
CrossProgramPrefetches()) {
auto* prefetch = proto.mutable_cross_program_prefetches()->Add();
prefetch->set_parameter(parameter);
for (auto index : indices) {
prefetch->add_index(index);
}
if (alt_memory_offset) {
prefetch->set_offset(*alt_memory_offset);
}
}
proto.set_is_dynamic(is_dynamic_);
if (has_spmd_output_sharding()) {
*proto.mutable_spmd_output_sharding() = spmd_output_sharding().ToProto();
}
*proto.mutable_frontend_attributes() = frontend_attributes_;
if (has_spmd_parameters_shardings()) {
for (const auto& parameter_sharding : spmd_parameters_shardings()) {
*proto.add_spmd_parameters_shardings() = parameter_sharding.ToProto();
}
}
proto.set_use_auto_spmd_partitioning(use_auto_spmd_partitioning_);
for (const HloModuleProto::ProfileInfo& profile_info : profile_info_list_) {
HloModuleProto::ProfileInfo& profile_info_proto =
*proto.mutable_profile_info()->Add();
profile_info_proto.set_profile_type(profile_info.profile_type());
profile_info_proto.set_relative_speedup(profile_info.relative_speedup());
profile_info_proto.set_profile_source(profile_info.profile_source());
profile_info_proto.set_compilation_event(profile_info.compilation_event());
profile_info_proto.set_fingerprint(profile_info.fingerprint());
}
if (config_.get().has_static_device_assignment()) {
DeviceAssignmentProto device_assignment;
config_.get().static_device_assignment().Serialize(&device_assignment);
(*proto.mutable_device_assignment()) = device_assignment;
}
if (stack_frame_index_.has_value()) {
(*proto.mutable_stack_frame_index()) = *stack_frame_index_;
}
return proto;
}
HloModuleProtoWithConfig HloModule::ToProtoWithConfig() const {
HloModuleProtoWithConfig result;
*result.mutable_config() = config_.get().ToProto();
*result.mutable_hlo_module() = ToProto();
return result;
}
absl::Status HloModule::CheckUniqueNamesAndIdsForComputationsAndInstructions()
const {
absl::flat_hash_set<absl::string_view> computation_names;
absl::flat_hash_set<int> computation_ids;
absl::flat_hash_set<absl::string_view> instruction_names;
absl::flat_hash_set<int> instruction_ids;
for (const HloComputation* computation : computations()) {
TF_RET_CHECK(!ContainsKey(computation_names, computation->name()))
<< "Computation name is not unique: " << computation->name();
computation_names.insert(computation->name());
TF_RET_CHECK(!ContainsKey(computation_ids, computation->unique_id()))
<< "Computation id is not unique: " << computation->unique_id();
computation_ids.insert(computation->unique_id());
for (const HloInstruction* instruction : computation->instructions()) {
TF_RET_CHECK(!ContainsKey(instruction_names, instruction->name()))
<< "Instruction name is not unique: " << instruction->name();
instruction_names.insert(instruction->name());
TF_RET_CHECK(!ContainsKey(instruction_ids, instruction->unique_id()))
<< "Instruction id is not unique: " << instruction->unique_id();
instruction_ids.insert(instruction->unique_id());
}
}
return absl::OkStatus();
}
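// Proto deserialization: validates the host program shape against the
// config, rebuilds computations sorted by their original proto ids, and
// restores schedule, aliasing, sharding, and device-assignment metadata.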
absl::StatusOr<std::unique_ptr<HloModule>> HloModule::CreateFromProto(
const HloModuleProto& proto, const HloModuleConfig& module_config,
bool prohibit_empty_literal) {
VLOG(2) << "CreateFromProto()";
XLA_VLOG_LINES(3, proto.DebugString());
TF_RET_CHECK(proto.has_host_program_shape())
<< "No program shape found in the proto";
ProgramShape expected_program_shape(proto.host_program_shape());
TF_RET_CHECK(expected_program_shape.parameters_size() ==
module_config.entry_computation_layout().parameter_count());
for (int i = 0; i < expected_program_shape.parameters_size(); ++i) {
const Shape& parameter_shape =
module_config.entry_computation_layout().parameter_layout(i).shape();
TF_RET_CHECK(ShapeUtil::Compatible(expected_program_shape.parameters(i),
parameter_shape))
<< "HloModuleConfig has different shape for parameter " << i
<< " than the HLO module. Expected: "
<< ShapeUtil::HumanStringWithLayout(
expected_program_shape.parameters(i))
<< ", actual: " << ShapeUtil::HumanStringWithLayout(parameter_shape);
}
const Shape& result_shape =
module_config.entry_computation_layout().result_layout().shape();
TF_RET_CHECK(
ShapeUtil::Compatible(expected_program_shape.result(), result_shape))
<< "HloModuleConfig has different result shape than the HLO module. "
"Expected: "
<< ShapeUtil::HumanStringWithLayout(expected_program_shape.result())
<< ", actual: " << ShapeUtil::HumanStringWithLayout(result_shape);
absl::flat_hash_map<int64_t, HloComputation*> computation_map;
absl::flat_hash_map<HloComputation*, int64_t> to_proto_id;
std::vector<std::unique_ptr<HloComputation>> computations;
HloComputation* entry = nullptr;
for (const HloComputationProto& computation_proto : proto.computations()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloComputation> computation,
HloComputation::CreateFromProto(computation_proto, computation_map,
prohibit_empty_literal));
CHECK_NE(computation.get(), nullptr);
int64_t computation_id = computation_proto.id();
TF_RET_CHECK(computation_id != -1);
TF_RET_CHECK(!ContainsKey(computation_map, computation_id));
computation_map[computation_id] = computation.get();
to_proto_id[computation.get()] = computation_id;
if (computation_id == proto.entry_computation_id()) {
entry = computation.get();
}
computations.push_back(std::move(computation));
}
TF_RET_CHECK(entry != nullptr);
auto module = std::make_unique<HloModule>(proto.name(), module_config);
absl::c_sort(computations, [&](const std::unique_ptr<HloComputation>& a,
const std::unique_ptr<HloComputation>& b) {
return to_proto_id[a.get()] < to_proto_id[b.get()];
});
for (auto& computation : computations) {
bool is_entry = computation.get() == entry;
module->AddComputationInternal(std::move(computation), is_entry,
false,
false);
}
TF_RET_CHECK(module->entry_computation_ != nullptr);
TF_ASSIGN_OR_RETURN(
module->input_output_alias_config_,
HloInputOutputAliasConfig::CreateFromProto(
entry->ComputeProgramShape().result(), proto.input_output_alias()));
TF_ASSIGN_OR_RETURN(
module->buffer_donor_config_,
HloBufferDonorConfig::CreateFromProto(proto.buffer_donor()));
TF_RETURN_IF_ERROR(
module->CheckUniqueNamesAndIdsForComputationsAndInstructions());
if (proto.has_schedule()) {
TF_ASSIGN_OR_RETURN(
HloSchedule schedule,
HloSchedule::CreateFromProto(module.get(), proto.schedule()));
TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule)));
}
for (const auto& prefetch : proto.cross_program_prefetches()) {
module->AddCrossProgramPrefetch(
prefetch.parameter(),
ShapeIndex(prefetch.index().begin(), prefetch.index().end()),
prefetch.offset());
}
module->set_is_dynamic(proto.is_dynamic());
if (proto.has_frontend_attributes()) {
module->set_frontend_attributes(proto.frontend_attributes());
}
if (proto.has_spmd_output_sharding()) {
TF_ASSIGN_OR_RETURN(HloSharding hlo_sharding,
HloSharding::FromProto(proto.spmd_output_sharding()));
module->set_spmd_output_sharding(hlo_sharding);
}
std::vector<HloSharding> param_shardings;
for (const auto& sharding_proto : proto.spmd_parameters_shardings()) {
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(sharding_proto));
param_shardings.push_back(sharding);
}
if (!param_shardings.empty()) {
module->set_spmd_parameters_shardings(param_shardings);
}
module->set_use_auto_spmd_partitioning(proto.use_auto_spmd_partitioning());
for (const auto& profile_info : proto.profile_info()) {
module->add_profile_info(profile_info);
}
if (proto.has_device_assignment()) {
if (!module->config_.get().has_static_device_assignment()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<DeviceAssignment> device_assignment,
DeviceAssignment::Deserialize(proto.device_assignment()));
module->config_.get_mutable().set_static_device_assignment(
*device_assignment);
}
}
if (proto.has_stack_frame_index()) {
if (!module->stack_frame_index_.has_value()) {
module->stack_frame_index_ = proto.stack_frame_index();
}
}
return std::move(module);
}
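// Derives an HloModuleConfig from a program shape plus optional execution
// options (replica/partition counts, SPMD settings, device assignment),
// then copies parameter and result layouts into the entry layout.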
absl::StatusOr<HloModuleConfig> HloModule::CreateModuleConfigFromShape(
const ProgramShape& program_shape, const DebugOptions& debug_options,
const ExecutionOptions* execution_options) {
HloModuleConfig module_config(ProgramShape{program_shape});
module_config.set_debug_options(debug_options);
if (execution_options) {
if (execution_options->num_replicas() > 0) {
module_config.set_replica_count(execution_options->num_replicas());
}
if (execution_options->num_partitions() > 0) {
module_config.set_num_partitions(execution_options->num_partitions());
}
module_config.set_use_spmd_partitioning(
execution_options->use_spmd_partitioning());
module_config.set_use_auto_spmd_partitioning(
execution_options->use_auto_spmd_partitioning());
module_config.set_auto_spmd_partitioning_mesh_shape(std::vector<int64_t>(
execution_options->auto_spmd_partitioning_mesh_shape().begin(),
execution_options->auto_spmd_partitioning_mesh_shape().end()));
module_config.set_auto_spmd_partitioning_mesh_ids(std::vector<int64_t>(
execution_options->auto_spmd_partitioning_mesh_ids().begin(),
execution_options->auto_spmd_partitioning_mesh_ids().end()));
module_config.set_deduplicate_hlo(execution_options->deduplicate_hlo());
if (!execution_options->allow_spmd_sharding_propagation_to_parameters()
.empty()) {
module_config.set_allow_spmd_sharding_propagation_to_parameters(
execution_options->allow_spmd_sharding_propagation_to_parameters());
}
if (!execution_options->allow_spmd_sharding_propagation_to_output()
.empty()) {
module_config.set_allow_spmd_sharding_propagation_to_output(
execution_options->allow_spmd_sharding_propagation_to_output());
}
if (execution_options->has_device_assignment()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<DeviceAssignment> device_assignment,
DeviceAssignment::Deserialize(
execution_options->device_assignment()));
module_config.set_static_device_assignment(*device_assignment);
if (execution_options->num_replicas() > 0) {
CHECK_EQ(module_config.static_device_assignment().replica_count(),
module_config.replica_count());
}
if (execution_options->num_partitions() > 0) {
CHECK_EQ(module_config.static_device_assignment().computation_count(),
module_config.num_partitions());
}
}
module_config.set_param_requires_broadcast_via_collectives(std::vector<
bool>(
execution_options->param_requires_broadcast_via_collectives().begin(),
execution_options->param_requires_broadcast_via_collectives().end()));
module_config.set_allow_separate_sharding_programs(
execution_options->allow_separate_sharding_programs());
HloModuleConfig::AssignStructShardableValueUpdatePairs(
module_config, execution_options->shardable_value_update_pairs());
module_config.set_use_shardy_partitioner(
execution_options->use_shardy_partitioner());
}
ComputationLayout* entry_layout =
module_config.mutable_entry_computation_layout();
for (int64_t i = 0; i < entry_layout->parameter_count(); ++i) {
TF_RETURN_IF_ERROR(
entry_layout->mutable_parameter_layout(i)->CopyLayoutFromShape(
program_shape.parameters(i)));
}
TF_RETURN_IF_ERROR(entry_layout->mutable_result_layout()->CopyLayoutFromShape(
program_shape.result()));
return module_config;
}
absl::StatusOr<HloModuleConfig> HloModule::CreateModuleConfigFromProto(
const HloModuleProto& module, const DebugOptions& debug_options,
const ExecutionOptions* execution_options) {
if (!module.has_host_program_shape()) {
return tsl::errors::FailedPrecondition(
"No program shape found in the proto");
}
ProgramShape program_shape(module.host_program_shape());
TF_ASSIGN_OR_RETURN(HloModuleConfig config,
CreateModuleConfigFromShape(program_shape, debug_options,
execution_options));
if (!config.has_static_device_assignment()) {
if (module.has_device_assignment()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<DeviceAssignment> device_assignment,
DeviceAssignment::Deserialize(module.device_assignment()));
config.set_static_device_assignment(*device_assignment);
}
}
return config;
}
absl::StatusOr<std::unique_ptr<HloModule>> HloModule::CreateFromProtoWithConfig(
const HloModuleProtoWithConfig& proto, bool prohibit_empty_literal) {
const auto& hlo_module_proto = proto.hlo_module();
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModuleConfig> config_ptr,
HloModuleConfig::CreateFromProto(proto.config()));
return HloModule::CreateFromProto(hlo_module_proto, *config_ptr,
prohibit_empty_literal);
}
namespace {
bool IsUsedOutsideSubcomputation(const HloInstruction& hlo,
const absl::flat_hash_set<HloInstruction*>&
instructions_in_subcomputation) {
return absl::c_any_of(hlo.users(), [&](HloInstruction* user) {
return !instructions_in_subcomputation.contains(user);
});
}
}
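// Clones instructions_to_outline into a fresh computation, turning
// external operands into parameters, and replaces the originals with one
// kCall. Exactly one value may escape the outlined set; multiple outputs
// are a fatal error.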
HloInstruction* HloModule::OutlineExpressionFromComputation(
absl::Span<HloInstruction* const> instructions_to_outline,
const std::string& outlined_computation_name, HloComputation* computation) {
auto builder = HloComputation::Builder(outlined_computation_name);
absl::flat_hash_map<HloInstruction*, HloInstruction*> outlined_instructions;
absl::flat_hash_set<HloInstruction*> instruction_set_to_outline(
instructions_to_outline.begin(), instructions_to_outline.end());
std::vector<HloInstruction*> arguments;
std::vector<HloInstruction*> outputs;
int64_t parameter_count = 0;
for (HloInstruction* instruction_to_outline : instructions_to_outline) {
HloInstruction* outlined_instruction =
builder.AddInstruction(instruction_to_outline->Clone());
for (int64_t operand_num = 0;
operand_num < outlined_instruction->operand_count(); ++operand_num) {
HloInstruction* old_operand =
outlined_instruction->mutable_operand(operand_num);
HloInstruction** operand_slot = &(outlined_instructions[old_operand]);
if (*operand_slot == nullptr) {
arguments.push_back(old_operand);
*operand_slot = builder.AddInstruction(HloInstruction::CreateParameter(
parameter_count, old_operand->shape(), "p"));
++parameter_count;
}
TF_CHECK_OK(
outlined_instruction->ReplaceOperandWith(operand_num, *operand_slot));
}
InsertOrDie(&outlined_instructions, instruction_to_outline,
outlined_instruction);
if (instruction_to_outline->user_count() == 0 ||
IsUsedOutsideSubcomputation(*instruction_to_outline,
instruction_set_to_outline)) {
outputs.push_back(instruction_to_outline);
}
}
if (outputs.size() != 1) {
std::string error_message =
"The subcomputation to outline has multiple outputs:\n";
for (HloInstruction* output : outputs) {
absl::StrAppend(&error_message, output->ToString(), "\n");
}
LOG(FATAL) << error_message;
}
HloInstruction* output = outputs[0];
HloComputation* nested_computation = AddEmbeddedComputation(
builder.Build(FindOrDie(outlined_instructions, output)));
HloInstruction* call = computation->AddInstruction(HloInstruction::CreateCall(
output->shape(), arguments, nested_computation));
VLOG(2) << "Outlining the following instructions";
for (auto* instruction_to_outline : instructions_to_outline) {
VLOG(2) << " " << instruction_to_outline->ToString();
}
VLOG(2) << "as a call " << call->ToString();
VLOG(2) << "to " << nested_computation->ToString();
TF_CHECK_OK(output->ReplaceAllUsesWith(call));
for (auto i = instructions_to_outline.rbegin();
i != instructions_to_outline.rend(); ++i) {
TF_CHECK_OK(computation->RemoveInstruction(*i));
}
return call;
}
int64_t HloModule::instruction_count() const {
int64_t n = 0;
for (const auto& computation : computations_) {
n += computation->instruction_count();
}
return n;
}
std::vector<HloComputation*> HloModule::MakeComputationPostOrder(
const absl::flat_hash_set<absl::string_view>& execution_threads,
const absl::flat_hash_set<HloComputation*>& allow_list) const {
std::vector<HloComputation*> post_order =
this->MakeComputationPostOrder(execution_threads);
post_order.erase(std::remove_if(post_order.begin(), post_order.end(),
[&allow_list](HloComputation* computation) {
return !allow_list.contains(computation);
}),
post_order.end());
return post_order;
}
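// Roots are computations never called by any instruction; each root's
// embedded computations are emitted first, so callees precede callers.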
std::vector<HloComputation*> HloModule::MakeComputationPostOrder(
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
if (computations_.empty()) {
return {};
}
absl::flat_hash_set<HloComputation*> nonroot_computations;
nonroot_computations.reserve(computations_.size() - 1);
for (auto& computation : computations_) {
for (const HloInstructionInfo& inst :
computation->instructions_with_info()) {
if (HloInstruction::MightHaveCalledComputations(inst.opcode())) {
for (HloComputation* called_computation : inst->called_computations()) {
nonroot_computations.insert(called_computation);
}
}
}
}
absl::flat_hash_set<HloComputation*> added_computations;
std::vector<HloComputation*> post_order;
added_computations.reserve(computations_.size());
post_order.reserve(computations_.size());
for (auto& computation : computations_) {
if (nonroot_computations.contains(computation.get())) {
continue;
}
for (HloComputation* embedded_computation :
computation->MakeEmbeddedComputationsList()) {
if (added_computations.insert(embedded_computation).second) {
post_order.push_back(embedded_computation);
}
}
CHECK(!added_computations.contains(computation.get()));
post_order.push_back(computation.get());
added_computations.insert(computation.get());
}
if (post_order.size() != computations_.size()) {
for (HloComputation* computation : post_order) {
LOG(ERROR) << "Post Order: " << computation->name() << " ("
<< computation->parent()->name() << ")";
}
for (auto& computation : computations_) {
LOG(ERROR) << "Computations: " << computation->name() << " ("
<< computation->parent()->name() << ")";
}
LOG(FATAL) << "Mismatch computation count: post_order=" << post_order.size()
<< " computation_count=" << computations_.size();
}
if (!execution_threads.empty()) {
post_order.erase(std::remove_if(post_order.begin(), post_order.end(),
[&](HloComputation* computation) {
return !execution_threads.contains(
computation->execution_thread());
}),
post_order.end());
}
return post_order;
}
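// Content-aware ordering: sort computations by instruction count, breaking
// ties with a lazily cached Fingerprint64 of their fingerprint-mode string
// form, giving a deterministic, content-based order.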
namespace {
class FingerprintMap {
public:
void Reserve(int capacity) { fingerprint_map_.reserve(capacity); }
uint64_t GetFingerprint(const HloComputation* computation) {
auto result = fingerprint_map_.try_emplace(computation, 0);
if (result.second) {
result.first->second =
tsl::Fingerprint64(computation->ToString(print_options_));
}
return result.first->second;
}
private:
HloPrintOptions print_options_ = HloPrintOptions::ModuleFingerprint();
absl::flat_hash_map<const HloComputation*, uint64_t> fingerprint_map_;
};
void SortComputationsByContent(std::vector<HloComputation*>* computations) {
FingerprintMap fingerprint_map;
fingerprint_map.Reserve(computations->size());
auto cmp = [&fingerprint_map](const HloComputation* a,
const HloComputation* b) {
if (a->instruction_count() != b->instruction_count()) {
return a->instruction_count() < b->instruction_count();
}
if (a == b) return false;
return fingerprint_map.GetFingerprint(a) <
fingerprint_map.GetFingerprint(b);
};
absl::c_sort(*computations, cmp);
}
}
std::vector<HloComputation*> HloModule::MakeComputationSorted(
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::vector<HloComputation*> result =
MakeComputationPostOrder(execution_threads);
if (config().content_aware_computation_sorting()) {
SortComputationsByContent(&result);
}
return result;
}
std::vector<HloComputation*> HloModule::MakeNonfusionComputations(
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::vector<HloComputation*> result =
MakeComputationPostOrder(execution_threads);
result.erase(std::remove_if(
result.begin(), result.end(),
[](HloComputation* c) { return c->IsFusionComputation(); }),
result.end());
return result;
}
std::vector<HloComputation*> HloModule::MakeNonfusionComputationsSorted(
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
auto result = MakeNonfusionComputations(execution_threads);
if (config().content_aware_computation_sorting()) {
SortComputationsByContent(&result);
}
return result;
}
std::unique_ptr<HloModule> HloModule::Clone(const std::string& suffix) const {
return Clone(config_.FreezeAndShare(), suffix);
}
std::unique_ptr<HloModule> HloModule::Clone(const HloModuleConfig& config,
const std::string& suffix) const {
return Clone(std::make_shared<const HloModuleConfig>(config), suffix);
}
std::unique_ptr<HloModule> HloModule::Clone(
std::shared_ptr<const HloModuleConfig> config,
const std::string& suffix) const {
VLOG(1) << "Cloning module :" << name_ << " --> " << suffix << "\n";
auto module = std::make_unique<HloModule>(
absl::StrCat(name_, suffix.empty() ? "" : "-", suffix), std::move(config),
std::make_unique<CompilationEnvironments>(*comp_envs_));
HloCloneContext context(module.get(), suffix);
if (entry_computation_) {
auto cloned_computation = entry_computation_->Clone(suffix, &context);
module->AddEntryComputation(std::move(cloned_computation));
}
module->input_output_alias_config() = input_output_alias_config();
module->buffer_donor_config() = buffer_donor_config();
module->set_is_dynamic(is_dynamic());
module->set_frontend_attributes(frontend_attributes());
if (has_schedule() && schedule().Verify().ok()) {
HloSchedule clone_schedule(module.get());
for (HloComputation* computation : computations()) {
if (schedule().is_computation_scheduled(computation)) {
HloComputation* new_computation = context.FindComputation(computation);
if (new_computation != nullptr) {
HloInstructionSequence& clone_sequence =
clone_schedule.GetOrCreateSequence(new_computation);
for (const HloInstruction* instruction :
schedule().sequence(computation).instructions()) {
clone_sequence.push_back(context.GetInstruction(instruction));
}
}
}
}
TF_CHECK_OK(module->set_schedule(std::move(clone_schedule)));
}
for (const auto& [parameter, indices, offset] : CrossProgramPrefetches()) {
module->AddCrossProgramPrefetch(parameter, indices, offset);
}
using ComputationSorter = MappedPtrContainerSorter<HloComputation>;
auto computation_map_fn = [&context](const HloComputation* c) {
return context.FindComputation(c);
};
auto status = ComputationSorter::Sort(
computation_map_fn, ComputationSorter::IndexAfterMappedElementsFn(),
computations_, module->computations_);
if (!status.ok()) {
LOG(ERROR) << "Failed to sort module computations for " << name() << "; "
<< status;
}
return module;
}
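// Reachability is discovered by cloning the entry computation into a
// scratch module: any computation the clone context never visited is
// unreachable and gets removed.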
absl::Status HloModule::RemoveUnusedComputations() {
std::string suffix = "tmp";
auto module = std::make_unique<HloModule>(
absl::StrCat(name_, "-", suffix), config(),
std::make_unique<CompilationEnvironments>(*comp_envs_));
HloCloneContext context(module.get(), suffix);
entry_computation_->Clone(suffix, &context);
std::vector<HloComputation*> to_remove;
for (auto computation : computations()) {
auto found_computation = context.FindComputation(computation);
if (found_computation == nullptr) {
to_remove.push_back(computation);
}
}
for (auto computation : to_remove) {
TF_RETURN_IF_ERROR(RemoveEmbeddedComputation(computation));
}
return absl::OkStatus();
}
HloComputation* HloModule::DeepCloneComputation(HloComputation* computation,
HloCloneContext* context) {
HloComputation* new_computation;
if (context != nullptr) {
if ((new_computation = context->FindComputation(computation)) != nullptr) {
return new_computation;
}
new_computation =
AddEmbeddedComputation(computation->Clone(context->suffix(), context));
} else {
new_computation = AddEmbeddedComputation(computation->Clone(""));
}
return new_computation;
}
uint64_t HloModule::RandomNew64() const {
absl::MutexLock l(&rng_mutex_);
return rng_();
}
HloComputation* HloModule::GetComputationWithName(absl::string_view name) {
auto computations_in_module = computations();
auto it = absl::c_find_if(
computations_in_module,
[&](HloComputation* computation) { return computation->name() == name; });
return it == computations_in_module.end() ? nullptr : *it;
}
std::string HloModule::GetFingerprint128(const HloPrintOptions& options) const {
const tsl::Fprint128 fingerprint = tsl::Fingerprint128(ToString(options));
absl::string_view fp_bytes(reinterpret_cast<const char*>(&fingerprint),
sizeof(tsl::Fprint128));
return absl::BytesToHexString(fp_bytes);
}
std::atomic<int> HloModule::next_unique_module_id_(0);
} | #include "xla/hlo/ir/hlo_module.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/test_compilation_environment.pb.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment1> env(
tensorflow::down_cast<test::TestCompilationEnvironment1*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment1>();
env->set_some_flag(100);
}
return env;
}
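// Registered in SetUpTestSuite below so modules created by these tests get
// a TestCompilationEnvironment1 with some_flag == 100 by default.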
namespace {
namespace op = ::xla::testing::opcode_matchers;
class HloModuleTest : public HloTestBase {
protected:
static void SetUpTestSuite() {
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment1::descriptor(), ProcessNewEnv);
}
std::unique_ptr<HloComputation> CreateConstantComputation() {
auto builder = HloComputation::Builder("Constant");
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
return builder.Build();
}
std::unique_ptr<HloComputation> CreateCallComputation(
absl::Span<HloComputation* const> computations) {
auto builder = HloComputation::Builder("Call");
for (auto computation : computations) {
builder.AddInstruction(
HloInstruction::CreateCall(r0f32_, {}, computation));
}
return builder.Build();
}
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
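// Post-order invariant under test: callees appear before callers, and the
// entry computation comes last.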
TEST_F(HloModuleTest, OneComputationPostOrder) {
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(CreateConstantComputation());
EXPECT_THAT(module->MakeComputationPostOrder(),
::testing::ElementsAre(computation));
}
TEST_F(HloModuleTest, TwoComputationsPostOrder) {
auto module = CreateNewVerifiedModule();
auto computation1 = module->AddEntryComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateConstantComputation());
EXPECT_THAT(module->MakeComputationPostOrder(),
::testing::UnorderedElementsAre(computation1, computation2));
EXPECT_EQ(computation1->name(), "Constant");
EXPECT_EQ(computation2->name(), "Constant.1");
}
TEST_F(HloModuleTest, CloneTest) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
auto env = std::make_unique<test::TestCompilationEnvironment1>();
env->set_some_flag(10);
TF_ASSERT_OK(module->comp_envs().AddEnv(std::move(env)));
auto post_order = module->MakeComputationPostOrder();
auto cloned_module = module->Clone("copy");
auto post_order_copied = cloned_module->MakeComputationPostOrder();
EXPECT_EQ(cloned_module->comp_envs()
.GetEnv<test::TestCompilationEnvironment1>()
.some_flag(),
10);
EXPECT_EQ(post_order.size(), post_order_copied.size());
for (auto origin = post_order.begin(), copied = post_order_copied.begin();
origin != post_order.end() && copied != post_order_copied.end();
++origin, ++copied) {
EXPECT_EQ(absl::StrCat((*origin)->name(), ".copy"), (*copied)->name());
}
}
TEST_F(HloModuleTest, CloneFrontendAttributes) {
auto module = CreateNewVerifiedModule();
FrontendAttributes frontend_attributes;
frontend_attributes.mutable_map()->emplace("attribute1", "attribute1_value");
module->set_frontend_attributes(frontend_attributes);
std::unique_ptr<HloModule> clone = module->Clone();
bool areEqual = std::equal(
frontend_attributes.map().begin(), frontend_attributes.map().end(),
clone->frontend_attributes().map().begin(),
[](const auto& kv1, const auto& kv2) {
return kv1.first == kv2.first && kv1.second == kv2.second;
});
EXPECT_TRUE(areEqual);
}
TEST_F(HloModuleTest, CloneHasFusion) {
auto module = CreateNewVerifiedModule();
HloComputation* fused_computation;
{
auto b = HloComputation::Builder("Fused");
auto x = b.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
b.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, x, x));
fused_computation = module->AddEmbeddedComputation(b.Build());
}
{
auto b = HloComputation::Builder("Entry");
auto input = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
b.AddInstruction(
HloInstruction::CreateFusion(r0f32_, HloInstruction::FusionKind::kInput,
{input}, fused_computation));
module->AddEntryComputation(b.Build());
}
auto post_order = module->MakeComputationPostOrder();
auto cloned_module = module->Clone("copy");
auto post_order_copied = cloned_module->MakeComputationPostOrder();
EXPECT_EQ(post_order.size(), post_order_copied.size());
for (auto origin = post_order.begin(), copied = post_order_copied.begin();
origin != post_order.end() && copied != post_order_copied.end();
++origin, ++copied) {
if ((*origin)->name() == "Fused") {
EXPECT_EQ(absl::StrCat((*origin)->name(), ".clone"), (*copied)->name());
} else {
EXPECT_EQ(absl::StrCat((*origin)->name(), ".copy"), (*copied)->name());
}
}
}
TEST_F(HloModuleTest, CloneCustomCallComputationToApply) {
const char* const hlo_string = R"(
HloModule a_module
add_s32 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry () -> s32[] {
%c1 = s32[] constant(1)
%c2 = s32[] constant(2)
ROOT %custom-call =
s32[] custom-call(s32[] %c1, %c2),
custom_call_target="foo",
backend_config="this string is opaque",
to_apply=add_s32
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> cloned_module = module->Clone();
HloComputation* cloned_computation =
cloned_module->GetComputationWithName("add_s32.clone");
HloInstruction* cloned_custom_call =
cloned_module->entry_computation()->GetInstructionWithName("custom-call");
EXPECT_TRUE(cloned_computation->IsCustomCallComputation());
EXPECT_EQ(cloned_computation->CustomCallInstruction(), cloned_custom_call);
}
TEST_F(HloModuleTest, CloneCustomCallComputationCalledComputations) {
const char* const hlo_string = R"(
HloModule a_module
add_s32_0 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
add_s32_1 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry () -> s32[] {
%c1 = s32[] constant(1)
%c2 = s32[] constant(2)
ROOT %custom-call =
s32[] custom-call(s32[] %c1, %c2),
custom_call_target="foo",
backend_config="this string is opaque",
called_computations={%add_s32_0, %add_s32_1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> cloned_module = module->Clone();
HloComputation* cloned_computation_0 =
cloned_module->GetComputationWithName("add_s32_0.clone");
HloComputation* cloned_computation_1 =
cloned_module->GetComputationWithName("add_s32_1.clone");
HloInstruction* cloned_custom_call =
cloned_module->entry_computation()->GetInstructionWithName("custom-call");
EXPECT_TRUE(cloned_computation_0->IsCustomCallComputation());
EXPECT_EQ(cloned_computation_0->CustomCallInstruction(), cloned_custom_call);
EXPECT_TRUE(cloned_computation_1->IsCustomCallComputation());
EXPECT_EQ(cloned_computation_1->CustomCallInstruction(), cloned_custom_call);
}
TEST_F(HloModuleTest, CloneFusionComputation) {
const char* const hlo_string = R"(
HloModule a_module
fused_computation () -> s32[] {
ROOT %result = s32[] parameter(0)
}
ENTRY main {
%c = s32[] constant(1)
ROOT %fusion = s32[] fusion(%c), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> cloned_module = module->Clone();
HloComputation* cloned_computation =
cloned_module->GetComputationWithName("fused_computation.clone");
HloInstruction* cloned_fusion_instr =
cloned_module->entry_computation()->GetInstructionWithName("fusion");
EXPECT_TRUE(cloned_computation->IsFusionComputation());
EXPECT_EQ(cloned_computation->FusionInstruction(), cloned_fusion_instr);
}
TEST_F(HloModuleTest, DiamondComputationsPostOrder) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation4 = module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
auto post_order = module->MakeComputationPostOrder();
EXPECT_THAT(post_order,
::testing::UnorderedElementsAre(computation1, computation2,
computation3, computation4));
EXPECT_EQ(post_order.back(), computation4);
EXPECT_EQ(post_order.front(), computation1);
}
TEST_F(HloModuleTest, LargeConstantToString) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("Constant");
std::vector<float> values(16, 42.0);
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(values)));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(
"HloModule LargeConstantToString, "
"entry_computation_layout={()->f32[16]{0}}\n\nENTRY %Constant () -> "
"f32[16] {\n ROOT %constant = f32[16]{0} constant({...})\n}\n\n",
module->ToString(HloPrintOptions().set_print_large_constants(false)));
EXPECT_EQ(
"HloModule LargeConstantToString, "
"entry_computation_layout={()->f32[16]{0}}\n\nENTRY %Constant () -> "
"f32[16] {\n ROOT %constant = f32[16]{0} constant({42, 42, 42, 42, 42, "
"42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42})\n}\n\n",
module->ToString(HloPrintOptions().set_print_large_constants(true)));
}
TEST_F(HloModuleTest, UniqueModuleId) {
auto module_a = CreateNewVerifiedModule();
auto module_b = CreateNewVerifiedModule();
EXPECT_NE(module_a->unique_id(), module_b->unique_id());
}
TEST_F(HloModuleTest, ProtoSerializationWithoutSchedule) {
const std::string text = R"(
HloModule axpy_module
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%x = f32[2,4]{1,0} parameter(1)
%y = f32[2,4]{1,0} parameter(2)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_FALSE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(
auto module_copy,
HloModule::CreateFromProto(module->ToProto(), module->config()));
ASSERT_FALSE(module_copy->has_schedule());
}
TEST_F(HloModuleTest, ProtoSerializationWithSchedule) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=true
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%x = f32[2,4]{1,0} parameter(1)
%y = f32[2,4]{1,0} parameter(2)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(
auto module_copy,
HloModule::CreateFromProto(module->ToProto(), module->config()));
ASSERT_TRUE(module_copy->has_schedule());
TF_ASSERT_OK(module_copy->schedule().Verify());
EXPECT_EQ(module_copy->schedule().sequences().size(), 1);
ASSERT_TRUE(module_copy->schedule().is_computation_scheduled(
module_copy->entry_computation()));
EXPECT_THAT(
module_copy->schedule()
.sequence(module_copy->entry_computation())
.instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Parameter(),
op::Broadcast(), op::Multiply(), op::Add()));
}
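// Round-tripping through the proto must preserve computation/instruction
// unique ids even after the module is mutated (reduction cloned and
// removed, new negate root) and rescheduled.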
TEST_F(HloModuleTest, ProtoSerializationPreservesIds) {
const std::string text =
R"(HloModule ReduceR3ToR2_module
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY ReduceR3ToR2.v3 {
input = f32[8,16,256]{2,1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
HloComputation* entry = module->entry_computation();
HloInstruction* root = entry->root_instruction();
HloComputation* reduction = root->to_apply();
HloComputation* reduction_clone =
module->AddEmbeddedComputation(reduction->Clone());
root->set_to_apply(reduction_clone);
TF_ASSERT_OK(module->RemoveEmbeddedComputation(reduction));
HloInstruction* negate = entry->AddInstruction(
HloInstruction::CreateUnary(root->shape(), HloOpcode::kNegate, root));
entry->set_root_instruction(negate);
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
};
HloMemoryScheduler scheduler(size_fn);
TF_ASSERT_OK(scheduler.Run(module.get()).status());
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(
auto module_copy,
HloModule::CreateFromProto(module->ToProto(), module->config()));
EXPECT_NE(module->unique_id(), module_copy->unique_id());
auto computations_copy = module_copy->computations();
auto computation_copy_it = computations_copy.begin();
for (const HloComputation* computation_orig : module->computations()) {
const HloComputation* computation_copy = *computation_copy_it++;
EXPECT_EQ(computation_orig->unique_id(), computation_copy->unique_id())
<< absl::StrFormat(
"ID of original computation %s != ID of deserialized "
"computation %s: %d != %d",
computation_orig->name(), computation_copy->name(),
computation_orig->unique_id(), computation_copy->unique_id());
auto instruction_copy_it = computation_copy->instructions().begin();
for (const HloInstruction* instruction_orig :
computation_orig->instructions()) {
const HloInstruction* instruction_copy = *instruction_copy_it++;
EXPECT_EQ(instruction_orig->unique_id(), instruction_copy->unique_id())
<< absl::StrFormat(
"ID of original instruction %s != ID of deserialized "
"instruction %s: %d != %d",
instruction_orig->name(), instruction_copy->name(),
instruction_orig->unique_id(), instruction_copy->unique_id());
}
}
int next_id = module_copy->NewUniqueInstructionId();
for (const HloComputation* computation : module_copy->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
EXPECT_GT(next_id, instruction->unique_id());
}
}
}
TEST_F(HloModuleTest, VerifyReplaceComputationsWithReduceScatter) {
const std::string text = R"(
HloModule reduce-scatter
%sum (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] a, f32[] b)
}
ENTRY main {
%param = f32[16,8,128]{2,1,0} parameter(0)
ROOT %rs = f32[4,8,128]{2,1,0} reduce-scatter(f32[16,8,128]{2,1,0} %param), replica_groups={}, to_apply=%sum, dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
HloComputation* new_comp;
{
auto b = HloComputation::Builder("Fused");
auto p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p0"));
auto p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "p1"));
b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMultiply, p0, p1));
new_comp = module->AddEmbeddedComputation(b.Build());
}
HloComputation* entry = module->entry_computation();
HloInstruction* root = entry->root_instruction();
EXPECT_EQ(root->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
replacement[root->to_apply()] = new_comp;
module->ReplaceComputations(replacement);
EXPECT_EQ(root->to_apply(), new_comp);
}
TEST_F(HloModuleTest, VerifyReplaceComputationsWithSortOp) {
const std::string text = R"(
HloModule sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY top {
p.0 = f32[32] parameter(0)
p.1 = f32[32] parameter(1)
ROOT %sort.148.1589 = (f32[32], f32[32]) sort(p.0, p.1), dimensions={0}, to_apply=compare
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
HloComputation* new_comp;
{
auto b = HloComputation::Builder("Fused");
auto p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p0"));
auto p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "p1"));
b.AddInstruction(HloInstruction::CreateParameter(2, r0f32_, "p2"));
b.AddInstruction(HloInstruction::CreateParameter(3, r0f32_, "p3"));
b.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), p0, p1, ComparisonDirection::kGt));
new_comp = module->AddEmbeddedComputation(b.Build());
}
HloComputation* entry = module->entry_computation();
HloInstruction* root = entry->root_instruction();
EXPECT_EQ(root->to_apply()->root_instruction()->opcode(),
HloOpcode::kCompare);
EXPECT_EQ(root->to_apply()->root_instruction()->comparison_direction(),
ComparisonDirection::kLt);
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
replacement[root->to_apply()] = new_comp;
module->ReplaceComputations(replacement);
EXPECT_EQ(root->to_apply(), new_comp);
}
TEST_F(HloModuleTest, OneComputationAllAllowed) {
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(CreateConstantComputation());
absl::flat_hash_set<HloComputation*> allowList = {computation};
EXPECT_THAT(
module->MakeComputationPostOrder({}, allowList),
::testing::ElementsAre(computation));
}
TEST_F(HloModuleTest, OneComputationAllFiltered) {
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(CreateConstantComputation());
absl::flat_hash_set<HloComputation*> allowList = {};
EXPECT_THAT(
module->MakeComputationPostOrder({}, allowList),
::testing::IsEmpty());
}
TEST_F(HloModuleTest, DiamondComputationsPostOrderAllAllowed) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation4 = module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
absl::flat_hash_set<HloComputation*> allowList = {computation1, computation2,
computation3, computation4};
auto post_order =
module->MakeComputationPostOrder({}, allowList);
EXPECT_THAT(post_order,
::testing::UnorderedElementsAre(computation1, computation2,
computation3, computation4));
EXPECT_EQ(post_order.back(), computation4);
EXPECT_EQ(post_order.front(), computation1);
}
TEST_F(HloModuleTest, DiamondComputationsPostOrderMiddleFiltered) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation4 = module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
absl::flat_hash_set<HloComputation*> allowList = {computation1, computation4};
auto post_order =
module->MakeComputationPostOrder({}, allowList);
EXPECT_THAT(post_order,
::testing::UnorderedElementsAre(computation1, computation4));
}
TEST_F(HloModuleTest, DiamondComputationsPostOrderAllFiltered) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
absl::flat_hash_set<HloComputation*> allowList = {};
  EXPECT_THAT(module->MakeComputationPostOrder({}, allowList),
              ::testing::IsEmpty());
}
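// Verifies that MakeComputationPostOrder() and computations() honor the
// execution-thread filter: the async-wrapped computation is only returned
// when "parallel_thread" (or no filter) is requested.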
TEST_F(HloModuleTest, TwoComputationsFilterexecution_threads) {
HloComputation::Builder builder(TestName());
constexpr char kParallelThreadName[] = "parallel_thread";
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* main_thread_computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
auto* async_done,
main_thread_computation->CreateAsyncInstructions(
add, {ShapeUtil::MakeScalarShape(U32)}, kParallelThreadName));
auto* parallel_thread_computation = async_done->async_wrapped_computation();
EXPECT_THAT(
module->MakeComputationPostOrder({HloInstruction::kMainExecutionThread}),
::testing::ElementsAre(main_thread_computation));
EXPECT_THAT(module->MakeComputationPostOrder(),
::testing::ElementsAre(parallel_thread_computation,
main_thread_computation));
EXPECT_THAT(module->MakeComputationPostOrder({kParallelThreadName}),
::testing::ElementsAre(parallel_thread_computation));
int num_all_computations = 0;
for ([[maybe_unused]] const HloComputation* comp :
module->computations({})) {
++num_all_computations;
}
EXPECT_EQ(num_all_computations, 2);
int num_main_computations = 0;
for (const HloComputation* comp :
module->computations({HloInstruction::kMainExecutionThread})) {
++num_main_computations;
EXPECT_EQ(comp->execution_thread(), HloInstruction::kMainExecutionThread);
}
EXPECT_EQ(num_main_computations, 1);
int num_parallel_computations = 0;
for (const HloComputation* comp :
module->computations({kParallelThreadName})) {
++num_parallel_computations;
EXPECT_EQ(comp->execution_thread(), kParallelThreadName);
}
EXPECT_EQ(num_parallel_computations, 1);
}
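// Round-trips a module together with its config through
// HloModuleProtoWithConfig and checks field-level equivalence, ignoring the
// module's unique id, which is intentionally regenerated.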
TEST_F(HloModuleTest, HloModuleWithConfigSerializationEquality) {
const std::string computation_text =
R"(HloModule ReduceR3ToR2_module
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY ReduceR3ToR2.v3 {
input = f32[8,16,256]{2,1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(computation_text));
xla::HloModuleProtoWithConfig proto = module->ToProtoWithConfig();
std::string serialized_module;
ASSERT_TRUE(tsl::SerializeToStringDeterministic(proto, &serialized_module));
std::string original_debug_str = proto.DebugString();
RecordProperty("serialized_module", original_debug_str);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> reconstructed_module,
HloModule::CreateFromProtoWithConfig(proto));
xla::HloModuleProtoWithConfig reconstructed_module_proto =
reconstructed_module->ToProtoWithConfig();
google::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
google::protobuf::util::MessageDifferencer::EQUIVALENT);
auto module_descriptor = HloModuleProto::GetDescriptor();
auto unique_id_field = module_descriptor->FindFieldByName("id");
diff.IgnoreField(unique_id_field);
EXPECT_TRUE(diff.Compare(proto, reconstructed_module_proto));
}
static ShardableValueUpdatePairProto MakeShardPair(int offset) {
ShardableValueUpdatePairProto pear;
pear.set_input_parameter_number(offset + 1);
for (int64_t i = 0; i < 5; ++i) {
pear.add_parameter_shape_index(offset + i);
}
for (int64_t j = 0; j < 3; ++j) {
pear.add_output_shape_index(offset + j);
}
return pear;
}
static HloModuleConfigProto::BoolList MakeOneHotBoolList(unsigned num_vals,
unsigned hot_idx) {
HloModuleConfigProto::BoolList list;
for (unsigned i = 0; i < num_vals; ++i) {
list.add_vals(i == hot_idx);
}
return list;
}
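// Builds an HloModuleConfigProto with most fields populated so the
// config <-> proto round-trip tests below exercise more than the defaults.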
static absl::StatusOr<HloModuleConfigProto> MakeTestModuleConfigProto() {
HloModuleConfigProto proto;
proto.set_seed(0xdeadbeef);
proto.set_launch_id(0xfeed100);
proto.set_replica_count(3);
proto.set_num_partitions(2);
for (int x = 0; x < 6; ++x) {
proto.add_param_requires_broadcast_via_collectives(x & 1);
}
proto.set_use_spmd_partitioning(true);
proto.set_use_auto_spmd_partitioning(true);
for (unsigned x = 0; x < 4; ++x) {
proto.add_auto_spmd_partitioning_mesh_ids(10 - x);
proto.add_auto_spmd_partitioning_mesh_ids(x);
}
proto.set_deduplicate_hlo(true);
proto.set_intra_op_parallelism_threads(42);
proto.set_device_type("Google Test framework");
*proto.mutable_debug_options() = DefaultDebugOptionsIgnoringFlags();
{
DeviceAssignmentProto device_assignment_proto;
    DeviceAssignment device_assignment(/*replica_count=*/3,
                                       /*computation_count=*/2);
device_assignment.Serialize(&device_assignment_proto);
proto.mutable_static_device_assignment()->Swap(&device_assignment_proto);
}
for (int k = 0; k < 3; ++k) {
*proto.add_shardable_value_update_pairs() = MakeShardPair(k);
}
proto.set_alias_passthrough_params(true);
proto.set_content_aware_computation_sorting(true);
proto.set_fusion_config_collection(HloModuleConfigProto::PER_NODE);
for (int idx = 0; idx < 4; ++idx) {
bool reverse = (idx & 1) == 0;
*proto.add_fusion_config() =
MakeOneHotBoolList(6, (reverse) ? 6 - idx : idx);
}
for (int idx = 0; idx < 4; ++idx) {
HloModuleConfigProto::Int64List int_list;
for (int x = 1; x <= 3; ++x) {
int_list.add_vals(x * x * idx);
}
proto.mutable_dot_config()->insert(
{absl::StrCat("Node", idx, "dot"), std::move(int_list)});
}
for (int idx = 0; idx < 4; ++idx) {
HloModuleConfigProto::Int64ListList list_of_lists;
for (int x = 0; x < 4; ++x) {
HloModuleConfigProto::Int64List int_list;
for (int y = 0; y < 6; ++y) {
int_list.add_vals(y * x + idx + y + 1);
}
list_of_lists.add_lists()->Swap(&int_list);
}
proto.mutable_layout_config()->Add(std::move(list_of_lists));
}
for (uint64_t mem_asgn = 42; mem_asgn < 50; ++mem_asgn) {
proto.add_memory_space_assignment_config(mem_asgn);
}
for (int n = 0; n < 4; ++n) {
*proto.add_phase_ordering_config() = MakeOneHotBoolList(4, n);
}
proto.set_phase_index(2);
proto.add_allow_spmd_sharding_propagation_to_output(true);
for (int idx = 1; idx <= 3; ++idx) {
int64_t allowance = 35 * idx;
proto.mutable_analysis_allowance_map()->insert(
{absl::StrCat("Key", idx), allowance});
}
proto.set_matrix_unit_operand_precision(PrecisionConfig::HIGH);
return proto;
}
TEST_F(HloModuleTest, HloModuleConfigCreateFromProto) {
TF_ASSERT_OK_AND_ASSIGN(HloModuleConfigProto input_proto,
MakeTestModuleConfigProto());
TF_ASSERT_OK_AND_ASSIGN(auto good_config,
HloModuleConfig::CreateFromProto(input_proto));
HloModuleConfigProto output_proto = good_config->ToProto();
google::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
google::protobuf::util::MessageDifferencer::EQUIVALENT);
EXPECT_TRUE(diff.Compare(input_proto, output_proto));
}
TEST_F(HloModuleTest, HloModuleConfigToProto) {
auto module = CreateNewVerifiedModule();
const HloModuleConfig& good_config = module->config();
HloModuleConfigProto first_proto = good_config.ToProto();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModuleConfig> remade_config,
HloModuleConfig::CreateFromProto(first_proto));
ASSERT_NE(remade_config, nullptr);
HloModuleConfigProto second_proto = remade_config->ToProto();
google::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
google::protobuf::util::MessageDifferencer::EQUIVALENT);
EXPECT_TRUE(diff.Compare(first_proto, second_proto));
}
TEST_F(HloModuleTest, HloModuleStackFrames) {
const std::string text = R"(
HloModule a_module
ENTRY main {
%c = s32[] constant(1)
ROOT %result = s32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(module->get_stack_frame(1).empty());
auto module_proto = module->ToProto();
auto index = module_proto.mutable_stack_frame_index();
index->add_file_names("main.py");
index->add_function_names("main");
auto location = index->add_file_locations();
location->set_file_name_id(1);
location->set_function_name_id(1);
location->set_line(10);
location->set_column(5);
auto frame = index->add_stack_frames();
frame->set_file_location_id(1);
module_proto.mutable_computations(0)
->mutable_instructions(0)
->mutable_metadata()
->set_stack_frame_id(1);
TF_ASSERT_OK_AND_ASSIGN(
auto module_with_stack_frames,
HloModule::CreateFromProto(module_proto, module->config()));
EXPECT_TRUE(module_with_stack_frames->get_stack_frame(0).empty());
EXPECT_TRUE(module_with_stack_frames->get_stack_frame(2).empty());
auto stack_frame = module_with_stack_frames->get_stack_frame(1);
EXPECT_EQ(stack_frame.file_name, index->file_names(0));
EXPECT_EQ(stack_frame.function_name, index->function_names(0));
EXPECT_EQ(stack_frame.line, location->line());
EXPECT_EQ(stack_frame.column, location->column());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_module.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
54892a0d-a6bd-4074-b1d2-de0af3d15773 | cpp | tensorflow/tensorflow | hlo_sharding | third_party/xla/xla/hlo/ir/hlo_sharding.cc | third_party/xla/xla/service/hlo_sharding_test.cc | #include "xla/hlo/ir/hlo_sharding.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/overflow_util.h"
#include "xla/printer.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace {
using absl::StrCat;
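// Helper for PartialTile. Given the reshape dims and transpose perm of an
// iota tile assignment, tries to express "sort the minor-most group_size
// elements of each replication group" as a new reshape/transpose, splitting
// one dimension if needed. Returns false when the group boundary does not
// line up with the iota dimensions, in which case the caller falls back to
// materializing the full device array.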
bool GroupMinorIotaDimsSorted(absl::Span<const int64_t> dims,
absl::Span<const int> perm, int64_t group_size,
absl::InlinedVector<int64_t, 6>& new_dims,
absl::InlinedVector<int, 6>& new_perm) {
DCHECK_GT(group_size, 1);
int grouped_dims = 0;
std::optional<std::pair<int, int64_t>> split_dim_and_size;
for (int i = perm.size() - 1; i >= 0; --i) {
const int dim = perm[i];
const int64_t dim_size = dims[dim];
if (dim_size <= group_size) {
if (group_size % dim_size != 0) {
return false;
}
group_size /= dim_size;
++grouped_dims;
} else {
if (dim_size % group_size != 0) {
return false;
}
split_dim_and_size.emplace(dim, dim_size / group_size);
++grouped_dims;
group_size = 1;
break;
}
}
if (!split_dim_and_size) {
new_dims.assign(dims.begin(), dims.end());
new_perm.assign(perm.begin(), perm.end());
std::stable_sort(new_perm.end() - grouped_dims, new_perm.end());
return true;
}
new_dims.resize(dims.size() + 1);
new_perm.resize(perm.size() + 1);
const int split_i = split_dim_and_size->first;
for (int i = 0; i < split_i; ++i) {
new_dims[i] = dims[i];
}
new_dims[split_i] = split_dim_and_size->second;
new_dims[split_i + 1] = dims[split_i] / split_dim_and_size->second;
for (int i = split_i + 2; i < new_perm.size(); ++i) {
new_dims[i] = dims[i - 1];
}
int perm_split = 0;
for (int i = 0; i < perm.size(); ++i) {
const int perm_dim = perm[i];
new_perm[i] = perm_dim <= split_i ? perm_dim : (perm_dim + 1);
if (perm_dim == split_i) {
perm_split = i;
break;
}
}
new_perm[perm_split + 1] = new_perm[perm_split] + 1;
for (int i = perm_split + 2; i < new_perm.size(); ++i) {
const int perm_dim = perm[i - 1];
new_perm[i] = perm_dim <= split_i ? perm_dim : (perm_dim + 1);
}
std::stable_sort(new_perm.end() - grouped_dims, new_perm.end());
return true;
}
}
HloSharding HloSharding::AssignDevice(int64_t device_id,
absl::Span<const OpMetadata> metadata) {
return HloSharding(device_id, metadata);
}
HloSharding HloSharding::Tile1D(const Shape& input_shape, int64_t num_tiles,
absl::Span<const OpMetadata> metadata) {
CHECK_EQ(1, input_shape.rank());
CHECK_GT(num_tiles, 1);
absl::Span<const int64_t> dimensions(&num_tiles, 1);
return HloSharding(TileAssignment(dimensions, dimensions, {0}),
                     /*replicate_on_last_tile_dim=*/false, metadata);
}
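// Creates a partially replicated sharding in which the last dimension of
// the tile assignment enumerates the devices each tile is replicated on.
// Degenerate inputs collapse to Replicate() or a plain tiled sharding, and
// the device ids inside every replication group are sorted so that
// equivalent shardings compare and hash equal.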
HloSharding HloSharding::PartialTile(
const TileAssignment& tile_assignment_last_dim_replicate,
absl::Span<const OpMetadata> metadata) {
if (tile_assignment_last_dim_replicate.num_dimensions() == 1 ||
tile_assignment_last_dim_replicate.dimensions().back() ==
tile_assignment_last_dim_replicate.num_elements()) {
return Replicate(metadata);
}
if (tile_assignment_last_dim_replicate.dimensions().back() == 1) {
auto new_tile_dims = tile_assignment_last_dim_replicate.dimensions();
new_tile_dims.remove_suffix(1);
return HloSharding(
tile_assignment_last_dim_replicate.Reshape(new_tile_dims),
        /*replicate_on_last_tile_dim=*/false, metadata);
}
const int64_t group_size =
tile_assignment_last_dim_replicate.dimensions().back();
if (tile_assignment_last_dim_replicate.iota_) {
auto& iota = tile_assignment_last_dim_replicate.iota_.value();
if (iota.reshape_dims()[iota.transpose_perm().back()] == group_size) {
return HloSharding(tile_assignment_last_dim_replicate,
                         /*replicate_on_last_tile_dim=*/true, metadata);
}
absl::InlinedVector<int64_t, 6> new_reshape_dims;
absl::InlinedVector<int, 6> new_transpose_perm;
if (GroupMinorIotaDimsSorted(iota.reshape_dims(), iota.transpose_perm(),
group_size, new_reshape_dims,
new_transpose_perm)) {
return HloSharding(
TileAssignment(iota.dims(), new_reshape_dims, new_transpose_perm),
          /*replicate_on_last_tile_dim=*/true, metadata);
}
}
std::vector<int64_t> sorted_groups(
tile_assignment_last_dim_replicate.num_elements());
const int64_t num_groups =
tile_assignment_last_dim_replicate.num_elements() / group_size;
std::vector<int32_t> current_group_idx(num_groups, 0);
auto get_group_id = [&](absl::Span<const int64_t> indices) {
int64_t group_id = 0;
for (int64_t i = 0; i < indices.size() - 1; ++i) {
group_id *= tile_assignment_last_dim_replicate.dim(i);
group_id += indices[i];
}
return group_id;
};
tile_assignment_last_dim_replicate.Each(
[&](absl::Span<const int64_t> indices, const int64_t device) {
const int64_t group_id = get_group_id(indices);
sorted_groups[group_id * group_size + current_group_idx[group_id]++] =
device;
});
for (int i = 0; i < num_groups; ++i) {
std::sort(sorted_groups.begin() + i * group_size,
sorted_groups.begin() + (i + 1) * group_size);
}
absl::c_fill(current_group_idx, 0);
auto sorted_tile = std::make_shared<Array<int64_t>>(
tile_assignment_last_dim_replicate.dimensions());
sorted_tile->Each([&](absl::Span<const int64_t> indices, int64_t* device) {
const int64_t group_id = get_group_id(indices);
*device =
sorted_groups[group_id * group_size + current_group_idx[group_id]++];
});
return HloSharding(TileAssignment(std::move(sorted_tile)),
                     /*replicate_on_last_tile_dim=*/true, metadata);
}
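// Creates a sharding whose trailing tile dimensions are subgroups (e.g.
// MANUAL or REPLICATED). Size-1 subgroup dimensions are dropped, dimensions
// of the same type are merged, and the subgroup types are canonicalized into
// a fixed order; fully manual or fully replicated inputs normalize to
// Manual() or Replicate().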
HloSharding HloSharding::Subgroup(
const TileAssignment& tile_assignment,
absl::Span<const OpSharding::Type> subgroup_types,
absl::Span<const OpMetadata> metadata) {
if (subgroup_types.empty()) {
return HloSharding(tile_assignment,
                       /*replicate_on_last_tile_dim=*/false, metadata);
}
if (absl::c_all_of(
subgroup_types,
[&](const OpSharding::Type t) { return t == subgroup_types[0]; }) &&
Product(tile_assignment.dimensions().subspan(
0, tile_assignment.num_dimensions() - subgroup_types.size())) == 1) {
if (subgroup_types[0] == OpSharding::MANUAL) {
return Manual(metadata);
}
if (subgroup_types[0] == OpSharding::REPLICATED) {
return Replicate(metadata);
}
}
int64_t data_dims = tile_assignment.num_dimensions() - subgroup_types.size();
absl::InlinedVector<int, 6> perm(data_dims);
absl::c_iota(perm, 0);
static_assert(sizeof(std::vector<int>) >=
sizeof(absl::InlinedVector<int, 2>));
std::array<absl::InlinedVector<int, 2>, OpSharding::Type_ARRAYSIZE>
type_to_dims;
int subgroup_count = 0;
bool needs_merging = false;
absl::InlinedVector<int, 4> removed_dims;
for (int i = 0; i < subgroup_types.size(); ++i) {
if (tile_assignment.dim(i + data_dims) == 1) {
removed_dims.push_back(i + data_dims);
needs_merging = true;
continue;
}
auto& dims = type_to_dims[subgroup_types[i]];
if (!dims.empty()) {
needs_merging = true;
} else {
++subgroup_count;
}
dims.push_back(i + data_dims);
}
needs_merging |= subgroup_count > 1;
auto create_sharding = [](const TileAssignment& tiles,
absl::Span<const OpSharding::Type> types,
absl::Span<const OpMetadata> metadata) {
if (types.size() == 1 && types.back() == OpSharding::REPLICATED) {
return PartialTile(tiles, metadata);
}
if (types.size() == 1 && types.back() == OpSharding::MANUAL &&
tiles.num_elements() == tiles.dimensions().back()) {
return Manual(metadata);
}
if (!types.empty() && types.back() == OpSharding::REPLICATED) {
HloSharding sharding = PartialTile(tiles, metadata);
sharding.replicate_on_last_tile_dim_ = false;
for (const OpSharding::Type type : types) {
sharding.subgroup_types_.push_back(type);
}
return sharding;
}
return HloSharding(tiles, types, metadata);
};
if (needs_merging) {
auto data_tile_shape = tile_assignment.dimensions().subspan(0, data_dims);
absl::InlinedVector<int64_t, 6> merged_shape(data_tile_shape.begin(),
data_tile_shape.end());
absl::InlinedVector<int64_t, 6> transposed_shape = merged_shape;
std::vector<OpSharding::Type> merged_types;
static constexpr std::array<OpSharding::Type, OpSharding::Type_ARRAYSIZE>
kOrderedTypes = {OpSharding::MAXIMAL, OpSharding::TUPLE,
OpSharding::OTHER, OpSharding::MANUAL,
OpSharding::REPLICATED, OpSharding::UNKNOWN};
static_assert(kOrderedTypes[0] == 1 && kOrderedTypes[1] == 2 &&
kOrderedTypes[2] == 3 && kOrderedTypes[3] == 4 &&
kOrderedTypes[4] == 0 && kOrderedTypes[5] == 5);
for (OpSharding::Type type : kOrderedTypes) {
auto& dims = type_to_dims[type];
if (dims.empty()) continue;
int64_t dim_size = 1;
for (int64_t dim : dims) {
perm.push_back(dim);
dim_size *= tile_assignment.dim(dim);
transposed_shape.push_back(tile_assignment.dim(dim));
}
merged_shape.push_back(dim_size);
merged_types.push_back(type);
}
TileAssignment new_tile_assignment = [&] {
if (tile_assignment.iota_) {
absl::c_copy(removed_dims, std::back_inserter(perm));
auto transposed_iota = tile_assignment.iota_->Transpose(perm);
if (transposed_iota) {
return TileAssignment(merged_shape, transposed_iota->reshape_dims(),
transposed_iota->transpose_perm());
}
}
auto new_tiles = std::make_shared<Array<int64_t>>(transposed_shape);
new_tiles->Each([&](absl::Span<const int64_t> indices, int64_t* value) {
std::vector<int64_t> src_indices(tile_assignment.num_dimensions(), 0);
for (int64_t i = 0; i < indices.size(); ++i) {
src_indices[perm[i]] = indices[i];
}
*value = tile_assignment(src_indices);
});
new_tiles->Reshape(merged_shape);
return TileAssignment(std::move(new_tiles));
}();
return create_sharding(new_tile_assignment, merged_types, metadata);
}
return create_sharding(tile_assignment, subgroup_types, metadata);
}
HloSharding HloSharding::Tuple(const ShapeTree<HloSharding>& sub_shardings) {
std::vector<HloSharding> flattened_list;
flattened_list.reserve(sub_shardings.leaf_count());
for (const auto& index_to_sharding : sub_shardings.leaves()) {
flattened_list.push_back(index_to_sharding.second);
}
if (flattened_list.empty()) {
flattened_list.push_back(sub_shardings.element(ShapeIndex({})));
}
return HloSharding(flattened_list);
}
HloSharding HloSharding::Tuple(const Shape& tuple_shape,
absl::Span<const HloSharding> shardings) {
CHECK(tuple_shape.IsTuple()) << ShapeUtil::HumanString(tuple_shape);
for (auto& sharding : shardings) {
CHECK(!sharding.IsTuple())
<< sharding.ToString()
<< ", tuple shape = " << ShapeUtil::HumanString(tuple_shape);
}
std::vector<HloSharding> flattened_list(shardings.begin(), shardings.end());
if (!flattened_list.empty()) {
CHECK_EQ(flattened_list.size(), RequiredLeaves(tuple_shape))
<< "Flat list has " << flattened_list.size() << ", required "
<< RequiredLeaves(tuple_shape);
}
return HloSharding(std::move(flattened_list));
}
HloSharding HloSharding::SingleTuple(const Shape& tuple_shape,
const HloSharding& sharding) {
CHECK(tuple_shape.IsTuple()) << ShapeUtil::HumanString(tuple_shape);
CHECK(!sharding.IsTuple()) << sharding.ToString();
int64_t leaf_count = RequiredLeaves(tuple_shape);
std::vector<HloSharding> flattened_list;
flattened_list.resize(leaf_count, sharding);
return HloSharding(std::move(flattened_list));
}
HloSharding HloSharding::Single(const Shape& shape,
const HloSharding& sharding) {
return shape.IsTuple() ? SingleTuple(shape, sharding) : sharding;
}
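// Prints the textual form used in HLO, e.g. "{replicated}",
// "{maximal device=N}", or "{devices=[...]...}", with optional
// last_tile_dim_replicate, last_tile_dims, shard group, and metadata
// suffixes.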
void HloSharding::Print(Printer* printer, bool include_metadata) const {
if (IsTuple()) {
CHECK(metadata_.empty());
if (ABSL_PREDICT_FALSE(tuple_elements_.empty())) {
printer->Append("{}");
return;
}
printer->Append("{");
tuple_elements_[0].Print(printer, include_metadata);
for (int i = 1; i < tuple_elements_.size(); ++i) {
      if (i % 5 == 0) {
        // Emit an index marker every fifth element so long tuple shardings
        // stay readable.
        AppendCat(printer, ", /*index=", i, "*/");
      } else {
        printer->Append(", ");
      }
tuple_elements_[i].Print(printer, include_metadata);
}
printer->Append("}");
return;
}
auto print_metadata = [&] {
if (include_metadata && !metadata_.empty()) {
printer->Append(" metadata={");
if (metadata_.size() == 1) {
printer->Append(OpMetadataToString(metadata_.front()));
} else {
AppendJoin(printer, metadata_, ", ",
[](Printer* printer, auto& metadata) {
AppendCat(printer, "{", OpMetadataToString(metadata), "}");
});
}
printer->Append("}");
}
};
auto print_shard_group = [&] {
auto shard_group_str = shard_group_.ToString();
if (!shard_group_str.empty()) {
printer->Append(" " + shard_group_str);
}
};
if (replicated_) {
printer->Append("{replicated");
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
if (manual_) {
printer->Append("{manual");
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
if (unknown_) {
printer->Append("{unknown");
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
if (maximal_) {
AppendCat(printer, "{maximal device=",
static_cast<int64_t>(*tile_assignment_.array().begin()));
print_shard_group();
print_metadata();
printer->Append("}");
return;
}
auto print_last_tile_dims = [&] {
if (!subgroup_types_.empty()) {
auto op_sharding_type_to_string = [](OpSharding::Type type) {
switch (type) {
case OpSharding::MANUAL:
return "manual";
case OpSharding::MAXIMAL:
return "maximul";
case OpSharding::REPLICATED:
return "replicated";
default:
return "error_type.";
}
};
printer->Append(" last_tile_dims={");
AppendJoin(printer, subgroup_types_, ", ",
[&](Printer* printer, OpSharding::Type sharding_type) {
printer->Append(op_sharding_type_to_string(sharding_type));
});
printer->Append("}");
}
};
printer->Append("{");
tile_assignment_.Print(printer);
if (replicate_on_last_tile_dim_) {
printer->Append(" last_tile_dim_replicate");
}
print_last_tile_dims();
print_shard_group();
print_metadata();
printer->Append("}");
}
std::string HloSharding::ToString(bool include_metadata) const {
StringPrinter printer;
Print(&printer, include_metadata);
return std::move(printer).ToString();
}
bool HloSharding::UsesDevice(int64_t device) const {
if (IsTuple()) {
return absl::c_any_of(tuple_elements_, [&](const HloSharding& s) {
return s.UsesDevice(device);
});
}
return replicated_ || manual_ || tile_assignment_.UsesDevice(device);
}
std::map<int64_t, int64_t> HloSharding::UsedDevices(int64_t* count) const {
int64_t element_count = 1;
std::map<int64_t, int64_t> device_map;
if (IsTuple()) {
for (auto& tuple_element_sharding : tuple_elements()) {
auto unique_device = tuple_element_sharding.UniqueDevice();
if (unique_device) {
device_map[*unique_device] += 1;
}
}
element_count = tuple_elements().size();
} else {
auto unique_device = UniqueDevice();
if (unique_device) {
device_map[*unique_device] += 1;
}
}
if (count != nullptr) {
*count = element_count;
}
return device_map;
}
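// Returns the multi-dimensional tile index owned by `device`, truncated to
// the tiled data rank (subgroup dimensions are dropped). Only valid for
// tiled, non-maximal shardings.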
std::vector<int64_t> HloSharding::TileIndexForDevice(int64_t device) const {
CHECK(!maximal_);
CHECK(!IsManual());
CHECK(!IsUnknown());
CHECK(!IsTuple());
std::vector<int64_t> ret_index;
tile_assignment_.Each([&](absl::Span<const int64_t> index, int64_t d) {
if (d == device) {
ret_index = {index.begin(), index.end()};
}
});
CHECK(!ret_index.empty());
ret_index.resize(TiledDataRank());
return ret_index;
}
int64_t HloSharding::DeviceForTileIndex(absl::Span<const int64_t> index) const {
CHECK(!replicated_);
CHECK(!IsManual());
CHECK(!IsUnknown());
CHECK(!IsTuple());
if (maximal_) {
return *tile_assignment_.array().begin();
}
if (index.size() == TiledDataRank() &&
index.size() < tile_assignment_.num_dimensions()) {
std::vector<int64_t> first_subgroup_index(index.begin(), index.end());
for (int64_t i = 0; i < tile_assignment_.num_dimensions() - index.size();
++i) {
first_subgroup_index.push_back(0);
}
return tile_assignment_(first_subgroup_index);
}
return tile_assignment_(index);
}
std::vector<int64_t> HloSharding::TileOffsetForDevice(const Shape& shape,
int64_t device) const {
CHECK(!IsTuple());
CHECK(!IsManual());
CHECK(!IsUnknown());
if (maximal_) {
return std::vector<int64_t>(shape.dimensions_size(), 0);
}
CHECK_EQ(shape.dimensions_size(), TiledDataRank());
std::vector<int64_t> index = TileIndexForDevice(device);
for (int64_t i = 0; i < index.size(); ++i) {
const int64_t shape_dim = shape.dimensions(i);
index[i] = std::min(
index[i] * CeilOfRatio(shape_dim, tile_assignment_.dim(i)), shape_dim);
}
return index;
}
std::vector<int64_t> HloSharding::TileLimitForDevice(const Shape& shape,
int64_t device) const {
CHECK(!IsTuple());
CHECK(!IsManual());
CHECK(!IsUnknown());
if (maximal_) {
return std::vector<int64_t>(shape.dimensions().begin(),
shape.dimensions().end());
}
CHECK_EQ(shape.dimensions_size(), TiledDataRank());
std::vector<int64_t> index = TileIndexForDevice(device);
for (int64_t i = 0; i < index.size(); ++i) {
const int64_t shape_dim = shape.dimensions(i);
index[i] = std::min(
(index[i] + 1) * CeilOfRatio(shape_dim, tile_assignment_.dim(i)),
shape_dim);
}
return index;
}
int64_t HloSharding::RequiredLeaves(const Shape& shape) {
const int64_t leaf_count = ShapeUtil::GetLeafCount(shape);
return (leaf_count == 0) ? 1 : leaf_count;
}
absl::Status HloSharding::CheckLeafCount(const Shape& shape) const {
int64_t leaf_count = ShapeUtil::GetLeafCount(shape);
if (leaf_count == 0 && tuple_elements_.size() == 1) {
return absl::OkStatus();
}
TF_RET_CHECK(leaf_count == tuple_elements_.size())
<< "Shape " << ShapeUtil::HumanString(shape) << " has " << leaf_count
<< " leaf nodes while this sharding has " << tuple_elements_.size();
return absl::OkStatus();
}
absl::StatusOr<ShapeTree<HloSharding>> HloSharding::AsShapeTree(
const Shape& shape) const {
if (IsTuple()) {
ShapeTree<HloSharding> result(shape, HloSharding::Replicate());
TF_RETURN_IF_ERROR(CheckLeafCount(shape));
auto it = tuple_elements_.begin();
for (auto& index_to_sharding : result.leaves()) {
index_to_sharding.second = *it++;
}
return std::move(result);
} else {
return ShapeTree<HloSharding>(shape, *this);
}
}
absl::StatusOr<HloSharding> HloSharding::GetTupleSharding(
const Shape& shape) const {
if (IsTuple()) {
TF_RETURN_IF_ERROR(CheckLeafCount(shape));
return *this;
}
return SingleTuple(shape, *this);
}
HloSharding HloSharding::NormalizeTupleSharding(const Shape& shape) const {
if (shape.IsTuple() && !IsTuple()) {
return HloSharding::SingleTuple(shape, *this);
}
return *this;
}
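// Returns the single device this sharding is assigned to, if any: a maximal
// sharding, or a tuple whose every leaf is maximal on the same device.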
std::optional<int64_t> HloSharding::UniqueDevice() const {
if (IsTuple()) {
if (tuple_elements_.empty()) {
return std::nullopt;
}
std::optional<int64_t> unique_device;
for (auto& tuple_sharding : tuple_elements_) {
auto device = tuple_sharding.UniqueDevice();
if (!device || (unique_device && *device != *unique_device)) {
return std::nullopt;
}
unique_device = device;
}
return unique_device;
}
if (!replicated_ && maximal_) {
return static_cast<int64_t>(*tile_assignment_.array().begin());
}
return std::nullopt;
}
int64_t HloSharding::GetUniqueDevice() const {
auto device = UniqueDevice();
CHECK(device) << "Sharding does not have a unique device: " << *this;
return *device;
}
absl::Status HloSharding::ValidateTuple(
const Shape& shape, std::optional<int64_t> num_devices) const {
if (!shape.IsTuple()) {
return tsl::errors::InvalidArgument(
"Sharding is tuple-shaped but validation shape is not.");
}
TF_RETURN_IF_ERROR(CheckLeafCount(shape));
if (ShapeUtil::GetLeafCount(shape) == 0 && tuple_elements_.empty()) {
return absl::OkStatus();
}
ShapeTree<HloSharding> shape_tree = GetAsShapeTree(shape);
for (const auto& index_to_sharding : shape_tree.leaves()) {
absl::Status status = index_to_sharding.second.ValidateNonTuple(
ShapeUtil::GetSubshape(shape, index_to_sharding.first), num_devices);
if (!status.ok()) {
tsl::errors::AppendToMessage(
&status, StrCat("Note: While validating sharding tuple element ",
index_to_sharding.first.ToString(), " which is ",
index_to_sharding.second.ToString()));
return status;
}
}
return absl::OkStatus();
}
absl::Status HloSharding::Validate(const Shape& shape,
std::optional<int64_t> num_devices) const {
if (shape.IsToken()) {
return absl::OkStatus();
}
absl::Status status = IsTuple() ? ValidateTuple(shape, num_devices)
: ValidateNonTuple(shape, num_devices);
if (!status.ok()) {
tsl::errors::AppendToMessage(
&status, StrCat("Note: While validating sharding ", ToString(),
" against shape ", ShapeUtil::HumanString(shape)));
}
return status;
}
absl::Status HloSharding::ValidateNonTuple(
const Shape& shape, std::optional<int64_t> num_devices) const {
if (shape.IsTuple()) {
return absl::InvalidArgumentError(
"Validation shape is a tuple but sharding is not.");
}
if (replicated_) {
return absl::OkStatus();
}
bool all_devices_seen;
if (!tile_assignment_.iota_) {
absl::flat_hash_set<int64_t> seen_devices;
absl::Status status = tile_assignment_.array().EachStatus(
[&num_devices, &seen_devices](absl::Span<const int64_t> indices,
                                      int64_t device) {
if (num_devices.has_value() && device >= *num_devices) {
return absl::InvalidArgumentError(
absl::StrCat("device ", device, " > num_devices (",
*num_devices, ") in tile assignment"));
} else if (seen_devices.contains(device)) {
return absl::InvalidArgumentError(absl::StrCat(
"device ", device, " is not unique in tile assignment"));
}
seen_devices.insert(device);
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
all_devices_seen =
!num_devices.has_value() || seen_devices.size() == *num_devices;
} else {
all_devices_seen = !num_devices.has_value() ||
tile_assignment_.iota_->num_elements() == *num_devices;
}
if (IsTileMaximal() || IsManual() || IsUnknown()) {
return absl::OkStatus();
}
if (shape.rank() != TiledDataRank()) {
return tsl::errors::InvalidArgument(
"Number of tile assignment dimensions (excluding subgroups) is "
"different than the input rank. "
"sharding=",
ToString(), ", input_shape=", ShapeUtil::HumanString(shape));
}
if (!all_devices_seen) {
return tsl::errors::InvalidArgument("tile_assignment should have ",
*num_devices, " devices");
}
if (tile_assignment_.num_elements() == 1) {
return tsl::errors::InvalidArgument(
"Tile assignment only contains a single device. If a replicated "
"sharding was intended, use HloSharding::Replicated(). If a device "
"placement was intended, use HloSharding::AssignDevice()");
}
return absl::OkStatus();
}
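// Deserializes an OpSharding proto. Accepts either an explicit device list
// (tile_assignment_devices) or the compact iota encoding
// (iota_reshape_dims / iota_transpose_perm), and verifies that the products
// of the dimension lists agree without overflowing int64.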
absl::StatusOr<HloSharding> HloSharding::FromProto(
const OpSharding& proto) {
std::vector<OpMetadata> metadata(proto.metadata().begin(),
proto.metadata().end());
std::vector<int> subgroup_types_int(proto.last_tile_dims().begin(),
proto.last_tile_dims().end());
std::vector<OpSharding::Type> subgroup_types;
absl::c_transform(
subgroup_types_int, std::back_inserter(subgroup_types),
[](const int type) { return static_cast<OpSharding::Type>(type); });
if (proto.type() == OpSharding::TUPLE) {
TF_RET_CHECK(metadata.empty())
<< "Tuple sharding is expected to have no metadata.";
std::vector<HloSharding> tuple_shardings;
tuple_shardings.reserve(proto.tuple_shardings().size());
for (const OpSharding& tuple_sharding_proto : proto.tuple_shardings()) {
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(tuple_sharding_proto));
tuple_shardings.push_back(std::move(sharding));
}
return std::move(
HloSharding(std::move(tuple_shardings)).SetShardGroupFromProto(proto));
} else if (proto.type() == OpSharding::REPLICATED) {
return std::move(Replicate(metadata).SetShardGroupFromProto(proto));
} else if (proto.type() == OpSharding::MANUAL) {
return std::move(Manual(metadata).SetShardGroupFromProto(proto));
} else if (proto.type() == OpSharding::UNKNOWN) {
return std::move(Unknown(metadata).SetShardGroupFromProto(proto));
} else if (proto.tile_assignment_devices().size() == 1) {
return std::move(HloSharding(proto.tile_assignment_devices(0), metadata)
.SetShardGroupFromProto(proto));
} else if (!proto.iota_reshape_dims().empty() &&
absl::c_all_of(proto.iota_reshape_dims(),
[](int64_t d) { return d == 1; })) {
return std::move(HloSharding(0, metadata).SetShardGroupFromProto(proto));
}
  TF_RET_CHECK(proto.type() != OpSharding::MAXIMAL)
      << "Maximal sharding is expected to have a single device assignment, "
         "but "
      << proto.tile_assignment_devices().size() << " were provided.";
const bool use_iota_tile_assignments = !proto.iota_reshape_dims().empty();
if (use_iota_tile_assignments) {
TF_RET_CHECK(proto.tile_assignment_devices().empty());
TF_RET_CHECK(proto.iota_reshape_dims().size() ==
proto.iota_transpose_perm().size());
} else {
TF_RET_CHECK(proto.tile_assignment_devices().size() > 1)
<< proto.ShortDebugString();
}
TF_RET_CHECK(!proto.tile_assignment_dimensions().empty());
  auto product_no_overflow =
      [](absl::Span<const int64_t> dims) -> absl::StatusOr<int64_t> {
    int64_t product_of_dimensions = 1;
    bool any_overflow = false;
    for (auto dimension : dims) {
      bool overflow = false;
      std::tie(product_of_dimensions, overflow) =
          OverflowSafeMultiply(product_of_dimensions, dimension);
      // Fold each step's overflow flag into the aggregate so the check
      // below actually fires on overflow.
      any_overflow |= overflow;
    }
    TF_RET_CHECK(!any_overflow);
    return product_of_dimensions;
  };
TF_ASSIGN_OR_RETURN(int64_t product_of_dimensions,
product_no_overflow(proto.tile_assignment_dimensions()));
if (use_iota_tile_assignments) {
TF_ASSIGN_OR_RETURN(int64_t product_of_iota_dimensions,
product_no_overflow(proto.iota_reshape_dims()));
TF_RET_CHECK(product_of_dimensions == product_of_iota_dimensions);
} else {
TF_RET_CHECK(product_of_dimensions ==
proto.tile_assignment_devices().size());
}
auto create_tile_assignment = [&] {
if (use_iota_tile_assignments) {
return TileAssignment(proto.tile_assignment_dimensions(),
proto.iota_reshape_dims(),
proto.iota_transpose_perm());
}
auto tiles =
std::make_shared<Array<int64_t>>(proto.tile_assignment_dimensions());
absl::c_copy(proto.tile_assignment_devices(), tiles->begin());
return TileAssignment(std::move(tiles));
};
if (!subgroup_types.empty()) {
TF_RET_CHECK(!proto.replicate_on_last_tile_dim());
return std::move(
Subgroup(create_tile_assignment(), subgroup_types, metadata)
.SetShardGroupFromProto(proto));
}
if (proto.replicate_on_last_tile_dim()) {
return std::move(PartialTile(create_tile_assignment(), metadata)
.SetShardGroupFromProto(proto));
}
return std::move(HloSharding(create_tile_assignment(),
                               /*replicate_on_last_tile_dim=*/false, metadata)
.SetShardGroupFromProto(proto));
}
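// Serializes to an OpSharding proto, emitting the compact iota encoding when
// the tile assignment has one and the explicit device list otherwise.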
OpSharding HloSharding::ToProto() const {
OpSharding result;
if (IsTuple()) {
CHECK(metadata_.empty());
for (const HloSharding& element : tuple_elements_) {
*result.add_tuple_shardings() = element.ToProto();
}
result.set_type(OpSharding::TUPLE);
return result;
}
result.mutable_metadata()->Reserve(metadata_.size());
for (const auto& metadata : metadata_) {
*result.add_metadata() = metadata;
}
result.mutable_tile_assignment_dimensions()->Reserve(
tile_assignment_.num_dimensions());
absl::c_copy(tile_assignment_.dimensions(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_tile_assignment_dimensions()));
if (tile_assignment_.iota_) {
result.mutable_iota_reshape_dims()->Reserve(
tile_assignment_.iota_->reshape_dims().size());
absl::c_copy(tile_assignment_.iota_->reshape_dims(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_iota_reshape_dims()));
result.mutable_iota_transpose_perm()->Reserve(
tile_assignment_.iota_->transpose_perm().size());
absl::c_copy(tile_assignment_.iota_->transpose_perm(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_iota_transpose_perm()));
} else {
result.mutable_tile_assignment_devices()->Reserve(
tile_assignment_.num_elements());
absl::c_copy(tile_assignment_.array(),
tsl::protobuf::RepeatedFieldBackInserter(
result.mutable_tile_assignment_devices()));
}
if (IsReplicated()) {
result.set_type(OpSharding::REPLICATED);
result.clear_tile_assignment_dimensions();
} else if (IsTileMaximal()) {
result.set_type(OpSharding::MAXIMAL);
} else if (IsManual()) {
result.set_type(OpSharding::MANUAL);
result.clear_tile_assignment_dimensions();
} else if (IsUnknown()) {
result.set_type(OpSharding::UNKNOWN);
result.clear_tile_assignment_dimensions();
} else {
result.set_type(OpSharding::OTHER);
result.set_replicate_on_last_tile_dim(ReplicateOnLastTileDim());
for (auto type : subgroup_types_) {
result.add_last_tile_dims(type);
}
}
if (IsShardGroup()) {
result.set_is_shard_group(true);
result.set_shard_group_id(shard_group_.shard_group_id);
if (shard_group_.shard_as) {
result.set_shard_group_type(OpSharding::AS);
} else {
result.set_shard_group_type(OpSharding::LIKE);
}
}
return result;
}
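// Returns the shape of one tile: each tiled dimension of `shape` is divided
// by its tile count, rounding up (e.g. f32[10] tiled 4 ways gives f32[3]).
// Maximal, manual, and unknown shardings return the shape unchanged.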
Shape HloSharding::TileShape(const Shape& shape) const {
if (IsTileMaximal() || IsManual() || IsUnknown()) {
return shape;
}
Shape result_shape = shape;
for (int64_t i = 0; i < TiledDataRank(); ++i) {
result_shape.set_dimensions(
i, CeilOfRatio<int64_t>(shape.dimensions(i), tile_assignment_.dim(i)));
}
return result_shape;
}
Shape HloSharding::TileShape(const Shape& shape, int64_t device) const {
if (IsTileMaximal() || IsManual() || IsUnknown()) {
return shape;
}
std::vector<int64_t> index = TileIndexForDevice(device);
Shape result_shape = shape;
for (int64_t i = 0; i < index.size(); ++i) {
const int64_t shape_dim = shape.dimensions(i);
int64_t offset = std::min(
index[i] * CeilOfRatio(shape_dim, tile_assignment_.dim(i)), shape_dim);
int64_t limit = std::min(
(index[i] + 1) * CeilOfRatio(shape_dim, tile_assignment_.dim(i)),
shape_dim);
result_shape.set_dimensions(i, limit - offset);
}
return result_shape;
}
int64_t HloSharding::TotalNumTiles() const {
if (IsTileMaximal()) {
return 1;
}
CHECK(!IsManual());
CHECK(!IsUnknown());
return Product(absl::Span<const int64_t>(tile_assignment_.dimensions()));
}
int64_t HloSharding::NumTiles() const {
if (IsTileMaximal()) {
return 1;
}
CHECK(!IsManual());
CHECK(!IsUnknown());
return Product(absl::Span<const int64_t>(tile_assignment_.dimensions())
.subspan(0, TiledDataRank()));
}
int64_t HloSharding::NumTilesLeaf() const {
DCHECK(!IsTuple());
if (IsTileMaximalLeaf()) {
return 1;
}
CHECK(!IsManualLeaf() && !IsUnknownLeaf());
return Product(absl::Span<const int64_t>(tile_assignment_.dimensions())
.subspan(0, TiledDataRankLeaf()));
}
int64_t HloSharding::NumTiles(absl::Span<const int64_t> dims) const {
if (IsTileMaximal()) {
return 1;
}
CHECK(!IsManual());
CHECK(!ReplicateOnLastTileDim() ||
!absl::c_linear_search(dims, tile_assignment().num_dimensions() - 1));
int64_t num_tiles = 1;
for (auto d : dims) {
CHECK(d < tile_assignment().num_dimensions());
num_tiles *= tile_assignment().dim(d);
}
return num_tiles;
}
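// Returns the sub-sharding at `index` of a tuple sharding. tuple_elements_
// stores leaf shardings in traversal order, so the flat index of a subshape
// is the sum of the leaf counts of all siblings preceding it on the path
// from the root.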
HloSharding HloSharding::GetSubSharding(const Shape& shape,
const ShapeIndex& index) const {
CHECK(IsTuple());
int64_t sharding_index = 0;
const Shape* sub_shape = &shape;
for (int64_t idx : index) {
for (int64_t i = 0; i < idx; ++i) {
sharding_index += ShapeUtil::GetLeafCount(
ShapeUtil::GetSubshapeOneIndex(*sub_shape, i));
}
sub_shape = &ShapeUtil::GetSubshapeOneIndex(*sub_shape, idx);
}
if (sub_shape->IsTuple()) {
auto begin_it = tuple_elements_.begin() + sharding_index;
return HloSharding::Tuple(
*sub_shape,
absl::MakeConstSpan(
&*begin_it,
&*(begin_it + ShapeUtil::GetLeafCountTuple(*sub_shape))));
} else {
return tuple_elements_[sharding_index];
}
}
std::optional<HloSharding> HloSharding::ExtractSingleSharding() const {
if (!IsTuple()) {
return *this;
}
if (tuple_elements_.empty()) {
return std::nullopt;
}
for (int64_t i = 1; i < tuple_elements_.size(); ++i) {
if (tuple_elements_[0] != tuple_elements_[i]) {
return std::nullopt;
}
}
return tuple_elements_.front();
}
HloSharding HloSharding::WithMetadata(absl::Span<const OpMetadata> metadata,
bool overwrite) const {
auto assign_metadata = [&](HloSharding& sharding) {
if (sharding.metadata_.empty() || overwrite) {
sharding.metadata_.assign(metadata.begin(), metadata.end());
}
};
HloSharding sharding = *this;
if (sharding.IsTuple()) {
for (HloSharding& sub_sharding : sharding.tuple_elements()) {
assign_metadata(sub_sharding);
}
} else {
assign_metadata(sharding);
}
return sharding;
}
HloSharding HloSharding::WithoutMetadata() const {
HloSharding sharding = *this;
sharding.metadata_.clear();
for (HloSharding& sub_sharding : sharding.tuple_elements()) {
sub_sharding.metadata_.clear();
}
return sharding;
}
std::ostream& operator<<(std::ostream& out, const HloSharding& sharding) {
out << sharding.ToString();
return out;
}
} | #include <algorithm>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
Array<int64_t> MakeArray(absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> contents) {
Array<int64_t> a(dimensions);
std::copy(contents.begin(), contents.end(), a.begin());
return a;
}
OpMetadata GetMetadata(const std::string& op_name) {
OpMetadata metadata;
metadata.set_op_name(op_name);
return metadata;
}
std::vector<OpMetadata> SingleMetadata() { return {GetMetadata("a")}; }
std::vector<OpMetadata> ListMetadata() {
return {GetMetadata("b"), GetMetadata("c")};
}
class HloShardingTest : public HloTestBase {};
TEST_F(HloShardingTest, Replicate) {
HloSharding sharding = HloSharding::Replicate();
EXPECT_TRUE(sharding.IsReplicated());
EXPECT_TRUE(sharding.IsTileMaximal());
EXPECT_TRUE(sharding.UsesDevice(0));
EXPECT_TRUE(sharding.UsesDevice(65535));
HloSharding other = HloSharding::Replicate();
EXPECT_EQ(other, sharding);
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
                                 /*num_devices=*/2));
EXPECT_FALSE(sharding.HasUniqueDevice());
}
TEST_F(HloShardingTest, DevicePlacement) {
HloSharding sharding = HloSharding::AssignDevice(5);
EXPECT_FALSE(sharding.IsReplicated());
EXPECT_TRUE(sharding.IsTileMaximal());
EXPECT_FALSE(sharding.UsesDevice(0));
EXPECT_TRUE(sharding.UsesDevice(5));
EXPECT_EQ(5, sharding.GetUniqueDevice());
HloSharding other = HloSharding::Replicate();
EXPECT_NE(other, sharding);
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
                                 /*num_devices=*/6));
EXPECT_IS_NOT_OK(
      sharding.Validate(ShapeUtil::MakeShape(U32, {4}), /*num_devices=*/5));
ShapeTree<HloSharding> shape_tree =
sharding.GetAsShapeTree(ShapeUtil::MakeShape(U32, {4}));
EXPECT_EQ(shape_tree.element({}), sharding);
EXPECT_TRUE(shape_tree.IsLeaf({}));
}
TEST_F(HloShardingTest, ProtoRoundTrip) {
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
auto* tiled = proto.add_tuple_shardings();
tiled->set_type(OpSharding::OTHER);
tiled->add_tile_assignment_devices(0);
tiled->add_tile_assignment_devices(1);
tiled->add_tile_assignment_dimensions(1);
tiled->add_tile_assignment_dimensions(2);
*tiled->add_metadata() = GetMetadata("a");
*tiled->add_metadata() = GetMetadata("b");
auto* replicated = proto.add_tuple_shardings();
replicated->set_type(OpSharding::REPLICATED);
*replicated->add_metadata() = GetMetadata("c");
auto* manual = proto.add_tuple_shardings();
manual->set_type(OpSharding::MANUAL);
HloSharding sharding = HloSharding::FromProto(proto).value();
EXPECT_TRUE(protobuf_util::ProtobufEquals(proto, sharding.ToProto()));
}
TEST_F(HloShardingTest, IotaProtoRoundTrip) {
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
auto* tiled = proto.add_tuple_shardings();
tiled->set_type(OpSharding::OTHER);
tiled->add_tile_assignment_dimensions(6);
tiled->add_tile_assignment_dimensions(1);
tiled->add_iota_reshape_dims(3);
tiled->add_iota_reshape_dims(2);
tiled->add_iota_transpose_perm(1);
tiled->add_iota_transpose_perm(0);
*tiled->add_metadata() = GetMetadata("a");
*tiled->add_metadata() = GetMetadata("b");
auto* replicated = proto.add_tuple_shardings();
replicated->set_type(OpSharding::REPLICATED);
*replicated->add_metadata() = GetMetadata("c");
auto* manual = proto.add_tuple_shardings();
manual->set_type(OpSharding::MANUAL);
HloSharding sharding = HloSharding::FromProto(proto).value();
EXPECT_TRUE(protobuf_util::ProtobufEquals(proto, sharding.ToProto()));
}
TEST_F(HloShardingTest, Tile) {
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 0, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(F32, {4, 6}),
                                       /*num_devices=*/4));
}
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4, 6}),
                                       /*num_devices=*/2));
}
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4, 6}),
                                       /*num_devices=*/5));
}
{
Shape shape = ShapeUtil::MakeShape(U32, {4, 5});
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(F32, {3, 5}),
                                   /*num_devices=*/4));
EXPECT_EQ(0, sharding.DeviceForTileIndex({0, 0}));
EXPECT_EQ(3, sharding.DeviceForTileIndex({0, 1}));
EXPECT_EQ(2, sharding.DeviceForTileIndex({1, 0}));
EXPECT_EQ(1, sharding.DeviceForTileIndex({1, 1}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 0),
(std::vector<int64_t>{0, 0}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 3),
(std::vector<int64_t>{0, 3}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 2),
(std::vector<int64_t>{2, 0}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 1),
(std::vector<int64_t>{2, 3}));
EXPECT_FALSE(sharding.HasUniqueDevice());
}
}
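// "V1" shardings carry an explicit device array; "V2" shardings use the
// compact iota encoding (reshape dims plus a transpose permutation), e.g.
// IotaTile({2, 2}, {2, 2}, {1, 0}) denotes the device order 0,2,1,3. These
// tests confirm that the two encodings of the same assignment compare and
// hash as equal.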
TEST_F(HloShardingTest, V1V2TileEquivalence) {
{
HloSharding v1 = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
HloSharding v2 = HloSharding::IotaTile({2, 2});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::Tile(MakeArray({2, 2}, {0, 2, 1, 3}));
HloSharding v2 = HloSharding::IotaTile({2, 2}, {2, 2}, {1, 0});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Tile(MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}));
HloSharding v2 = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {2, 0, 1});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, V1V2PartialTileEquivalence) {
{
HloSharding v1 = HloSharding::PartialTile(MakeArray({2, 2}, {0, 1, 2, 3}));
HloSharding v2 = HloSharding::PartialTile(TileAssignment({2, 2}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::PartialTile(MakeArray({2, 2}, {0, 2, 1, 3}));
HloSharding v2 =
HloSharding::PartialTile(TileAssignment({2, 2}, {2, 2}, {1, 0}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::PartialTile(
MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}));
HloSharding v2 = HloSharding::PartialTile(
TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, V1V2SubgroupEquivalence) {
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2}, {0, 1, 2, 3}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 = HloSharding::Subgroup(
TileAssignment({2, 2}), {OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2}, {0, 2, 1, 3}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment({2, 2}, {2, 2}, {1, 0}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, EmptySingleTuple) {
HloSharding sharding = HloSharding::SingleTuple(ShapeUtil::MakeTupleShape({}),
HloSharding::AssignDevice(0));
EXPECT_TRUE(sharding.ExtractSingleSharding());
}
TEST_F(HloShardingTest, EmptySingleTupleIsNotShardGroup) {
HloSharding sharding = HloSharding::SingleTuple(ShapeUtil::MakeTupleShape({}),
HloSharding::AssignDevice(0));
EXPECT_FALSE(sharding.IsShardGroup());
EXPECT_FALSE(sharding.IsShardAs());
EXPECT_FALSE(sharding.IsShardLike());
}
TEST_F(HloShardingTest, NestedTuple) {
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3})}),
ShapeUtil::MakeShape(F32, {4, 6}),
});
HloSharding tiled_sharding = HloSharding::Tile(Array<int64_t>({{0, 1}}));
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
*proto.add_tuple_shardings() = HloSharding::Replicate().ToProto();
*proto.add_tuple_shardings() = HloSharding::AssignDevice(0).ToProto();
*proto.add_tuple_shardings() = tiled_sharding.ToProto();
HloSharding tuple_sharding = HloSharding::FromProto(proto).value();
ShapeTree<HloSharding> shape_tree =
tuple_sharding.GetAsShapeTree(nested_tuple_shape);
EXPECT_EQ(shape_tree.element({0}), HloSharding::Replicate());
EXPECT_EQ(shape_tree.element({1, 0}), HloSharding::AssignDevice(0));
EXPECT_EQ(shape_tree.element({2}), tiled_sharding);
  EXPECT_IS_OK(
      tuple_sharding.Validate(nested_tuple_shape, /*num_devices=*/2));
EXPECT_IS_NOT_OK(tuple_sharding.Validate(ShapeUtil::MakeTupleShape({}),
                                       /*num_devices=*/5));
EXPECT_IS_NOT_OK(tuple_sharding.Validate(ShapeUtil::MakeShape(F32, {}),
                                       /*num_devices=*/5));
}
TEST_F(HloShardingTest, NormalizeTrivialSubgroupToManual) {
HloSharding sharding =
HloSharding::Subgroup(MakeArray({1, 2, 1}, {0, 1}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_TRUE(sharding.IsManual());
}
TEST_F(HloShardingTest, Hash) {
auto hash_compare_equal = [](const HloSharding& a, const HloSharding& b) {
if (absl::HashOf(a) != absl::HashOf(b)) {
return false;
}
return a == b;
};
{
HloSharding sharding1 = HloSharding::Replicate();
HloSharding sharding2 = HloSharding::Replicate();
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::AssignDevice(1);
HloSharding sharding2 = HloSharding::AssignDevice(1);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::AssignDevice(1);
HloSharding sharding2 = HloSharding::AssignDevice(2);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
HloSharding sharding2 = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::IotaTile({3, 4});
HloSharding sharding2 = HloSharding::Tile(
MakeArray({3, 4}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}));
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
HloSharding default_sharding = HloSharding::Replicate();
{
ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),
default_sharding);
HloSharding sharding1 = HloSharding::Replicate();
HloSharding sharding2 = HloSharding::Tuple(shape_tree);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),
default_sharding);
HloSharding sharding1 = HloSharding::Tuple(shape_tree);
HloSharding sharding2 = HloSharding::Tuple(shape_tree);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree1(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree1.mutable_element({0}) = HloSharding::Replicate();
ShapeTree<HloSharding> shape_tree2(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree2.mutable_element({0}) = HloSharding::AssignDevice(0);
HloSharding sharding1 = HloSharding::Tuple(shape_tree1);
HloSharding sharding2 = HloSharding::Tuple(shape_tree2);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree1(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree1.mutable_element({0}) = HloSharding::AssignDevice(0);
ShapeTree<HloSharding> shape_tree2(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree2.mutable_element({0}) = HloSharding::AssignDevice(0);
HloSharding sharding1 = HloSharding::Tuple(shape_tree1);
HloSharding sharding2 = HloSharding::Tuple(shape_tree2);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
}
using ShardingWithMetadataParamType =
std::tuple<std::vector<OpMetadata>, std::string>;
TEST_F(HloShardingTest, ToStringReplicatedTest) {
HloSharding sharding = HloSharding::Replicate();
EXPECT_EQ(sharding.ToString(), "{replicated}");
}
class HloReplicateShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloReplicateShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::Replicate(std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(/*include_metadata=*/false), "{replicated}");
EXPECT_EQ(sharding.ToString(/*include_metadata=*/true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloReplicateShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{replicated}"),
std::make_tuple(SingleMetadata(),
"{replicated metadata={op_name=\"a\"}}"),
std::make_tuple(
ListMetadata(),
"{replicated metadata={{op_name=\"b\"}, {op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringAssignDeviceTest) {
HloSharding sharding = HloSharding::AssignDevice(7);
EXPECT_EQ(sharding.ToString(), "{maximal device=7}");
}
class HloAssignDeviceShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloAssignDeviceShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::AssignDevice(7, std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(/*include_metadata=*/false),
"{maximal device=7}");
EXPECT_EQ(sharding.ToString(/*include_metadata=*/true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloAssignDeviceShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{maximal device=7}"),
std::make_tuple(SingleMetadata(),
"{maximal device=7 metadata={op_name=\"a\"}}"),
std::make_tuple(
ListMetadata(),
"{maximal device=7 metadata={{op_name=\"b\"}, {op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringTiledTest) {
HloSharding sharding =
HloSharding::Tile(Array3D<int64_t>({{{2, 3}}, {{5, 7}}}));
EXPECT_EQ(sharding.ToString(), "{devices=[2,1,2]2,3,5,7}");
}
TEST_F(HloShardingTest, ToStringIotaTiledTest) {
HloSharding sharding = HloSharding::IotaTile({3, 4}, {2, 2, 3}, {2, 1, 0});
EXPECT_EQ(sharding.ToString(), "{devices=[3,4]<=[2,2,3]T(2,1,0)}");
}
class HloTiledShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloTiledShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::Tile(
Array3D<int64_t>({{{2, 3}}, {{5, 7}}}), std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(/*include_metadata=*/false),
"{devices=[2,1,2]2,3,5,7}");
EXPECT_EQ(sharding.ToString(/*include_metadata=*/true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloTiledShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{devices=[2,1,2]2,3,5,7}"),
std::make_tuple(SingleMetadata(),
"{devices=[2,1,2]2,3,5,7 metadata={op_name=\"a\"}}"),
std::make_tuple(ListMetadata(),
"{devices=[2,1,2]2,3,5,7 metadata={{op_name=\"b\"}, "
"{op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringTupleTest) {
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(), HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3)});
EXPECT_EQ(sharding.ToString(),
"{{replicated}, {devices=[1,2]3,5}, {maximal device=3}}");
}
TEST_F(HloShardingTest, ToStringTupleWithMetadataTest) {
auto metadata = SingleMetadata();
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate({GetMetadata("d")}),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, {GetMetadata("e")})});
EXPECT_EQ(sharding.ToString(/*include_metadata=*/false),
"{{replicated}, {devices=[1,2]3,5}, {maximal device=3}}");
EXPECT_EQ(sharding.ToString(/*include_metadata=*/true),
"{{replicated metadata={op_name=\"d\"}}, {devices=[1,2]3,5}, "
"{maximal device=3 metadata={op_name=\"e\"}}}");
}
TEST_F(HloShardingTest, OstreamTest) {
HloSharding sharding =
HloSharding::Tile(Array4D<int64_t>({{{{0, 1}, {2, 3}}}}));
std::ostringstream oss;
oss << sharding;
EXPECT_EQ(oss.str(), "{devices=[1,1,2,2]0,1,2,3}");
}
class HloParseShardingWithMetadataTest
: public ::testing::TestWithParam<std::vector<OpMetadata>> {};
TEST_P(HloParseShardingWithMetadataTest, ParseHloString) {
auto check = [](const HloSharding& sharding) {
TF_ASSERT_OK_AND_ASSIGN(
auto parsed_sharding,
ParseSharding(sharding.ToString(/*include_metadata=*/true)));
EXPECT_EQ(sharding, parsed_sharding);
};
check(HloSharding::Replicate(GetParam()));
check(HloSharding::AssignDevice(2, GetParam()));
check(HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}}), GetParam()));
check(HloSharding::Tuple(ShapeUtil::MakeTupleShape({}),
{HloSharding::Replicate(GetParam())}));
{
auto tuple_shape =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 1, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 7})});
check(HloSharding::Tuple(
tuple_shape,
{HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}})),
HloSharding::Replicate(GetParam()), HloSharding::AssignDevice(1)}));
}
{
auto tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 1, 5, 7}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 7})})});
std::vector<HloSharding> leaf_shardings = {
HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}})),
HloSharding::Replicate(), HloSharding::AssignDevice(1, GetParam())};
ShapeTree<HloSharding> sharding_tree(tuple_shape, HloSharding::Replicate());
auto it = leaf_shardings.begin();
for (auto& index_to_sharding : sharding_tree.leaves()) {
index_to_sharding.second = *it++;
}
check(HloSharding::Tuple(sharding_tree));
}
}
INSTANTIATE_TEST_SUITE_P(ParseHloString, HloParseShardingWithMetadataTest,
::testing::Values(std::vector<OpMetadata>(),
SingleMetadata(), ListMetadata()));
TEST_F(HloShardingTest, WithMetadataNoOverwrite) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_new_metadata =
sharding.WithMetadata(SingleMetadata(), /*overwrite=*/false);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata().front(), SingleMetadata().front()));
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), /*overwrite=*/false);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding.metadata().front(), sharding_new_metadata.metadata().front()));
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, SingleMetadata())});
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), /*overwrite=*/false);
EXPECT_TRUE(sharding_new_metadata.metadata().empty());
ASSERT_TRUE(sharding_new_metadata.IsTuple());
ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);
ASSERT_EQ(sharding_new_metadata.tuple_elements()[0].metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[0].metadata().front(),
SingleMetadata().front()));
ASSERT_EQ(sharding_new_metadata.tuple_elements()[1].metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[1].metadata()[i],
ListMetadata()[i]));
}
ASSERT_EQ(sharding_new_metadata.tuple_elements()[2].metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[2].metadata().front(),
SingleMetadata().front()));
}
}
TEST_F(HloShardingTest, WithMetadataOverwrite) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_new_metadata =
sharding.WithMetadata(SingleMetadata(), /*overwrite=*/true);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata().front(), SingleMetadata().front()));
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), /*overwrite=*/true);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata()[i], ListMetadata()[i]));
}
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, SingleMetadata())});
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), /*overwrite=*/true);
EXPECT_TRUE(sharding_new_metadata.metadata().empty());
ASSERT_TRUE(sharding_new_metadata.IsTuple());
ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);
for (const auto& sub_sharding : sharding_new_metadata.tuple_elements()) {
ASSERT_EQ(sub_sharding.metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(sub_sharding.metadata()[i],
ListMetadata()[i]));
}
}
}
}
TEST_F(HloShardingTest, WithoutMetadata) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, ListMetadata())});
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
ASSERT_TRUE(sharding_no_metadata.IsTuple());
EXPECT_EQ(sharding_no_metadata.tuple_elements().size(), 3);
for (const auto& sub_sharding : sharding_no_metadata.tuple_elements()) {
EXPECT_TRUE(sub_sharding.metadata().empty());
}
}
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_sharding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_sharding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1ebc940d-e527-4074-8f1f-bd379b98050d | cpp | tensorflow/tensorflow | hlo_matchers | tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.cc | third_party/xla/xla/hlo/utils/hlo_matchers_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
namespace {
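// Base class for viewing one "row" of a flattened tensor: every index is held
// fixed except `axis`, and the elements along `axis` are exposed through a
// precomputed offset and stride.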
class StridedArrayViewBase {
protected:
StridedArrayViewBase(ArrayRef<int64_t> shape, ArrayRef<int64_t> index,
int64_t axis) {
assert(shape.size() == index.size());
assert(axis < shape.size());
assert(axis >= 0);
assert(index[axis] == 0);
offset_ = IndexToOffset(shape, index);
stride_ = StrideForAxis(shape, axis);
size_ = shape[axis];
}
int64_t size() const { return size_; }
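// Advances `index` to the next tensor index over `shape` in row-major
// (odometer) order, keeping `fixed_axis` pinned at 0; returns std::nullopt
// once the iteration is exhausted.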
static std::optional<SmallVector<int64_t>> NextTensorIndex(
SmallVector<int64_t> index, ArrayRef<int64_t> shape, int64_t fixed_axis) {
#ifndef NDEBUG
assert(shape.size() == index.size());
assert(fixed_axis < shape.size());
assert(fixed_axis >= 0);
assert(index[fixed_axis] == 0);
for (size_t i = 0; i < shape.size(); ++i) {
assert(index[i] < shape[i]);
assert(index[i] >= 0);
}
#endif
for (int64_t dim = shape.size() - 1; dim >= 0; --dim) {
if (dim == fixed_axis) continue;
++index[dim];
if (index[dim] < shape[dim]) return std::move(index);
index[dim] = 0;
}
return std::nullopt;
}
protected:
int64_t OffsetForIndex(int64_t i) const { return offset_ + i * stride_; }
private:
static int64_t StrideForAxis(ArrayRef<int64_t> shape, int64_t axis) {
int64_t stride = 1;
for (int64_t dim = shape.size() - 1; dim > axis; --dim) {
stride *= shape[dim];
}
return stride;
}
static int64_t IndexToOffset(ArrayRef<int64_t> shape,
ArrayRef<int64_t> index) {
#ifndef NDEBUG
assert(shape.size() == index.size());
for (size_t i = 0; i < shape.size(); ++i) {
assert(index[i] < shape[i]);
assert(index[i] >= 0);
}
#endif
int64_t offset = 0;
int64_t stride = 1;
for (int64_t dim = shape.size() - 1; dim >= 0; --dim) {
offset += index[dim] * stride;
stride *= shape[dim];
}
return offset;
}
int64_t offset_;
int64_t stride_;
int64_t size_;
};
template <typename T>
class StridedArrayView;
template <>
class StridedArrayView<DenseIntElementsAttr> : StridedArrayViewBase {
public:
StridedArrayView(const DenseIntElementsAttr& data, ArrayRef<int64_t> shape,
ArrayRef<int64_t> index, int64_t axis)
: StridedArrayViewBase(shape, index, axis), data_(data) {
int64_t element_count = 1;
for (int64_t i = 0, e = shape.size(); i < e; ++i) {
element_count *= shape[i];
}
assert(data.getNumElements() == element_count);
}
using StridedArrayViewBase::NextTensorIndex;
using StridedArrayViewBase::size;
int64_t operator[](int64_t i) const {
return data_.getValues<APInt>()[OffsetForIndex(i)].getSExtValue();
}
private:
const DenseIntElementsAttr& data_;
};
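// Matches a BroadcastInDimOp of an IotaOp whose broadcast_dimensions
// attribute equals `dimensions`.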
bool MatchIotaBroadCastInDim(DenseIntElementsAttr dimensions, Value iota) {
auto iota_broadcast =
dyn_cast_or_null<mhlo::BroadcastInDimOp>(iota.getDefiningOp());
if (!iota_broadcast || iota_broadcast.getBroadcastDimensions() != dimensions)
return false;
if (!isa_and_nonnull<mhlo::IotaOp>(
iota_broadcast.getOperand().getDefiningOp()))
return false;
return true;
}
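// Matches a constant tensor whose values along the single reduce dimension in
// `dimensions` are 0, 1, 2, ... for every combination of the remaining
// indices, i.e. a materialized iota.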
bool MatchIotaConst(DenseIntElementsAttr dimensions, Value iota) {
DenseIntElementsAttr iota_const_attr;
if (!matchPattern(iota, m_Constant(&iota_const_attr))) return false;
auto iota_type = iota_const_attr.getType();
auto iota_shape = iota_type.getShape();
auto reduce_dim = (*dimensions.value_begin<APInt>()).getSExtValue();
if (reduce_dim < 0) reduce_dim += iota_type.getRank();
auto index =
std::optional<SmallVector<int64_t>>(std::in_place, iota_type.getRank());
while (index.has_value()) {
StridedArrayView<DenseIntElementsAttr> array_view(
iota_const_attr, iota_shape, *index, reduce_dim);
for (int64_t i = 0; i < array_view.size(); ++i) {
if (array_view[i] != i) return false;
}
index = StridedArrayView<DenseIntElementsAttr>::NextTensorIndex(
std::move(*index), iota_shape, reduce_dim);
}
return true;
}
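// Matches a 1-D IotaOp reshaped so that the result dimension indexed by
// `dimensions` keeps the iota's size and every other result dimension is 1.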
bool MatchReshapedIota(DenseIntElementsAttr dimensions, Value iota) {
if (dimensions.getNumElements() != 1) return false;
auto reshape_op = dyn_cast_or_null<mhlo::ReshapeOp>(iota.getDefiningOp());
if (!reshape_op) return false;
auto operand_type =
mlir::dyn_cast<RankedTensorType>(reshape_op.getOperand().getType());
if (!operand_type || !operand_type.hasStaticShape()) return false;
auto reshape_type = mlir::cast<RankedTensorType>(reshape_op.getType());
if (operand_type.getRank() != 1) return false;
if (!dyn_cast_or_null<mhlo::IotaOp>(reshape_op.getOperand().getDefiningOp()))
return false;
int64_t iota_dim = (*dimensions.value_begin<APInt>()).getSExtValue();
for (int64_t i = 0, e = reshape_type.getRank(); i < e; ++i) {
if (i == iota_dim) {
if (reshape_type.getDimSize(i) != operand_type.getDimSize(0))
return false;
} else if (reshape_type.getDimSize(i) != 1) {
return false;
}
}
return true;
}
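// Matches an IotaOp whose iota_dimension equals the single entry of
// `dimensions`.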
bool MatchSingleIota(DenseIntElementsAttr dimensions, Value iota) {
auto iota_op = dyn_cast_or_null<mhlo::IotaOp>(iota.getDefiningOp());
if (!iota_op || dimensions.getNumElements() != 1) return false;
auto dim = *dimensions.value_begin<APInt>();
return dim == iota_op.getIotaDimension();
}
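// Matches a BroadcastInDimOp (with broadcast_dimensions == `dimensions`) of a
// constant whose flattened values are 0, 1, 2, ...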
bool MatchConstIotaBroadCastInDim(DenseIntElementsAttr dimensions, Value iota) {
if (dimensions.getNumElements() != 1) return false;
auto iota_broadcast =
dyn_cast_or_null<mhlo::BroadcastInDimOp>(iota.getDefiningOp());
if (!iota_broadcast || iota_broadcast.getBroadcastDimensions() != dimensions)
return false;
DenseElementsAttr range_const;
if (!matchPattern(iota_broadcast.getOperand(), m_Constant(&range_const)))
return false;
int index = 0;
for (auto value : range_const.getValues<APInt>()) {
if (value != index++) return false;
}
return true;
}
}  // namespace
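// Returns true if `iota` produces iota values along `dimensions` in any of
// the forms recognized by the helpers above.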
bool MatchIota(DenseIntElementsAttr dimensions, Value iota) {
return MatchSingleIota(dimensions, iota) ||
MatchIotaBroadCastInDim(dimensions, iota) ||
MatchReshapedIota(dimensions, iota) ||
MatchConstIotaBroadCastInDim(dimensions, iota) ||
MatchIotaConst(dimensions, iota);
}
}  // namespace odml
} | #include "xla/hlo/utils/hlo_matchers.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace op = xla::testing::opcode_matchers;
using ::testing::_;
using ::testing::Eq;
using ::testing::HasSubstr;
namespace xla {
namespace {
using HloMatchersTest = HloTestBase;
std::string DescribeHloMatcher(
const ::testing::Matcher<const HloInstruction*>& m) {
std::stringstream ss;
m.DescribeTo(&ss);
return ss.str();
}
template <typename M, typename T>
std::string Explain(const T& t, const M& m) {
::testing::StringMatchResultListener listener;
EXPECT_THAT(t, ::testing::Not(m));
EXPECT_FALSE(m.MatchAndExplain(t, &listener));
return listener.str();
}
TEST_F(HloMatchersTest, Test) {
auto shape = ShapeUtil::MakeShape(F32, {1});
auto param = HloInstruction::CreateParameter(0, shape, "param");
auto mul = HloInstruction::CreateBinary(shape, HloOpcode::kMultiply,
param.get(), param.get());
auto add = HloInstruction::CreateBinary(shape, HloOpcode::kAdd, param.get(),
mul.get());
EXPECT_THAT(add.get(), op::Add());
EXPECT_THAT(add.get(), op::Add(op::Parameter(), op::Multiply()));
EXPECT_THAT(add.get(),
op::Add(op::Parameter(), op::Multiply(_, op::Parameter())));
EXPECT_THAT(
Explain(add.get(), op::Parameter()),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply))"));
EXPECT_THAT(
Explain(add.get(), op::Add(op::Parameter())),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply)) "
"has too many operands (got 2, want 1)"));
EXPECT_THAT(
Explain(add.get(), op::Add(op::Parameter(), op::Parameter())),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply))"
"\noperand 1:\n\t"
"%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} %param)\n"
"doesn't match expected:\n\t"
"parameter"
", (%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} "
"%param))"));
EXPECT_THAT(
Explain(add.get(),
op::Add(op::Parameter(), op::Multiply(op::Add(), op::Add()))),
Eq("(%add = f32[1]{0} add(f32[1]{0} %param, f32[1]{0} %multiply))"
"\noperand 1:\n\t"
"%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} %param)\n"
"doesn't match expected:\n\t"
"multiply(add, add)"
", (%multiply = f32[1]{0} multiply(f32[1]{0} %param, f32[1]{0} "
"%param))\n"
"operand 0:\n\t"
"%param = f32[1]{0} parameter(0)\n"
"doesn't match expected:\n\t"
"add, (%param = f32[1]{0} parameter(0))"));
}
TEST_F(HloMatchersTest, CustomCallMatcher) {
auto c1 =
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1, 2, 3}));
auto c2 =
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({1, 2, 3}));
auto call = HloInstruction::CreateCustomCall(
ShapeUtil::MakeShape(F32, {1}), {c1.get(), c2.get()}, "foo_target");
EXPECT_THAT(call.get(), op::CustomCall());
EXPECT_THAT(call.get(), op::CustomCall(c1.get(), c2.get()));
EXPECT_THAT(call.get(), op::CustomCall("foo_target"));
EXPECT_THAT(call.get(), op::CustomCall("foo_target", c1.get(), c2.get()));
EXPECT_THAT(call.get(), op::CustomCall(::testing::StartsWith("foo")));
EXPECT_THAT(call.get(),
op::CustomCall(::testing::Not(::testing::StartsWith("bar"))));
EXPECT_THAT(call.get(), ::testing::Not(op::CustomCall(c1.get())));
EXPECT_THAT(call.get(),
::testing::Not(op::CustomCall(::testing::StartsWith("bar"))));
EXPECT_THAT(Explain(call.get(), op::CustomCall("bar")),
"(%custom-call = f32[1]{0} custom-call(f32[3]{0} %constant, "
"s32[3]{0} %constant), custom_call_target=\"foo_target\") "
"custom-call with call target that isn't equal to \"bar\"");
EXPECT_THAT(DescribeHloMatcher(op::CustomCall("foo_target")),
R"(custom-call with call target that is equal to "foo_target")");
}
TEST_F(HloMatchersTest, ShapeMatcher) {
auto p0 = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 7}, {0, 1}), "param");
EXPECT_THAT(p0.get(), op::Shape(ShapeUtil::MakeShape(F32, {5, 7})));
EXPECT_THAT(p0.get(), op::Shape("f32[5,7]"));
EXPECT_THAT(
p0.get(),
::testing::Not(op::ShapeWithLayout(ShapeUtil::MakeShape(F32, {5, 7}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::ShapeWithLayout("f32[5,7]")));
EXPECT_THAT(p0.get(),
::testing::Not(op::Shape(ShapeUtil::MakeShape(F32, {7, 5}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::Shape("f32[7,5]")));
EXPECT_THAT(
p0.get(),
::testing::Not(op::ShapeWithLayout(ShapeUtil::MakeShape(F32, {7, 5}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::ShapeWithLayout("f32[7,5]")));
EXPECT_THAT(p0.get(), op::Shape(ShapeUtil::MakeShapeWithDenseLayout(
F32, {5, 7}, {0, 1})));
EXPECT_THAT(p0.get(), op::Shape("f32[5,7]{0,1}"));
EXPECT_THAT(p0.get(), op::ShapeWithLayout(ShapeUtil::MakeShapeWithDenseLayout(
F32, {5, 7}, {0, 1})));
EXPECT_THAT(p0.get(), op::ShapeWithLayout("f32[5,7]{0,1}"));
EXPECT_THAT(p0.get(),
::testing::Not(op::ShapeWithLayout(
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 7}, {1, 0}))));
EXPECT_THAT(p0.get(), ::testing::Not(op::ShapeWithLayout("f32[5,7]{1,0}")));
EXPECT_THAT(Explain(p0.get(), op::Shape(ShapeUtil::MakeShape(F32, {7, 5}))),
"%param = f32[5,7]{0,1} parameter(0) has incorrect shape "
"(expected: f32[7,5])");
EXPECT_THAT(
Explain(p0.get(), op::ShapeWithLayout(ShapeUtil::MakeShapeWithDenseLayout(
F32, {7, 5}, {1, 0}))),
"%param = f32[5,7]{0,1} parameter(0) has incorrect shape "
"(expected: f32[7,5]{1,0})");
}
TEST_F(HloMatchersTest, ShardingMatcher) {
auto p0 = HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {5}),
"param.0");
p0->clear_sharding();
auto p1 = HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {7}),
"param.1");
p1->set_sharding(HloSharding::AssignDevice(1));
auto tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {7}), ShapeUtil::MakeShape(S32, {9}),
ShapeUtil::MakeShape(F32, {11})});
auto p2 = HloInstruction::CreateParameter(1, tuple_shape, "param.2");
Array<int64_t> assignment({2});
assignment.SetValues({0, 1});
auto sharding = HloSharding::Tuple(
tuple_shape, {HloSharding::Tile(assignment), HloSharding::AssignDevice(1),
HloSharding::Replicate()});
p2->set_sharding(sharding);
EXPECT_THAT(p0.get(), op::NoSharding());
EXPECT_THAT(p0.get(),
::testing::Not(op::Sharding(HloSharding::AssignDevice(1))));
EXPECT_THAT(p1.get(), ::testing::Not(op::NoSharding()));
EXPECT_THAT(p1.get(),
::testing::Not(op::Sharding(HloSharding::AssignDevice(0))));
EXPECT_THAT(p1.get(), op::Sharding(HloSharding::AssignDevice(1)));
EXPECT_THAT(
p2.get(),
op::Sharding("{{devices=[2]0,1}, {maximal device=1}, {replicated}}"));
EXPECT_THAT(Explain(p0.get(), op::Sharding(HloSharding::AssignDevice(1))),
"%param.0 = f32[5]{0} parameter(0) has no sharding (expected: "
"{maximal device=1})");
EXPECT_THAT(Explain(p1.get(), op::NoSharding()),
"%param.1 = f32[7]{0} parameter(1), sharding={maximal device=1} "
"expected to have no sharding.");
EXPECT_THAT(Explain(p1.get(), op::Sharding(HloSharding::AssignDevice(0))),
"%param.1 = f32[7]{0} parameter(1), sharding={maximal device=1} "
"has incorrect sharding (expected: {maximal device=0})");
}
TEST_F(HloMatchersTest, DotMatcher) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[256,1024] parameter(1)
ROOT dot = f32[1,1024] dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Dot(op::Parameter(0), op::Parameter(1),
/*lhs_contracting_dim=*/1,
/*rhs_contracting_dim=*/0));
EXPECT_THAT(
Explain(root, op::Dot(op::Parameter(0), op::Parameter(1),
/*lhs_contracting_dim=*/0,
/*rhs_contracting_dim=*/0)),
"(%dot = f32[1,1024]{1,0} dot(f32[1,256]{1,0} %arg0, f32[256,1024]{1,0} "
"%arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}) has wrong "
"lhs_contracting_dimensions (got {1} want {0})");
EXPECT_THAT(
Explain(root, op::Dot(op::Parameter(0), op::Parameter(1),
/*lhs_contracting_dim=*/1,
/*rhs_contracting_dim=*/1)),
"(%dot = f32[1,1024]{1,0} dot(f32[1,256]{1,0} %arg0, f32[256,1024]{1,0} "
"%arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}) has wrong "
"rhs_contracting_dimensions (got {0} want {1})");
}
TEST_F(HloMatchersTest, ComparisonMatcher) {
auto shape = ShapeUtil::MakeShape(F32, {1});
auto p0 = HloInstruction::CreateParameter(0, shape, "param.0");
auto p1 = HloInstruction::CreateParameter(1, shape, "param.1");
auto eq = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kEq);
auto ne = HloInstruction::CreateCompare(shape, p0.get(), p1.get(),
ComparisonDirection::kNe);
auto add =
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0.get(), p1.get());
auto le = HloInstruction::CreateCompare(shape, p0.get(), add.get(),
ComparisonDirection::kLe);
EXPECT_THAT(eq.get(), op::Compare());
EXPECT_THAT(eq.get(), op::Eq());
EXPECT_THAT(ne.get(), op::Compare());
EXPECT_THAT(ne.get(), op::Ne());
EXPECT_THAT(le.get(),
op::Compare(op::Parameter(0),
op::Add(op::Parameter(0), op::Parameter(1))));
EXPECT_THAT(le.get(), op::Le(op::Parameter(0),
op::Add(op::Parameter(0), op::Parameter(1))));
EXPECT_THAT(Explain(eq.get(), op::Add()),
Eq("(%compare = f32[1]{0} compare(f32[1]{0} %param.0, "
"f32[1]{0} %param.1), direction=EQ)"));
EXPECT_THAT(Explain(eq.get(), op::Ne()),
Eq("(%compare = f32[1]{0} compare(f32[1]{0} %param.0, "
"f32[1]{0} %param.1), direction=EQ) "
"has wrong comparison direction (got EQ, want NE)"));
}
TEST_F(HloMatchersTest, AsyncCopyMatcher) {
Shape shape_memspace1 = ShapeUtil::MakeShapeWithDenseLayout(
F32, {16}, {0}, /*tiles=*/{},
/*tail_padding_alignment_in_elements=*/1,
/*element_size_in_bits=*/0,
/*memory_space=*/1);
Shape shape_memspace2 = ShapeUtil::MakeShapeWithDenseLayout(
F32, {16}, {0}, /*tiles=*/{},
/*tail_padding_alignment_in_elements=*/1,
/*element_size_in_bits=*/0,
/*memory_space=*/2);
auto p0 = HloInstruction::CreateParameter(0, shape_memspace1, "p0");
auto copy_start = HloInstruction::CreateCopyStart(
ShapeUtil::MakeTupleShape(
{shape_memspace2, shape_memspace1, ShapeUtil::MakeShape(U32, {})}),
p0.get());
auto copy_done = HloInstruction::CreateUnary(
shape_memspace2, HloOpcode::kCopyDone, copy_start.get());
EXPECT_THAT(copy_done.get(),
op::AsyncCopy(/*to_space=*/2, /*from_space=*/1, op::Parameter(0)));
EXPECT_THAT(Explain(copy_start.get(), op::AsyncCopy(2, 1, op::Parameter(0))),
Eq("(%copy-start = (f32[16]{0:S(2)}, f32[16]{0:S(1)}, u32[]) "
"copy-start(f32[16]{0:S(1)} %p0))"));
EXPECT_THAT(Explain(copy_done.get(), op::AsyncCopy(3, 1, op::Parameter(0))),
"(%copy-done = f32[16]{0:S(2)} copy-done((f32[16]{0:S(2)}, "
"f32[16]{0:S(1)}, u32[]) "
"%copy-start)) "
"copies to memory space 2, expected 3");
EXPECT_THAT(Explain(copy_done.get(), op::AsyncCopy(2, 3, op::Parameter(0))),
"(%copy-done = f32[16]{0:S(2)} copy-done((f32[16]{0:S(2)}, "
"f32[16]{0:S(1)}, u32[]) "
"%copy-start)) "
"is in the memory space 1, expected 3");
}
TEST_F(HloMatchersTest, ConstantMatcher) {
std::string hlo_string = R"(
HloModule Constant
ENTRY main {
ROOT x = u32[2] constant({1, 2})
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Constant());
EXPECT_THAT(root, op::Constant(LiteralUtil::CreateR1<uint32_t>({1, 2})));
EXPECT_THAT(root, ::testing::Not(
op::Constant(LiteralUtil::CreateR1<uint32_t>({1, 1}))));
EXPECT_THAT(Explain(root, op::Constant(LiteralUtil::CreateR0<uint32_t>(1))),
"(%x = u32[2]{0} constant({1, 2})) has wrong value (got u32[2] "
"{1, 2}, want u32[] 1)");
}
TEST_F(HloMatchersTest, ReplicaGroupsMatcher) {
Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
std::unique_ptr<HloInstruction> p0 =
HloInstruction::CreateParameter(0, shape, "param");
std::vector<ReplicaGroup> replica_groups(2);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(2);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(3);
std::unique_ptr<HloInstruction> all_to_all = HloInstruction::CreateAllToAll(
shape, {p0.get()}, CollectiveDeviceList(replica_groups),
/*constrain_layout=*/false,
/*channel_id=*/std::nullopt);
EXPECT_THAT(Explain(p0.get(), op::ReplicaGroups({})),
"%param = f32[5,7]{1,0} parameter(0) not a collective op");
EXPECT_THAT(Explain(all_to_all.get(), op::ReplicaGroups({{0, 1}, {2, 3}})),
"%all-to-all = f32[5,7]{1,0} all-to-all(f32[5,7]{1,0} %param), "
"replica_groups={{0,2},{1,3}} has incorrect replica_groups "
"(expected: {{0,1},{2,3}})");
EXPECT_THAT(all_to_all.get(), op::ReplicaGroups({{0, 2}, {1, 3}}));
}
TEST_F(HloMatchersTest, SourceTargetPairsMatcher) {
Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
std::unique_ptr<HloInstruction> p0 =
HloInstruction::CreateParameter(0, shape, "param");
std::vector<std::pair<int64_t, int64_t>> source_target_pairs = {
{0, 1}, {2, 3}, {1, 2}};
std::unique_ptr<HloInstruction> cp = HloInstruction::CreateCollectivePermute(
shape, p0.get(), source_target_pairs, /*channel_id=*/std::nullopt);
EXPECT_THAT(Explain(p0.get(), op::SourceTargetPairs({{0, 1}})),
HasSubstr("not a collective permute"));
EXPECT_THAT(Explain(cp.get(), op::SourceTargetPairs({{0, 1}, {2, 3}})),
HasSubstr("source_target_pairs (expected: {{0,1},{2,3}}"));
EXPECT_THAT(cp.get(), op::SourceTargetPairs({{0, 1}, {2, 3}, {1, 2}}));
}
TEST_F(HloMatchersTest, MetadataMatcher) {
Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
std::unique_ptr<HloInstruction> p0 =
HloInstruction::CreateParameter(0, shape, "param");
OpMetadata metadata;
metadata.set_op_type("op_type1");
metadata.set_op_name("op_name1");
p0->set_metadata(metadata);
OpMetadata actual_opname;
actual_opname.set_op_type("op_type1");
actual_opname.set_op_name("op_name2");
OpMetadata actual_source_file;
actual_source_file.set_op_type("op_type1");
actual_source_file.set_op_name("op_name1");
actual_source_file.set_source_file("source_file");
OpMetadata actual_optype;
actual_optype.set_op_type("op_type2");
actual_optype.set_op_name("op_name1");
OpMetadata actual_source_line;
actual_source_line.set_op_type("op_type1");
actual_source_line.set_op_name("op_name1");
actual_source_line.set_source_line(1);
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_opname)),
HasSubstr("has wrong metadata (got op_name1, want op_name2)"));
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_source_file)),
HasSubstr("has wrong metadata (got "
", want source_file)"));
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_optype)),
HasSubstr("has wrong metadata (got"
" op_type1, want op_type2)"));
EXPECT_THAT(Explain(p0.get(), op::Metadata(actual_source_line)),
HasSubstr("has wrong metadata (got 0"
", want 1)"));
EXPECT_THAT(DescribeHloMatcher(op::Metadata(p0->metadata())),
R"( (metadata: op_type1 op_name1 0))");
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ae28366-5f5c-4685-9465-c15e3ed6cca1 | cpp | tensorflow/tensorflow | hlo_live_range | third_party/xla/xla/hlo/utils/hlo_live_range.cc | third_party/xla/xla/hlo/utils/hlo_live_range_test.cc | #include "xla/hlo/utils/hlo_live_range.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
absl::StatusOr<std::unique_ptr<HloLiveRange>> HloLiveRange::Run(
const HloSchedule& schedule, const HloAliasAnalysis& alias_analysis,
const HloComputation* computation, bool module_scoped_analysis) {
std::unique_ptr<HloLiveRange> hlo_live_range(
new HloLiveRange(schedule, alias_analysis, module_scoped_analysis));
hlo_live_range->FlattenSchedule(*computation);
hlo_live_range->CalculateBufferStartEndMap();
hlo_live_range->NormalizeAliasedBuffers();
return std::move(hlo_live_range);
}
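// Makes the live ranges of HloValues that share an HloBuffer consistent:
// after sorting the aliased ranges by (start, end, id), each range's end is
// propagated forward into the next range, and the earlier range is truncated
// at the point where the next one starts.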
void HloLiveRange::NormalizeAliasedBuffers() {
absl::flat_hash_map<HloBuffer::Id,
std::vector<std::pair<TimeBound*, HloValue::Id>>>
live_ranges_by_buffer;
for (auto& entry : buffer_live_ranges_) {
const HloValue& value = *entry.first;
const HloBuffer& buffer = alias_analysis_.GetBufferContainingValue(value);
live_ranges_by_buffer[buffer.id()].push_back({&entry.second, value.id()});
}
for (auto& entry : live_ranges_by_buffer) {
auto& aliased_live_ranges = entry.second;
absl::c_sort(
aliased_live_ranges, [](std::pair<const TimeBound*, HloValue::Id> a,
std::pair<const TimeBound*, HloValue::Id> b) {
return std::forward_as_tuple(a.first->start, a.first->end, a.second) <
std::forward_as_tuple(b.first->start, b.first->end, b.second);
});
for (int64_t i = 0; i + 1 < aliased_live_ranges.size(); ++i) {
TimeBound& live_range1 = *aliased_live_ranges[i].first;
TimeBound& live_range2 = *aliased_live_ranges[i + 1].first;
live_range2.end = std::max(live_range1.end, live_range2.end);
live_range1.end = std::min(live_range1.end, live_range2.start);
}
}
}
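// Recursively flattens the schedule of `computation` (and, under
// module-scoped analysis, the computations it calls) into a single linear
// order, assigning each instruction a logical time and recording each
// computation's [start, end) span. Computations reached through an
// async-start are tagged with their async context.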
void HloLiveRange::FlattenSchedule(const HloComputation& computation,
const HloComputation* async_context) {
auto it = schedule_.sequences().find(computation.unique_id());
if (it == schedule_.sequences().end()) {
total_order_scheduled_ = false;
return;
}
if (computation_span_times_.contains(&computation)) return;
if (async_context != nullptr) {
computations_in_async_context_[&computation] = async_context;
}
LogicalTime start_time = flattened_instruction_sequence_.size();
const HloInstructionSequence& instruction_sequence = it->second;
for (HloInstruction* instruction : instruction_sequence.instructions()) {
if (module_scoped_analysis_) {
if (instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kAsyncStart) {
for (const HloComputation* called_computation :
instruction->called_computations()) {
FlattenSchedule(*called_computation,
instruction->opcode() == HloOpcode::kAsyncStart
? called_computation
: async_context);
}
} else if (instruction->opcode() == HloOpcode::kWhile) {
FlattenSchedule(*instruction->while_condition(), async_context);
FlattenSchedule(*instruction->while_body(), async_context);
}
}
LogicalTime time = flattened_instruction_sequence_.size();
CHECK(instruction_schedule_.insert({instruction, time}).second);
flattened_instruction_sequence_.push_back(instruction);
}
LogicalTime end_time = flattened_instruction_sequence_.size();
computation_span_times_[&computation] = {start_time, end_time};
}
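// Returns a TimeBound whose end is the latest logical time at which `value`
// appears at any of its positions, starting from `definition_end_time`; root
// positions extend to the end of their computation's span.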
HloLiveRange::TimeBound HloLiveRange::GetLastPosition(
const HloValue& value,
HloLiveRange::LogicalTime definition_end_time) const {
LogicalTime end_time = definition_end_time;
const HloPosition* end_position = &value.defining_position();
for (const HloPosition& position :
absl::Span<const HloPosition>(value.positions()).subspan(1)) {
const HloInstruction* position_inst = position.instruction;
LogicalTime position_time;
if (position_inst->IsRoot()) {
auto it = computation_span_times_.find(position_inst->parent());
if (it == computation_span_times_.end()) continue;
position_time = it->second.end;
} else {
auto it = instruction_schedule_.find(position_inst);
if (it == instruction_schedule_.end()) continue;
position_time = it->second;
}
if (position_time > end_time) {
end_time = position_time;
end_position = &position;
}
}
return {-1, end_time, *end_position};
}
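// Returns the logical time of the last scheduled use of `value`. Under
// module-scoped analysis, uses by kCall are skipped (the callee accounts for
// them) and a use by kWhile is attributed to the while body's parameter.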
HloLiveRange::LogicalTime HloLiveRange::GetLastUsageTime(
const HloValue& value) const {
LogicalTime end_time = -1;
for (const HloUse& use : value.GetUses()) {
const HloInstruction* used = use.instruction;
if (module_scoped_analysis_ && used->opcode() == HloOpcode::kCall) continue;
if (module_scoped_analysis_ && used->opcode() == HloOpcode::kWhile) {
used = used->while_body()->parameter_instruction(0);
VLOG(1) << "Moved value " << value.ToShortString()
<< " to while param: " << used->ToString();
}
auto it = instruction_schedule_.find(used);
if (it != instruction_schedule_.end()) {
end_time = std::max(end_time, it->second);
}
}
return end_time;
}
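// Computes the [start, end] live range of every HloValue: parameters become
// live at their computation's start time, root values (and values defined
// inside an async context) stay live until the computation or async chain
// finishes, and unaliased entry parameters live for the whole schedule.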
void HloLiveRange::CalculateBufferStartEndMap() {
for (const auto& entry : instruction_schedule_) {
const HloInstruction& instruction = *entry.first;
const HloComputation* computation = instruction.parent();
LogicalTime start_time = (instruction.opcode() == HloOpcode::kParameter)
? computation_span_times_[computation].start
: entry.second;
LogicalTime definition_end_time =
instruction.IsRoot() ? computation_span_times_[computation].end
: entry.second;
auto async_context_it = computations_in_async_context_.find(computation);
if (async_context_it != computations_in_async_context_.end()) {
const HloComputation* async_context = async_context_it->second;
CHECK(async_context->IsAsyncComputation());
auto async_done = async_context->AsyncStart()->async_chain_done();
auto async_done_it = instruction_schedule_.find(async_done);
CHECK(async_done_it != instruction_schedule_.end());
definition_end_time =
std::max(definition_end_time, async_done_it->second);
VLOG(2) << "Setting the definition end time for op in async context: "
<< definition_end_time;
}
const InstructionValueSet& value_set_tree =
alias_analysis_.dataflow_analysis().GetInstructionValueSet(
&instruction);
for (const auto& entry : value_set_tree) {
for (const HloValue* value : entry.second.values()) {
if (value->defining_instruction() != &instruction) continue;
TimeBound live_range = GetLastPosition(*value, definition_end_time);
live_range.start = start_time;
const HloModule& module = *computation->parent();
if (instruction.opcode() == HloOpcode::kParameter &&
computation == module.entry_computation() &&
!module.input_output_alias_config().ParameterHasAlias(
instruction.parameter_number(), value->index())) {
live_range.end = schedule_end_time();
} else {
live_range.end = std::max(live_range.end, GetLastUsageTime(*value));
}
CHECK_LE(live_range.start, live_range.end) << instruction.ToString();
CHECK(buffer_live_ranges_.insert({value, live_range}).second);
}
}
}
}
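// Sweeps start/end events over all buffer live ranges to find the logical
// time at which the total size of live buffers peaks (sizes computed with an
// 8-byte pointer size).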
int64_t HloLiveRange::ComputePeakMemoryMoment() const {
std::vector<std::tuple<int64_t /*time*/, bool /*is_end*/, const HloValue*>>
events;
for (const HloValue* value : alias_analysis_.dataflow_analysis().values()) {
auto it = buffer_live_ranges_.find(value);
if (it != buffer_live_ranges_.end()) {
events.emplace_back(it->second.start, false, value);
events.emplace_back(it->second.end + 1, true, value);
}
}
std::sort(events.begin(), events.end());
int64_t memory_usage = 0;
int64_t peak_usage = 0;
std::optional<int64_t> peak_time;
for (const auto& event : events) {
int64_t time;
bool is_end;
const HloValue* value;
std::tie(time, is_end, value) = event;
auto buffer_size = ShapeUtil::ByteSizeOf(value->instruction()->shape(),
/*pointer_size=*/8);
if (is_end) {
memory_usage -= buffer_size;
} else {
memory_usage += buffer_size;
}
if (peak_usage < memory_usage) {
peak_usage = memory_usage;
peak_time = time;
}
}
return peak_time.value_or(0);
}
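// Renders the flattened instruction sequence, every value's live range, and
// the set of values live at the peak-memory moment.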
std::string HloLiveRange::ToString() const {
std::string output;
absl::StrAppendFormat(&output, "HloLiveRange (max %d):\n",
schedule_end_time());
absl::StrAppendFormat(&output, " InstructionSequence:\n");
auto& instructions = flattened_instruction_sequence().instructions();
for (int64_t i = 0; i < instructions.size(); ++i) {
absl::StrAppendFormat(&output, " %d:%s\n", i, instructions[i]->name());
}
absl::StrAppendFormat(&output, " BufferLiveRange:\n");
for (const HloValue* value : alias_analysis_.dataflow_analysis().values()) {
auto it = buffer_live_ranges_.find(value);
if (it != buffer_live_ranges_.end()) {
absl::StrAppendFormat(
&output, " %s%s:%d-%d\n", value->instruction()->name(),
value->index().ToString(), it->second.start, it->second.end);
}
}
int64_t peak_moment = ComputePeakMemoryMoment();
absl::StrAppendFormat(&output, " Live ranges at %lld (peak):\n",
peak_moment);
for (const HloValue* value : alias_analysis_.dataflow_analysis().values()) {
auto it = buffer_live_ranges_.find(value);
if (it != buffer_live_ranges_.end()) {
if (it->second.start <= peak_moment && peak_moment <= it->second.end) {
int64_t bytes = ShapeUtil::ByteSizeOf(value->instruction()->shape(),
/*pointer_size=*/8);
absl::StrAppendFormat(&output, " %s: %lld bytes\n",
value->instruction()->name(), bytes);
}
}
}
return output;
}
} | #include "xla/hlo/utils/hlo_live_range.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using TimeBound = HloLiveRange::TimeBound;
class HloLiveRangeTest : public HloTestBase {
protected:
HloLiveRangeTest() : module_(CreateNewVerifiedModule()) {}
~HloLiveRangeTest() override {}
void Analyze(const HloSchedule& schedule) {
alias_analysis_ = HloAliasAnalysis::Run(module_.get()).value();
hlo_live_range_ = HloLiveRange::Run(schedule, *alias_analysis_,
module_->entry_computation())
.value();
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloLiveRange> hlo_live_range_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
Shape f32scalar_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
const HloValue* BufferAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
return &alias_analysis_->dataflow_analysis().GetUniqueValueAt(instruction,
index);
}
HloLiveRange::TimeBound LiveRangeAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
auto* value = BufferAt(instruction, index);
return hlo_live_range_->buffer_live_ranges().at(value);
}
void CheckSchedule() const {
const auto& flattened_instructions =
hlo_live_range_->flattened_instruction_sequence().instructions();
EXPECT_EQ(flattened_instructions.size(),
hlo_live_range_->instruction_schedule().size());
for (const auto& inst_and_time : hlo_live_range_->instruction_schedule()) {
EXPECT_EQ(flattened_instructions.at(inst_and_time.second),
inst_and_time.first)
<< "(flattened_inst[" << inst_and_time.second
<< "] = " << flattened_instructions.at(inst_and_time.second)->name()
<< ") != (inst_schedule[" << inst_and_time.second
<< "] = " << inst_and_time.first->name() << ")";
}
}
};
TEST_F(HloLiveRangeTest, Multiply) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(), {paramA, paramX, mul});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 3}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 3}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 3}));
}
TEST_F(HloLiveRangeTest, MultiplyAdd) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 4}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 5}));
}
TEST_F(HloLiveRangeTest, LiveOutBuffers) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({mul, add}));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add, tuple});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 6}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 6}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 6}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 6}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 6}));
}
TEST_F(HloLiveRangeTest, InstructionScheduledAfterRoot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({mul, add}));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add, tuple, add2});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 7}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 7}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 7}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 7}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 7}));
EXPECT_EQ(LiveRangeAt(tuple), TimeBound({5, 7}));
EXPECT_EQ(LiveRangeAt(add2), TimeBound({6, 6}));
}
TEST_F(HloLiveRangeTest, AliasedParameter) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias({}, 0, {}));
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 2}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 4}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 5}));
}
TEST_F(HloLiveRangeTest, While) {
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module_->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data_increment =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}})));
HloInstruction* body_data_mul =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, body_data, body_data));
HloInstruction* body_data_add =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data_add, body_data_mul));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data_next, body_iter_next}));
HloComputation* body_computation =
module_->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_iter"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_data"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({data, iter}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data_increment, body_data_mul,
body_data_add, body_data_next, body_out});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(iter).end, LiveRangeAt(cond_iter).start);
EXPECT_EQ(LiveRangeAt(cond_iter).end, LiveRangeAt(body_iter).start);
EXPECT_EQ(LiveRangeAt(body_iter).end, LiveRangeAt(body_iter_next).start);
}
TEST_F(HloLiveRangeTest, Determinism) {
std::string hlo_string = R"(
HloModule While, is_scheduled=true
%WhileBody {
%body_param = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) parameter(0)
%get-tuple-element.2 = f32[2,3]{1,0} get-tuple-element(%body_param), index=0
%constant.2 = f32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
%add.1 = f32[2,3]{1,0} add(f32[2,3]{1,0} %get-tuple-element.2, f32[2,3]{1,0} %constant.2)
%multiply = f32[2,3]{1,0} multiply(f32[2,3]{1,0} %get-tuple-element.2, f32[2,3]{1,0} %get-tuple-element.2)
%add.2 = f32[2,3]{1,0} add(f32[2,3]{1,0} %add.1, f32[2,3]{1,0} %multiply)
%get-tuple-element.1 = f32[] get-tuple-element(%body_param), index=1
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%get-tuple-element.3 = f32[2,3]{1,0} get-tuple-element(%body_param), index=2
%add.3 = f32[2,3]{1,0} add(f32[2,3]{1,0} %get-tuple-element.3, f32[2,3]{1,0} %constant.2)
ROOT %tuple = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) tuple(f32[2,3]{1,0} %add.2, f32[] %add, f32[2,3]{1,0} %add.3)
}
%WhileCond {
%cond_param = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) parameter(0)
%get-tuple-element = f32[] get-tuple-element(%cond_param), index=1
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %While {
%param_iter = f32[2,3]{1,0} parameter(0)
%param_data = f32[] parameter(1)
%tuple.1 = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) tuple(f32[2,3]{1,0} %param_iter, f32[] %param_data, f32[2,3]{1,0} %param_iter)
%while = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) while(%tuple.1), condition=%WhileCond, body=%WhileBody
ROOT %get-tuple-element.4 = f32[2,3]{1,0} get-tuple-element(%while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
const HloSchedule& schedule = module_->schedule();
const int32_t num_runs = 20;
std::vector<std::unique_ptr<HloLiveRange>> hlo_live_ranges;
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module_.get()).value();
for (int i = 0; i < num_runs; ++i) {
hlo_live_ranges.push_back(HloLiveRange::Run(schedule, *alias_analysis,
module_->entry_computation())
.value());
}
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges_0 = hlo_live_ranges[0]->buffer_live_ranges();
for (const auto& iter : buffer_live_ranges_0) {
for (size_t i = 1; i < num_runs; i++) {
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges_i = hlo_live_ranges[i]->buffer_live_ranges();
auto found_iter = buffer_live_ranges_i.find(iter.first);
EXPECT_TRUE(found_iter != buffer_live_ranges_i.end())
<< "value does not exist: " << iter.first->ToString();
EXPECT_EQ(found_iter->second.start, iter.second.start)
<< "value " << iter.first->ToString()
<< " has different start: " << found_iter->second.start << " vs "
<< iter.second.start;
EXPECT_EQ(found_iter->second.end, iter.second.end)
<< "value " << iter.first->ToString()
<< " has different end: " << found_iter->second.end << " vs "
<< iter.second.end;
}
}
}
TEST_F(HloLiveRangeTest, AsyncCall) {
std::string hlo_string = R"(
HloModule AsyncCall, is_scheduled=true, entry_computation_layout={(f32[4096]{0},f32[4096]{0})->f32[4096]{0}}
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %param_1)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_2, f32[4096]{0} %negate_3)
}
%async_wrapped (async_param: f32[4096], async_param.1: f32[4096]) -> f32[4096] {
%async_param = f32[4096]{0} parameter(0)
%async_param.1 = f32[4096]{0} parameter(1)
ROOT %call = f32[4096]{0} call(f32[4096]{0} %async_param, f32[4096]{0} %async_param.1), to_apply=%called_computation
}
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %b)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) async-start(f32[4096]{0} %negate_0, f32[4096]{0} %negate_1), calls=%async_wrapped
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_1)
%async-done = f32[4096]{0} async-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
const HloSchedule& schedule = module_->schedule();
Analyze(schedule);
CheckSchedule();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> aa,
HloAliasAnalysis::Run(module_.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module_->schedule(), *aa,
module_->entry_computation()));
absl::flat_hash_map<std::string, std::pair<int32_t, int32_t>> inst_ranges;
for (auto& [value, time_bound] : hlo_live_range->buffer_live_ranges()) {
inst_ranges[value->instruction()->name()] = {time_bound.start,
time_bound.end};
}
EXPECT_EQ(inst_ranges["a"], std::make_pair(0, 16));
EXPECT_EQ(inst_ranges["b"], std::make_pair(0, 16));
EXPECT_EQ(inst_ranges["add_0"], std::make_pair(13, 15));
EXPECT_EQ(inst_ranges["add_1"], std::make_pair(15, 16));
EXPECT_EQ(inst_ranges["negate_0"], std::make_pair(2, 14));
EXPECT_EQ(inst_ranges["negate_1"], std::make_pair(3, 14));
}
TEST_F(HloLiveRangeTest, Call) {
std::string hlo_string = R"(
HloModule Call, is_scheduled=true
%called_computation (param_0: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
ROOT %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
}
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} negate(%a)
%c = f32[4096]{0} call(%b), to_apply=%called_computation
%d = f32[4096]{0} negate(%c)
ROOT %e = f32[4096]{0} add(%c, %d)
})";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> aa,
HloAliasAnalysis::Run(module_.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module_->schedule(), *aa,
module_->entry_computation()));
absl::flat_hash_map<std::string, std::pair<int32_t, int32_t>> inst_ranges;
for (auto& [value, time_bound] : hlo_live_range->buffer_live_ranges()) {
inst_ranges[value->instruction()->name()] = {time_bound.start,
time_bound.end};
}
EXPECT_EQ(inst_ranges["a"], std::make_pair(0, 7));
EXPECT_EQ(inst_ranges["b"], std::make_pair(1, 3));
EXPECT_EQ(inst_ranges["negate_0"], std::make_pair(3, 6));
EXPECT_EQ(inst_ranges["d"], std::make_pair(5, 6));
EXPECT_EQ(inst_ranges["e"], std::make_pair(6, 7));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_live_range.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_live_range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03d4180a-c3ad-4127-b9e0-08f2506c3923 | cpp | tensorflow/tensorflow | hlo_query | third_party/xla/xla/hlo/utils/hlo_query.cc | third_party/xla/xla/hlo/utils/hlo_query_test.cc | #include "xla/hlo/utils/hlo_query.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
namespace hlo_query {
// Returns true if `op` is a collective-communication opcode, including the
// asynchronous *-start variants.
bool IsCollectiveCommunicationOp(HloOpcode op) {
return op == HloOpcode::kAllReduce || op == HloOpcode::kAllGather ||
op == HloOpcode::kAllToAll || op == HloOpcode::kCollectivePermute ||
op == HloOpcode::kCollectiveBroadcast ||
op == HloOpcode::kReduceScatter || op == HloOpcode::kAllReduceStart ||
op == HloOpcode::kAllGatherStart ||
op == HloOpcode::kCollectivePermuteStart;
}
// Returns true if `instruction` starts an asynchronous collective op;
// Send/Recv are counted only when `include_send_recv` is true.
bool IsAsyncCollectiveStartOp(const HloInstruction* instruction,
bool include_send_recv) {
HloOpcode op = instruction->opcode();
if (op == HloOpcode::kAsyncStart) {
return IsCollectiveCommunicationOp(instruction->async_wrapped_opcode());
}
return op == HloOpcode::kAllReduceStart || op == HloOpcode::kAllGatherStart ||
op == HloOpcode::kCollectivePermuteStart ||
(include_send_recv &&
(op == HloOpcode::kSend || op == HloOpcode::kRecv));
}
// Companion to IsAsyncCollectiveStartOp: matches the corresponding *-done ops.
bool IsAsyncCollectiveDoneOp(const HloInstruction* instruction,
bool include_send_recv) {
HloOpcode op = instruction->opcode();
if (op == HloOpcode::kAsyncDone) {
return IsCollectiveCommunicationOp(instruction->async_wrapped_opcode());
}
return op == HloOpcode::kAllReduceDone || op == HloOpcode::kAllGatherDone ||
op == HloOpcode::kCollectivePermuteDone ||
(include_send_recv &&
(op == HloOpcode::kSendDone || op == HloOpcode::kRecvDone));
}
// If `instruction` is a rank-0 F32 constant, stores its value in `*out` and
// returns true; otherwise returns false and leaves `*out` untouched.
bool IsConstantR0F32(HloInstruction* instruction, float* out) {
if (instruction->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalarWithElementType(instruction->shape(), F32)) {
*out = instruction->literal().Get<float>({});
return true;
}
return false;
}
bool AllOperandsAreParametersOrConstants(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter &&
operand->opcode() != HloOpcode::kConstant) {
return false;
}
}
return true;
}
bool AllOperandsAreParametersOrConstantsWithSingleUser(
const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter &&
operand->opcode() != HloOpcode::kConstant) {
return false;
}
if (operand->user_count() > 1) {
return false;
}
}
return true;
}
bool AllOperandsAreParameters(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter) {
return false;
}
}
return true;
}
bool AllOperandsAreConstants(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kConstant) {
return false;
}
}
return true;
}
// Returns the first operand of `instruction` that satisfies `matcher`, or
// nullptr if no operand matches.
HloInstruction* GetMatchingOperand(const HloPredicate& matcher,
HloInstruction* instruction) {
for (HloInstruction* op : instruction->operands()) {
if (matcher(op)) {
return op;
}
}
return nullptr;
}
bool MatchBinaryInstructionOperand(const HloPredicate& matcher,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand) {
CHECK_EQ(instruction->operand_count(), 2);
if (matcher(instruction->operand(0))) {
*matching_operand = instruction->mutable_operand(0);
*other_operand = instruction->mutable_operand(1);
return true;
}
if (matcher(instruction->operand(1))) {
*matching_operand = instruction->mutable_operand(1);
*other_operand = instruction->mutable_operand(0);
return true;
}
return false;
}
bool MatchBinaryInstructionOperandOpcode(HloOpcode opcode,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand) {
return MatchBinaryInstructionOperand(
[opcode](const HloInstruction* instruction) {
return instruction->opcode() == opcode;
},
instruction, matching_operand, other_operand);
}
bool IsScalarConstant(const HloInstruction* instruction) {
return instruction->IsConstant() && ShapeUtil::IsScalar(instruction->shape());
}
bool IsBroadcastedConstantOrScalar(const HloInstruction& instr) {
return instr.IsConstant() || ShapeUtil::IsScalar(instr.shape()) ||
(HloOpcode::kBroadcast == instr.opcode() &&
(instr.operand(0)->IsConstant() ||
ShapeUtil::IsScalar(instr.operand(0)->shape())));
}
bool IsBroadcastOfScalarConstant(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kBroadcast &&
IsScalarConstant(instr.operand(0));
}
bool IsBroadcastOfParameter(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kBroadcast &&
instr.operand(0)->opcode() == HloOpcode::kParameter;
}
HloInstruction* GetFirstInstructionWithOpcode(const HloComputation& computation,
const HloOpcode opcode) {
auto instructions = computation.instructions();
auto it = absl::c_find_if(instructions, [&](HloInstruction* instr) {
return instr->opcode() == opcode;
});
return it == instructions.end() ? nullptr : *it;
}
bool ContainsInstrWithOpcode(const HloComputation* comp,
const absl::flat_hash_set<HloOpcode>& opcodes) {
for (const auto* instr : comp->instructions()) {
if (opcodes.count(instr->opcode())) {
return true;
}
for (const HloComputation* subcomp : instr->called_computations()) {
if (ContainsInstrWithOpcode(subcomp, opcodes)) {
return true;
}
}
}
return false;
}
bool ContainsLayoutConstrainedCollective(const HloModule& module,
HloOpcode op) {
CHECK(IsCollectiveCommunicationOp(op));
for (auto computation : module.computations()) {
for (auto hlo : computation->instructions()) {
if (hlo->opcode() == op &&
DynCast<HloCollectiveInstruction>(hlo)->constrain_layout()) {
return true;
}
}
}
return false;
}
// Returns one plus the largest channel id used by any channel instruction in
// `module` (and at least 1), suitable for a newly created channel instruction.
int64_t NextChannelId(const HloModule& module) {
int64_t next_channel_id = 1;
for (const HloComputation* comp : module.computations()) {
for (const HloInstruction* hlo : comp->instructions()) {
const HloChannelInstruction* channel_instr =
DynCast<HloChannelInstruction>(hlo);
if (channel_instr && channel_instr->channel_id()) {
next_channel_id =
std::max(next_channel_id, *channel_instr->channel_id() + 1);
}
}
}
return next_channel_id;
}
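// Illustrative usage sketch (not part of the original source): a pass that
// synthesizes a new channel instruction would typically reserve an id first,
//   int64_t id = hlo_query::NextChannelId(*module);
//   new_all_reduce->set_channel_id(id);
// where `new_all_reduce` is a hypothetical freshly built collective and
// set_channel_id refers to the channel-instruction setter.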
bool HasX64TransformedHostTransfer(const HloModule& module) {
for (auto computation : module.computations()) {
for (auto hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kSend) {
auto send = DynCast<HloSendInstruction>(hlo);
if (send->is_host_transfer() && send->operand(0)->shape().IsTuple()) {
return true;
}
} else if (hlo->opcode() == HloOpcode::kRecv) {
auto recv = DynCast<HloRecvInstruction>(hlo);
if (recv->is_host_transfer() &&
recv->shape().tuple_shapes(0).IsTuple()) {
return true;
}
}
}
}
return false;
}
// Returns the single get-tuple-element instruction in `operand`'s computation
// that extracts `index` from `operand`; nullptr if there are zero or several.
HloInstruction* GetUniqueGteInstruction(const HloInstruction* operand,
int64_t index) {
HloInstruction* gte = nullptr;
for (HloInstruction* instr : operand->parent()->MakeInstructionPostOrder()) {
if (!Match(instr, match::GetTupleElement().WithTupleIndex(index))) {
continue;
}
if (instr->operand(0) != operand) {
continue;
}
if (gte != nullptr) {
return nullptr;
}
gte = instr;
}
return gte;
}
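// Illustrative usage sketch (assumption, not from the source): callers use the
// nullptr result to bail out when a tuple element lacks a single reader,
//   if (HloInstruction* gte =
//           hlo_query::GetUniqueGteInstruction(while_op, /*index=*/0)) {
//     // Safe to rewrite the unique reader of tuple element 0 here.
//   }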
HloComputation* FindComputation(HloModule* module, absl::string_view name) {
auto computations = module->computations();
auto it = absl::c_find_if(
computations, [&](HloComputation* c) { return c->name() == name; });
if (it == computations.end()) {
return nullptr;
}
return *it;
}
HloInstruction* FindInstruction(const HloComputation* computation,
absl::string_view name) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->name() == name) return instruction;
}
return nullptr;
}
HloInstruction* FindInstruction(const HloComputation* computation,
HloOpcode opcode) {
for (auto* instruction : computation->instructions()) {
if (instruction->opcode() == opcode) return instruction;
}
return nullptr;
}
}
} | #include "xla/hlo/utils/hlo_query.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using HloQueryTest = HloTestBase;
// Returns the number of instructions in `module` (an HloModule or
// HloComputation) whose opcode is `opcode`.
template <typename Hlo>
int CountInstructions(Hlo& module, HloOpcode opcode) {
int counter = 0;
hlo_query::ForEachInstructionWithOpcode(
module, opcode, [&counter](auto& instr) { counter++; });
return counter;
}
constexpr absl::string_view kConstantAdditionHloString = R"(
HloModule test
ENTRY main {
zero = f32[] constant(0)
five = f32[] constant(5)
ROOT out = f32[] add(zero, five)
})";
TEST_F(HloQueryTest,
GetInstructionWithOpCodeReturnsMatchingInstructionForModule) {
constexpr absl::string_view kHloString = R"(
HloModule m
computation.0 {
param.0 = f32[32]{0} parameter(0)
ROOT _ = f32[32]{0} rsqrt(param.0)
}
ENTRY main {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
add.1 = f32[32]{0} add(param.1,param.2)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
mul.1 = f32[32]{0} multiply(param.1,param.2)
mul.2 = f32[32]{0} multiply(param.2,param.3)
comp.0 = call(param.0), to_apply=computation.0
ROOT _ = (f32[32],f32[32],f32[32],f32[32],f32[32],f32[32],f32[32]) tuple(comp.0,add.0,add.1,sub.0,mul.0,mul.1,mul.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
EXPECT_EQ(CountInstructions(*module, HloOpcode::kAdd), 2);
EXPECT_EQ(CountInstructions(*module, HloOpcode::kSubtract), 1);
EXPECT_EQ(CountInstructions(*module, HloOpcode::kMultiply), 3);
}
TEST_F(HloQueryTest,
GetInstructionWithOpCodeReturnsMatchingInstructionForComputation) {
constexpr absl::string_view kHloString = R"(
HloModule m
computation.0 {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
add.1 = f32[32]{0} add(param.1,param.2)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
mul.1 = f32[32]{0} multiply(param.1,param.2)
ROOT mul.2 = f32[32]{0} multiply(param.2,param.3)
}
ENTRY main {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
comp.0 = f32[32]{0} call(param.0,param.1,param.2), to_apply=computation.0
ROOT _ = (f32[32],f32[32],f32[32],f32[32]) tuple(add.0,sub.0,mul.0,comp.0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
HloComputation* computation = module->GetComputationWithName("computation.0");
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kAdd), 2);
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kSubtract), 1);
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kMultiply), 3);
}
TEST_F(HloQueryTest, GetUniqueGteTest) {
constexpr absl::string_view kHloString = R"(
HloModule m
ENTRY main {
param.0 = (f32[32]{0}, f32[32]{0}, f32[32]{0}, f32[32]{0}) parameter(0)
gte1 = f32[32]{0} get-tuple-element(param.0), index=0
gte2 = f32[32]{0} get-tuple-element(param.0), index=1
dup_gte2 = f32[32]{0} get-tuple-element(param.0), index=1
gte3 = f32[32]{0} get-tuple-element(param.0), index=2
ROOT gte4 = f32[32]{0} get-tuple-element(param.0), index=3
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
HloInstruction* param = module->entry_computation()->parameter_instruction(0);
HloInstruction* gte1 = hlo_query::GetUniqueGteInstruction(param, 0);
EXPECT_NE(gte1, nullptr);
HloInstruction* gte2 = hlo_query::GetUniqueGteInstruction(param, 1);
EXPECT_EQ(gte2, nullptr);
}
TEST_F(HloQueryTest, FindComputationTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
EXPECT_NE(hlo_query::FindComputation(module.get(), "main"), nullptr);
EXPECT_EQ(hlo_query::FindComputation(module.get(), "foo"), nullptr);
}
TEST_F(HloQueryTest, FindInstructionUsingNameTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
EXPECT_NE(hlo_query::FindInstruction(main, "zero"), nullptr);
EXPECT_NE(hlo_query::FindInstruction(main, "five"), nullptr);
EXPECT_NE(hlo_query::FindInstruction(main, "out"), nullptr);
EXPECT_EQ(hlo_query::FindInstruction(main, "foo"), nullptr);
}
void FindInstructionsAndExpectEqual(const HloComputation* main,
absl::string_view name, HloOpcode opcode) {
SCOPED_TRACE(absl::StrCat("Comparing finding by name: ", name,
" and opcode: ", opcode));
HloInstruction* by_name = hlo_query::FindInstruction(main, name);
HloInstruction* by_opcode = hlo_query::FindInstruction(main, opcode);
EXPECT_EQ(by_name, by_opcode);
}
TEST_F(HloQueryTest, FindInstructionUsingOpcodeTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
EXPECT_NE(hlo_query::FindInstruction(main, HloOpcode::kConstant), nullptr);
EXPECT_NE(hlo_query::FindInstruction(main, HloOpcode::kAdd), nullptr);
EXPECT_EQ(hlo_query::FindInstruction(main, HloOpcode::kSelect), nullptr);
}
TEST_F(HloQueryTest, FindInstructionUsingOpcodeAndNameEqualTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
FindInstructionsAndExpectEqual(main, "zero", HloOpcode::kConstant);
FindInstructionsAndExpectEqual(main, "out", HloOpcode::kAdd);
FindInstructionsAndExpectEqual(main, "dummy", HloOpcode::kSelect);
}
TEST_F(HloQueryTest, FindInstructionDoesNotExistTest) {
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
const HloComputation* main = hlo_query::FindComputation(module.get(), "main");
EXPECT_NE(main, nullptr);
auto find_beef = hlo_query::FindInstruction(main, "deadbeef");
auto find_nothing = hlo_query::FindInstruction(main, "");
EXPECT_EQ(find_beef, nullptr);
EXPECT_EQ(find_nothing, nullptr);
}
TEST_F(HloQueryTest, NextChannelIdForModuleWithoutChannelIdTest) {
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndReturnUnverifiedModule(kConstantAdditionHloString));
EXPECT_EQ(hlo_query::NextChannelId(*module), 1)
<< "module with no channel id";
}
TEST_F(HloQueryTest, NextChannelIdBasicTest) {
absl::string_view hlo = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
ROOT start = u32[] collective-permute(p), channel_id=8,
source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
EXPECT_EQ(hlo_query::NextChannelId(*module), 9);
}
TEST_F(HloQueryTest, NextChannelIdTwoIdsTest) {
absl::string_view hlo = R"(
HloModule test
ENTRY test_computation {
p = u32[] partition-id()
l = u32[] collective-permute(p), channel_id=8,
source_target_pairs={{0,1},{1,2}}
r = u32[] collective-permute(p), channel_id=9,
source_target_pairs={{2,3},{3,0}}
ROOT res = u32[] add(l,r)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo));
EXPECT_EQ(hlo_query::NextChannelId(*module), 10);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_query.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_query_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa51a62f-98c3-4da9-956d-b566594678eb | cpp | tensorflow/tensorflow | hlo_sharding_util | third_party/xla/xla/hlo/utils/hlo_sharding_util.cc | third_party/xla/xla/hlo/utils/hlo_sharding_util_test.cc | #include "xla/hlo/utils/hlo_sharding_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace hlo_sharding_util {
// Returns true if, for data of shape `potential_sharded_shape`, every device's
// tile under `potential_subsharding` is contained in the tile the same device
// owns under `sharding` (i.e. the former is an equal or finer partitioning).
bool IsSubTilingOrEqualSharding(const Shape& potential_sharded_shape,
const HloSharding& potential_subsharding,
const HloSharding& sharding) {
if (potential_subsharding.IsManual() || sharding.IsManual()) {
return false;
}
if (sharding.IsTileMaximal()) {
return true;
}
if (potential_subsharding.IsTileMaximal()) {
return false;
}
const int32_t tiled_data_rank = potential_subsharding.TiledDataRank();
if (tiled_data_rank != sharding.TiledDataRank() ||
tiled_data_rank != potential_sharded_shape.dimensions_size()) {
return false;
}
DimensionVector potential_base_tile(tiled_data_rank);
DimensionVector base_tile(tiled_data_rank);
bool shortcut = true;
int64_t diff_dim_counter = 0;
DimensionVector reshape_dims(
potential_subsharding.tile_assignment().dimensions().begin(),
potential_subsharding.tile_assignment().dimensions().end());
for (int64_t i = 0; i < tiled_data_rank; ++i) {
const auto shape_i = potential_sharded_shape.dimensions(i);
const auto p_tile_dim_i = potential_subsharding.tile_assignment().dim(i);
const auto s_tile_dim_i = sharding.tile_assignment().dim(i);
if (p_tile_dim_i < s_tile_dim_i) {
return false;
}
potential_base_tile[i] = CeilOfRatio(shape_i, p_tile_dim_i);
base_tile[i] = CeilOfRatio(shape_i, s_tile_dim_i);
if (s_tile_dim_i != 1 &&
(p_tile_dim_i % s_tile_dim_i != 0 ||
base_tile[i] % potential_base_tile[i] != 0 ||
shape_i <= (p_tile_dim_i - 1) * potential_base_tile[i] ||
shape_i <= (s_tile_dim_i - 1) * base_tile[i])) {
shortcut = false;
}
if (shortcut && p_tile_dim_i != s_tile_dim_i) {
reshape_dims[i + diff_dim_counter] = s_tile_dim_i;
reshape_dims.insert(reshape_dims.begin() + i + diff_dim_counter + 1,
p_tile_dim_i / s_tile_dim_i);
diff_dim_counter++;
}
}
if (shortcut) {
if (!sharding.HasPartialReplication()) {
return potential_subsharding == sharding;
}
std::vector<int> perm(reshape_dims.size());
absl::c_iota(perm, 0);
for (int64_t i = 0; i < tiled_data_rank; ++i) {
if (potential_subsharding.tile_assignment().dim(i) !=
sharding.tile_assignment().dim(i)) {
auto element = perm[i + 1];
perm.erase(perm.begin() + i + 1);
perm.push_back(element);
}
}
auto reshaped_ta = potential_subsharding.tile_assignment()
.Reshape(reshape_dims)
.Transpose(perm)
.Reshape(sharding.tile_assignment().dimensions());
return HloSharding::PartialTile(reshaped_ta).tile_assignment() ==
sharding.tile_assignment();
}
auto storage = std::make_unique<int32_t[]>(
sharding.tile_assignment().num_elements() * tiled_data_rank);
int32_t* storage_cursor = storage.get();
absl::flat_hash_map<int32_t, int32_t*> sharding_offsets;
sharding_offsets.reserve(sharding.tile_assignment().num_elements());
auto get_sharding_offsets = [&](int64_t device) -> absl::Span<int32_t> {
auto it = sharding_offsets.find(device);
if (it == sharding_offsets.end()) {
bool emplaced;
std::tie(it, emplaced) = sharding_offsets.emplace(device, storage_cursor);
DCHECK(emplaced);
storage_cursor += tiled_data_rank;
}
return absl::MakeSpan(it->second, tiled_data_rank);
};
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
auto indices_per_device = get_sharding_offsets(device);
for (int64_t i = 0; i < tiled_data_rank; ++i) {
indices_per_device[i] = base_tile[i] * indices[i];
}
});
auto& potential_ta = potential_subsharding.tile_assignment().array();
absl::Status ok_if_no_violation = potential_ta.EachStatus(
[&](absl::Span<const int64_t> indices, int64_t device) -> absl::Status {
auto sharding_offset = get_sharding_offsets(device);
for (int j = 0; j < tiled_data_rank; ++j) {
const int32_t subsharding_offset_j =
potential_base_tile[j] * indices[j];
if (subsharding_offset_j < sharding_offset[j]) {
return Internal("");
}
if (subsharding_offset_j + potential_base_tile[j] <=
potential_sharded_shape.dimensions(j) &&
subsharding_offset_j + potential_base_tile[j] >
sharding_offset[j] + base_tile[j]) {
return Internal("");
}
}
return absl::OkStatus();
});
return ok_if_no_violation.ok();
}
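// Illustrative example (worked out by hand, not from the source): for a shape
// f32[8,8],
//   potential_subsharding = {devices=[2,2]0,1,2,3}
//   sharding              = {devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
// each device's 4x4 tile sits inside the 4x8 row slab the same device covers
// under `sharding`, so the function returns true.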
static bool IsLeafShardingMoreSpecific(const HloSharding& lhs,
const HloSharding& rhs) {
DCHECK(!lhs.IsTuple());
DCHECK(!rhs.IsTuple());
if (lhs.IsManualLeaf() && rhs.IsTileMaximalLeaf()) {
return true;
}
if (lhs.IsManualLeaf() || rhs.IsManualLeaf()) {
return false;
}
if (!rhs.IsTileMaximalLeaf()) {
return lhs.NumTilesLeaf() > rhs.NumTilesLeaf();
}
return !(rhs.IsReplicatedLeaf() ? lhs.IsReplicatedLeaf()
: lhs.IsTileMaximalLeaf());
}
bool IsShardingMoreSpecific(const HloSharding& lhs, const HloSharding& rhs) {
CHECK_EQ(lhs.IsTuple(), rhs.IsTuple()) << lhs << " <> " << rhs;
if (lhs.IsTuple()) {
const auto& lhs_shardings = lhs.tuple_elements();
const auto& rhs_shardings = rhs.tuple_elements();
CHECK_EQ(lhs_shardings.size(), rhs_shardings.size());
bool is_better = false;
for (int64_t i = 0; i < lhs_shardings.size(); ++i) {
if (IsShardingMoreSpecific(rhs_shardings[i], lhs_shardings[i])) {
return false;
}
if (IsShardingMoreSpecific(lhs_shardings[i], rhs_shardings[i])) {
is_better = true;
}
}
return is_better;
}
return IsLeafShardingMoreSpecific(lhs, rhs);
}
// Tries to refine `*dst` by merging `to_merge` into it when both carry
// compatible partial replication; returns true if the merge succeeded or
// `*dst` was already strictly more specific than `to_merge`.
bool MergeSharding(const HloSharding& to_merge, HloSharding* dst,
bool may_combine_partial_sharding) {
if (to_merge.IsTuple()) {
CHECK(dst->IsTuple());
bool changed = false;
for (int64_t i = 0; i < to_merge.tuple_elements().size(); ++i) {
changed |=
MergeSharding(to_merge.tuple_elements()[i], &dst->tuple_elements()[i],
may_combine_partial_sharding);
}
return changed;
}
if (!may_combine_partial_sharding || !to_merge.HasPartialReplication() ||
!dst->HasPartialReplication() ||
to_merge.tile_assignment().num_elements() !=
dst->tile_assignment().num_elements()) {
goto check_if_more_specific;
}
if (MergeShardingIfCompatible(
to_merge,
std::max(to_merge.NumTiles(), dst->NumTiles()) + 1,
dst)) {
return true;
}
check_if_more_specific:
return IsLeafShardingMoreSpecific(*dst, to_merge);
}
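// Illustrative example (worked out by hand, not from the source): with
// may_combine_partial_sharding set, merging
//   to_merge = {devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}
// into
//   dst      = {devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}
// rewrites dst to the fully tiled {devices=[2,2]0,1,2,3} and returns true.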
bool MergeShardingIfCompatible(const HloSharding& to_merge, HloSharding* dst) {
return MergeShardingIfCompatible(to_merge,
dst->NumTiles() + 1, dst);
}
bool MergeShardingIfCompatible(const HloSharding& to_merge,
int64_t minimum_tiles, HloSharding* dst) {
CHECK(!to_merge.IsTuple() && !to_merge.IsManual() && !dst->IsTuple() &&
!dst->IsManual());
if (to_merge.IsTileMaximal()) {
return false;
}
if (dst->IsTileMaximal()) {
*dst = to_merge;
return true;
}
if (!dst->HasPartialReplication()) {
return false;
}
if (dst->TiledDataRank() != to_merge.TiledDataRank()) {
return false;
}
const int64_t to_merge_man_dim = to_merge.SubgroupManualDim();
const int64_t dst_man_dim = dst->SubgroupManualDim();
if ((to_merge_man_dim >= 0) != (dst_man_dim >= 0)) {
return false;
}
DimensionVector perm_merge(dst->tile_assignment().num_dimensions(), -1);
DimensionVector perm_dst(dst->tile_assignment().num_dimensions(), -1);
int64_t perm_merge_counter = 0;
int64_t perm_dst_counter = 0;
DimensionVector merge_old_tile_dim, dst_old_tile_dim;
DimensionVector merge_new_tile_dim, dst_new_tile_dim;
DimensionVector merge_new_tile_index, dst_new_tile_index;
DimensionVector merged_tile_dims;
merged_tile_dims.reserve(dst->tile_assignment().num_dimensions());
int64_t num_merge_groups = 1;
int64_t num_dst_groups = 1;
for (int64_t i = 0; i < to_merge.TiledDataRank(); ++i) {
int64_t merge_dim = to_merge.tile_assignment().dim(i);
int64_t dst_dim = dst->tile_assignment().dim(i);
num_merge_groups *= merge_dim;
num_dst_groups *= dst_dim;
if (dst_dim == merge_dim) {
merge_old_tile_dim.push_back(merge_dim);
perm_merge[i] = perm_merge_counter++;
dst_old_tile_dim.push_back(dst_dim);
perm_dst[i] = perm_dst_counter++;
merged_tile_dims.push_back(dst_dim);
} else if (dst_dim == 1) {
merge_old_tile_dim.push_back(merge_dim);
perm_merge[i] = perm_merge_counter++;
dst_new_tile_dim.push_back(merge_dim);
dst_new_tile_index.push_back(i);
merged_tile_dims.push_back(merge_dim);
} else if (merge_dim == 1) {
merge_new_tile_dim.push_back(dst_dim);
merge_new_tile_index.push_back(i);
dst_old_tile_dim.push_back(dst_dim);
perm_dst[i] = perm_dst_counter++;
merged_tile_dims.push_back(dst_dim);
} else {
return false;
}
}
const int64_t num_devices = to_merge.tile_assignment().num_elements();
const int64_t new_num_tiles = Product(merged_tile_dims);
if (num_devices % new_num_tiles != 0 || new_num_tiles < minimum_tiles) {
return false;
}
int64_t replication;
if (to_merge_man_dim >= 0) {
int64_t man_group_size = to_merge.tile_assignment().dim(to_merge_man_dim);
if (man_group_size != dst->tile_assignment().dim(dst_man_dim)) {
return false;
}
merge_old_tile_dim.push_back(man_group_size);
dst_old_tile_dim.push_back(man_group_size);
perm_merge[to_merge.TiledDataRank()] = perm_merge_counter++;
perm_dst[to_merge.TiledDataRank()] = perm_dst_counter++;
merged_tile_dims.push_back(man_group_size);
num_merge_groups *= man_group_size;
num_dst_groups *= man_group_size;
if (num_devices % (new_num_tiles * man_group_size) != 0) {
return false;
}
replication = num_devices / (new_num_tiles * man_group_size);
} else {
replication = num_devices / new_num_tiles;
}
if (replication > 1) {
merged_tile_dims.push_back(replication);
}
std::optional<TileAssignment> compatible_tile_assignment;
{
auto get_compatible_tile_assignment =
[&](const HloSharding& sharding, const DimensionVector& old_tile_dims,
DimensionVector& new_tile_dims, DimensionVector& new_tile_indices,
DimensionVector& perm,
const int64_t perm_counter) -> std::vector<TileAssignment> {
if (!sharding.HasPartialReplication() ||
sharding.tile_assignment().dim(sharding.SubgroupReplicationDim()) ==
replication) {
return {sharding.tile_assignment()};
}
if (replication == 1) {
perm.pop_back();
} else {
new_tile_dims.push_back(replication);
new_tile_indices.push_back(dst->tile_assignment().num_dimensions() - 1);
}
std::vector<TileAssignment> result;
DimensionVector iota(new_tile_dims.size());
absl::c_iota(iota, 0);
do {
std::vector<int> local_perm(perm.begin(), perm.end());
int64_t local_perm_counter = perm_counter;
DimensionVector reshape_dims(old_tile_dims.begin(),
old_tile_dims.end());
reshape_dims.reserve(old_tile_dims.size() + new_tile_dims.size());
for (auto i : iota) {
reshape_dims.push_back(new_tile_dims[i]);
local_perm[new_tile_indices[i]] = local_perm_counter++;
}
result.push_back(sharding.tile_assignment()
.Reshape(reshape_dims)
.Transpose(local_perm));
} while (std::next_permutation(iota.begin(), iota.end()));
return result;
};
auto merge_compatible_tile_assignment = get_compatible_tile_assignment(
to_merge, merge_old_tile_dim, merge_new_tile_dim, merge_new_tile_index,
perm_merge, perm_merge_counter);
auto dst_compatible_tile_assignment = get_compatible_tile_assignment(
*dst, dst_old_tile_dim, dst_new_tile_dim, dst_new_tile_index, perm_dst,
perm_dst_counter);
for (const auto& ta1 : dst_compatible_tile_assignment) {
for (const auto& ta2 : merge_compatible_tile_assignment) {
if (ta1 == ta2) {
compatible_tile_assignment = ta1.iota() ? ta1 : ta2;
}
}
}
}
if (!compatible_tile_assignment.has_value()) {
Array<int64_t> new_tile_array(merged_tile_dims);
std::vector<absl::btree_set<int64_t>> merge_group_members(num_merge_groups);
std::vector<absl::btree_set<int64_t>> dst_group_members(num_dst_groups);
const int64_t merge_group_size = num_devices / num_merge_groups;
const int64_t dst_group_size = num_devices / num_dst_groups;
const auto* merge_begin = to_merge.tile_assignment().array().begin();
const auto* dst_begin = dst->tile_assignment().array().begin();
for (int64_t i = 0; i < num_merge_groups; ++i) {
merge_group_members[i] =
absl::btree_set<int64_t>{merge_begin + i * merge_group_size,
merge_begin + (i + 1) * merge_group_size};
}
for (int64_t i = 0; i < num_dst_groups; ++i) {
dst_group_members[i] = absl::btree_set<int64_t>{
dst_begin + i * dst_group_size, dst_begin + (i + 1) * dst_group_size};
}
auto get_group_index = [&](absl::Span<const int64_t> tile_indices,
const HloSharding& sharding,
int64_t manual_dim) {
int64_t group_id = 0;
for (int64_t i = 0; i < to_merge.TiledDataRank(); ++i) {
group_id *= sharding.tile_assignment().dim(i);
group_id += tile_indices[i];
}
if (manual_dim >= 0) {
group_id *= sharding.tile_assignment().dim(manual_dim);
group_id += tile_indices[manual_dim];
}
return group_id;
};
absl::Status compatible =
new_tile_array.EachStatus([&](absl::Span<const int64_t> indices,
int64_t* device) -> absl::Status {
DimensionVector to_merge_index(
to_merge.tile_assignment().num_dimensions());
DimensionVector dst_index(dst->tile_assignment().num_dimensions());
for (int64_t i = 0; i < to_merge.TiledDataRank(); ++i) {
if (to_merge.tile_assignment().dim(i) == 1) {
to_merge_index[i] = 0;
} else {
to_merge_index[i] = indices[i];
}
if (dst->tile_assignment().dim(i) == 1) {
dst_index[i] = 0;
} else {
dst_index[i] = indices[i];
}
}
if (to_merge_man_dim >= 0) {
to_merge_index[to_merge_man_dim] =
indices[to_merge.TiledDataRank()];
dst_index[dst_man_dim] = indices[to_merge.TiledDataRank()];
}
if (to_merge.HasPartialReplication()) {
to_merge_index[to_merge.SubgroupReplicationDim()] = indices.back();
}
dst_index[dst->SubgroupReplicationDim()] = indices.back();
int64_t to_merge_group_id =
get_group_index(to_merge_index, to_merge, to_merge_man_dim);
int64_t dst_group_id = get_group_index(dst_index, *dst, dst_man_dim);
auto& gm1 = merge_group_members[to_merge_group_id];
auto& gm2 = dst_group_members[dst_group_id];
auto it1 = gm1.begin();
auto it2 = gm2.begin();
while (it1 != gm1.end() && it2 != gm2.end()) {
if (*it1 == *it2) {
*device = *it1;
gm1.erase(it1);
gm2.erase(it2);
return absl::OkStatus();
} else if (*it1 < *it2) {
it1++;
} else {
it2++;
}
}
return InvalidArgument("Not compatible");
});
if (!compatible.ok()) {
return false;
}
compatible_tile_assignment =
TileAssignment(std::make_shared<const Array<int64_t>>(new_tile_array));
}
std::vector<OpMetadata> merged_metadata(std::move(dst->metadata()));
merged_metadata.reserve(merged_metadata.size() + to_merge.metadata().size());
const absl::flat_hash_set<OpMetadata, protobuf_util::ProtobufHashWrapper,
protobuf_util::ProtobufEqualsWrapper>
metadata_set(merged_metadata.begin(), merged_metadata.end());
absl::c_copy_if(to_merge.metadata(), std::back_inserter(merged_metadata),
[&metadata_set](const OpMetadata& data) {
return !ContainsKey(metadata_set, data);
});
std::vector<OpSharding::Type> subgroup_types;
if (to_merge_man_dim >= 0) {
subgroup_types.push_back(OpSharding::MANUAL);
}
if (replication > 1) {
subgroup_types.push_back(OpSharding::REPLICATED);
}
*dst = HloSharding::Subgroup(compatible_tile_assignment.value(),
subgroup_types, merged_metadata);
return true;
}
// Returns the device with the highest occurrence count in `device_map`
// (writing that count to `*top_count` when non-null), or nullopt if the map is
// empty or every count is zero.
std::optional<int64_t> SelectDominantDevice(
const std::map<int64_t, int64_t>& device_map, int64_t* top_count) {
int64_t device = 0;
int64_t count = 0;
for (auto& it : device_map) {
if (it.second > count) {
count = it.second;
device = it.first;
}
}
if (top_count != nullptr) {
*top_count = count;
}
return count > 0 ? std::optional<int64_t>(device) : std::optional<int64_t>();
}
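// Illustrative example (assumption, not from the source):
//   SelectDominantDevice({{0, 3}, {1, 5}}, &count) returns 1 with count == 5;
//   an empty map yields std::nullopt.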
// Returns a sharding that all of `shardings` merge into; if they are not all
// compatible, falls back to `default_sharding` when provided, else to the
// first sharding.
HloSharding FindCommonSharding(absl::Span<const HloSharding> shardings,
std::optional<HloSharding> default_sharding) {
CHECK(!shardings.empty());
bool all_compatible = true;
HloSharding common_sharding = shardings[0];
for (int i = 1; i != shardings.size(); ++i) {
if (common_sharding != shardings[i] &&
!MergeShardingIfCompatible(shardings[i], common_sharding.NumTiles(),
&common_sharding)) {
all_compatible = false;
break;
}
}
if (all_compatible) {
return common_sharding;
}
return default_sharding.has_value() ? default_sharding.value() : shardings[0];
}
void AssignComputationDevice(HloComputation* computation, int64_t device) {
VLOG(4) << "Assigning device " << device << " to " << computation->name()
<< " computation";
for (HloInstruction* instruction : computation->instructions()) {
if (!instruction->has_sharding()) {
VLOG(4) << "Assigning device " << device << " to " << instruction->name();
instruction->set_device_sharding(device);
}
}
}
std::optional<int64_t> GetMostOccurringDevice(
absl::Span<HloInstruction* const> instructions) {
std::map<int64_t, int64_t> device_map;
for (HloInstruction* instruction : instructions) {
if (instruction->has_sharding()) {
for (auto& it : instruction->sharding().UsedDevices(nullptr)) {
device_map[it.first] += it.second;
}
}
}
return SelectDominantDevice(device_map, nullptr);
}
std::optional<int64_t> GetDominantDevice(
absl::Span<HloComputation* const> computations, double dominant_factor) {
int64_t instruction_count = 0;
std::map<int64_t, int64_t> device_map;
for (HloComputation* computation : computations) {
for (HloInstruction* instruction : computation->instructions()) {
int64_t count = 1;
if (instruction->has_sharding()) {
for (auto& it : instruction->sharding().UsedDevices(&count)) {
device_map[it.first] += it.second;
}
}
instruction_count += count;
}
}
int64_t count;
std::optional<int64_t> device = SelectDominantDevice(device_map, &count);
std::optional<int64_t> dominant_device;
if (device) {
double factor =
static_cast<double>(count) / static_cast<double>(instruction_count);
if (factor >= dominant_factor) {
dominant_device = device;
}
}
return dominant_device;
}
// Returns `sharding` with its tile assignment permuted by `dimensions`, using
// the same convention as HloTranspose; subgroup dims are carried along.
HloSharding TransposeSharding(const HloSharding& sharding,
absl::Span<const int64_t> dimensions) {
if (sharding.IsTileMaximal() || sharding.IsManual()) {
return sharding;
}
std::vector<int> perm_dimensions(dimensions.begin(), dimensions.end());
if (sharding.TiledDataRank() == dimensions.size()) {
for (int64_t i = sharding.TiledDataRank();
i < sharding.tile_assignment().num_dimensions(); ++i) {
perm_dimensions.push_back(i);
}
} else {
CHECK_EQ(sharding.tile_assignment().num_dimensions(), dimensions.size());
}
auto tile_assignment = sharding.tile_assignment().Transpose(perm_dimensions);
if (!sharding.ReplicateOnLastTileDim()) {
std::vector<OpSharding::Type> subgroup_types;
for (int64_t i = sharding.TiledDataRank(); i < perm_dimensions.size();
++i) {
int64_t src_i = perm_dimensions[i] - sharding.TiledDataRank();
subgroup_types.push_back(sharding.subgroup_types()[src_i]);
}
return HloSharding::Subgroup(tile_assignment, subgroup_types,
sharding.metadata());
} else {
return HloSharding::PartialTile(tile_assignment, sharding.metadata());
}
}
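// Illustrative example (assumption, not from the source): transposing
// {devices=[2,1]0,1} with dimensions={1,0} yields {devices=[1,2]0,1}, matching
// how HloTranspose would permute the data dimensions.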
// Propagates `source_sharding` through a reshape from `source_shape` to
// `target_shape`; returns nullopt when the tiling cannot be expressed on the
// target shape without data movement.
std::optional<HloSharding> ReshapeSharding(const Shape& source_shape,
const Shape& target_shape,
const HloSharding& source_sharding) {
if (source_sharding.IsTileMaximal() || source_sharding.IsManual()) {
return source_sharding;
}
DimensionVector target_tile_assignment_dimensions;
DimensionVector source_dims_stack(source_shape.dimensions().rbegin(),
source_shape.dimensions().rend());
DimensionVector target_dims_stack(target_shape.dimensions().rbegin(),
target_shape.dimensions().rend());
DimensionVector sharding_tile_dims_stack(
source_sharding.tile_assignment().dimensions().begin(),
source_sharding.tile_assignment().dimensions().begin() +
source_shape.rank());
std::reverse(sharding_tile_dims_stack.begin(),
sharding_tile_dims_stack.end());
int64_t source_dims_index = -1;
std::vector<int64_t> dims_to_replicate;
auto source_dims_push = [&](int64_t shape_size, int64_t partitions) {
source_dims_stack.push_back(shape_size);
sharding_tile_dims_stack.push_back(partitions);
source_dims_index--;
};
auto source_dims_pop = [&]() {
source_dims_stack.pop_back();
sharding_tile_dims_stack.pop_back();
source_dims_index++;
};
bool inplace_add_sharding_dim = false;
auto append_sharding_dim = [&](int64_t size) {
if (inplace_add_sharding_dim) {
target_tile_assignment_dimensions.back() *= size;
} else {
target_tile_assignment_dimensions.push_back(size);
}
inplace_add_sharding_dim = false;
};
while (!source_dims_stack.empty() || !target_dims_stack.empty()) {
if (Product(sharding_tile_dims_stack) == 1) {
break;
}
int64_t source_dims_product = 1;
while (!sharding_tile_dims_stack.empty() &&
sharding_tile_dims_stack.back() == 1) {
source_dims_product *= source_dims_stack.back();
source_dims_pop();
}
while (!target_dims_stack.empty() && target_dims_stack.back() > 1 &&
source_dims_product % target_dims_stack.back() == 0) {
source_dims_product /= target_dims_stack.back();
target_dims_stack.pop_back();
append_sharding_dim(1);
}
if (source_dims_product != 1) {
source_dims_push(source_dims_product, 1);
}
if (target_dims_stack.empty()) {
break;
}
int64_t t_size = target_dims_stack.back();
target_dims_stack.pop_back();
int64_t s_size = 1;
int64_t s_partitions = 1;
if (!source_dims_stack.empty()) {
s_size = source_dims_stack.back();
s_partitions = sharding_tile_dims_stack.back();
source_dims_pop();
}
if (s_size == t_size) {
append_sharding_dim(s_partitions);
} else if (s_partitions > 1 && s_size % s_partitions == 0 &&
t_size % s_partitions == 0) {
source_dims_push(s_size / s_partitions, 1);
target_dims_stack.push_back(t_size / s_partitions);
append_sharding_dim(s_partitions);
inplace_add_sharding_dim = true;
} else if (t_size == 1) {
append_sharding_dim(1);
source_dims_push(s_size, s_partitions);
} else if (s_size == 1) {
target_dims_stack.push_back(t_size);
if (s_partitions > 1) {
dims_to_replicate.push_back(source_dims_index);
}
} else if (s_size > t_size) {
if (s_size % s_partitions != 0) {
return std::nullopt;
}
if (s_size % t_size != 0) {
append_sharding_dim(std::gcd(t_size, s_partitions));
break;
}
if (t_size % s_partitions == 0) {
append_sharding_dim(s_partitions);
source_dims_push(s_size / t_size, 1);
} else if (s_partitions % t_size == 0) {
append_sharding_dim(t_size);
source_dims_push(s_size / t_size, s_partitions / t_size);
} else {
append_sharding_dim(std::gcd(t_size, s_partitions));
break;
}
} else {
if (s_size % s_partitions != 0) {
return std::nullopt;
}
CHECK(!source_dims_stack.empty());
if (t_size % s_size != 0) {
append_sharding_dim(std::gcd(t_size, s_partitions));
break;
}
if (sharding_tile_dims_stack.back() != 1 && s_size != s_partitions) {
break;
}
source_dims_stack.back() *= s_size;
sharding_tile_dims_stack.back() *= s_partitions;
target_dims_stack.push_back(t_size);
}
}
if (Product(target_tile_assignment_dimensions) == 1) {
return std::nullopt;
}
while (target_tile_assignment_dimensions.size() < target_shape.rank()) {
target_tile_assignment_dimensions.push_back(1);
}
const HloSharding sharding = !dims_to_replicate.empty()
? PartiallyReplicateTiledShardingOnDims(
source_sharding, dims_to_replicate)
: source_sharding;
for (int64_t i = sharding.TiledDataRank();
i < sharding.tile_assignment().num_dimensions(); ++i) {
target_tile_assignment_dimensions.push_back(
i == sharding.SubgroupReplicationDim()
? 1
: sharding.tile_assignment().dim(i));
}
auto subgroup_types = sharding.subgroup_types();
auto partially_replicated = std::div(
sharding.TotalNumTiles(), Product(target_tile_assignment_dimensions));
CHECK_EQ(partially_replicated.rem, 0);
if (partially_replicated.quot > 1) {
if (sharding.ReplicateOnLastTileDim()) {
target_tile_assignment_dimensions.back() = partially_replicated.quot;
subgroup_types.push_back(OpSharding::REPLICATED);
} else if (absl::c_linear_search(subgroup_types, OpSharding::REPLICATED)) {
target_tile_assignment_dimensions[sharding.SubgroupReplicationDim() -
sharding.TiledDataRank() +
target_shape.rank()] =
partially_replicated.quot;
} else {
target_tile_assignment_dimensions.push_back(partially_replicated.quot);
subgroup_types.push_back(OpSharding::REPLICATED);
}
}
auto new_tile_assignment =
sharding.tile_assignment().Reshape(target_tile_assignment_dimensions);
return HloSharding::Subgroup(new_tile_assignment, subgroup_types,
sharding.metadata());
}
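// Illustrative examples (worked out by hand, not from the source):
//   ReshapeSharding(f32[4,2], f32[8], {devices=[2,1]0,1}) -> {devices=[2]0,1}
//   ReshapeSharding(f32[4,2], f32[8], {devices=[1,2]0,1}) -> std::nullopt,
// since the second tiling splits a dimension that the reshape merges away.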
// Best-effort variant of ReshapeSharding: greedily reshapes the tiling of
// contiguous groups of source dimensions, merges whatever propagates, and
// replicates along dimensions that do not survive the reshape.
HloSharding PropagateShardingThroughReshape(const Shape& source_shape,
const Shape& target_shape,
const HloSharding& sharding) {
if (sharding.IsTileMaximal() || sharding.IsManual()) {
return sharding;
}
if (sharding.IsManualSubgroup()) {
auto group =
GroupShardingOnDims(sharding, {sharding.SubgroupManualDim()}, true);
HloSharding inner_reshaped = PropagateShardingThroughReshape(
source_shape, target_shape, group.sharding);
group.sharding = std::move(inner_reshaped);
group.data_rank = target_shape.rank();
group.group_dims[0] += target_shape.rank() - source_shape.rank();
return UngroupSharding(group);
}
HloSharding result = HloSharding::Replicate();
int64_t start_dim = 0;
while (start_dim < source_shape.rank()) {
bool found_compatible = false;
for (int64_t end_dim = source_shape.rank(); end_dim > start_dim;
--end_dim) {
DimensionVector grouped_tiling_dims(source_shape.rank(), 1);
for (int64_t i = start_dim; i < end_dim; ++i) {
grouped_tiling_dims[i] = sharding.tile_assignment().dim(i);
}
HloSharding grouped_sharding =
HloSharding::Tile(TileAssignment(grouped_tiling_dims));
if (auto reshaped =
ReshapeSharding(source_shape, target_shape, grouped_sharding)) {
std::vector<int> perm;
perm.reserve(sharding.tile_assignment().num_dimensions());
for (int64_t i = start_dim; i < end_dim; i++) {
perm.push_back(i);
}
for (int64_t i = 0; i < start_dim; i++) {
perm.push_back(i);
}
for (int64_t i = end_dim;
i < sharding.tile_assignment().num_dimensions(); i++) {
perm.push_back(i);
}
DimensionVector reshape_dims(
reshaped->tile_assignment().dimensions().begin(),
reshaped->tile_assignment().dimensions().end());
CHECK_EQ(
sharding.tile_assignment().num_elements() % Product(reshape_dims),
0);
int64_t num_replicated_dims =
sharding.tile_assignment().num_elements() / Product(reshape_dims);
const int64_t diff = reshape_dims.size() - target_shape.rank();
CHECK(diff == 0 || diff == 1);
if (diff == 0) {
reshape_dims.push_back(num_replicated_dims);
} else {
reshape_dims.back() *= num_replicated_dims;
}
HloSharding ungrouped_sharding = HloSharding::PartialTile(
sharding.tile_assignment().Transpose(perm).Reshape(reshape_dims));
if (MergeShardingIfCompatible(ungrouped_sharding, &result)) {
start_dim = end_dim;
found_compatible = true;
break;
}
}
}
if (!found_compatible) {
start_dim += 1;
}
}
result.metadata() = sharding.metadata();
return result;
}
// Returns `sharding` with tile order mirrored along each dimension in
// `dimensions`, matching how HloReverse flips the data.
HloSharding ReverseSharding(const HloSharding& sharding,
absl::Span<const int64_t> dimensions) {
if (sharding.IsTileMaximal() || dimensions.empty()) {
return sharding;
}
Array<int64_t> new_tile_assignment(sharding.tile_assignment().dimensions());
new_tile_assignment.Each(
[&](absl::Span<const int64_t> indices, int64_t* device) {
std::vector<int64_t> original_indices(indices.begin(), indices.end());
for (int64_t d : dimensions) {
original_indices[d] =
new_tile_assignment.dim(d) - 1 - original_indices[d];
}
*device = sharding.tile_assignment()(original_indices);
});
return sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_tile_assignment,
sharding.metadata())
: HloSharding::Subgroup(new_tile_assignment,
sharding.subgroup_types(),
sharding.metadata());
}
// Collapses the tiling of every dimension listed in `dims` onto dimension
// `dim`, so the result is tiled only along `dim` and the dims outside `dims`.
HloSharding ReshapeToTileDimension(const HloSharding& sharding, int64_t dim,
absl::Span<const int64_t> dims) {
CHECK(!sharding.IsTuple() && !sharding.IsTileMaximal());
CHECK_NE(absl::c_find(dims, dim), dims.end()) << "dim is not in dims";
auto old_dims = sharding.tile_assignment().dimensions();
DimensionVector new_dims(old_dims.begin(), old_dims.end());
std::vector<int> not_in_dims, dims_except_the_dim;
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (i == dim) {
continue;
} else if (absl::c_find(dims, i) != dims.end()) {
dims_except_the_dim.push_back(i);
new_dims[dim] *= old_dims[i];
new_dims[i] = 1;
} else {
not_in_dims.push_back(i);
}
}
std::vector<int> perm;
perm.reserve(sharding.tile_assignment().num_dimensions());
perm.insert(perm.end(), not_in_dims.begin(), not_in_dims.end());
perm.push_back(dim);
perm.insert(perm.end(), dims_except_the_dim.begin(),
dims_except_the_dim.end());
auto new_tile_assignment =
sharding.tile_assignment().Transpose(perm).Reshape(new_dims);
return HloSharding::Tile(new_tile_assignment, sharding.metadata());
}
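// Illustrative example (worked out by hand, not from the source): applying
// ReshapeToTileDimension to {devices=[2,2]0,1,2,3} with dim=1, dims={0,1}
// yields {devices=[1,4]0,2,1,3}: both tile dimensions collapse onto dim 1.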
// Returns true if any instruction in `module` carries a non-maximal (i.e.
// tiled) sharding.
bool ContainsTileSharding(const HloModule& module) {
for (const HloComputation* computation : module.computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->has_sharding() &&
!instruction->sharding().IsTileMaximal()) {
return true;
}
}
}
return false;
}
// Returns the permutation of indices that sorts `data` in ascending order.
template <typename T>
std::vector<int64_t> argsort(absl::Span<const T> data) {
std::vector<int64_t> indices(data.size());
std::iota(indices.begin(), indices.end(), 0);
std::sort(indices.begin(), indices.end(),
[&data](int64_t i, int64_t j) { return data[i] < data[j]; });
return indices;
}
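// Illustrative example (assumption, not from the source): argsort over
// {3, 1, 2} returns {1, 2, 0}, the indices that visit the data in ascending
// order.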
HloSharding PropagateShardingAlongDimsAndReplicateOthers(
const HloSharding& source_sharding, absl::Span<const int64_t> source_dims,
absl::Span<const int64_t> target_dims, int64_t target_shape_rank) {
CHECK_EQ(source_dims.size(), target_dims.size());
if (source_sharding.IsTileMaximal() || source_sharding.IsManual()) {
return source_sharding;
}
HloSharding replicate_other_dims =
PartiallyReplicateTiledShardingOnAllDimsExcept(source_sharding,
source_dims);
if (replicate_other_dims.IsTileMaximal()) {
return replicate_other_dims;
}
std::vector<int64_t> argsort_source_dims = argsort(source_dims);
std::vector<int64_t> argsort_target_dims = argsort(target_dims);
if (argsort_source_dims != argsort_target_dims) {
std::vector<int64_t> perm(
replicate_other_dims.tile_assignment().num_dimensions(), -1);
for (int64_t i = 0; i < source_dims.size(); ++i) {
perm[source_dims[argsort_target_dims[i]]] = i;
}
int64_t i = source_dims.size();
for (int64_t& perm_element : perm) {
if (perm_element == -1) {
perm_element = i++;
}
}
replicate_other_dims = TransposeSharding(replicate_other_dims, perm);
}
std::vector<int64_t> target_tile_dims(target_shape_rank, 1);
for (int i = 0; i < source_dims.size(); ++i) {
target_tile_dims[target_dims[i]] =
source_sharding.tile_assignment().dim(source_dims[i]);
}
for (int64_t i = replicate_other_dims.TiledDataRank();
i < replicate_other_dims.tile_assignment().num_dimensions(); ++i) {
target_tile_dims.push_back(replicate_other_dims.tile_assignment().dim(i));
}
auto target_tile_assignment =
replicate_other_dims.tile_assignment().Reshape(target_tile_dims);
return replicate_other_dims.ReplicateOnLastTileDim()
? HloSharding::PartialTile(target_tile_assignment,
replicate_other_dims.metadata())
: HloSharding::Subgroup(target_tile_assignment,
replicate_other_dims.subgroup_types(),
replicate_other_dims.metadata());
}
// Maps `index_sharding` (on the gather indices) to a sharding on the gather
// output along the index pass-through dimensions; output dimensions without a
// pass-through counterpart end up replicated.
HloSharding GatherOutputShardingFromIndexIndexPassthroughDimensions(
const HloSharding& index_sharding, const HloInstruction* hlo) {
CHECK(hlo->opcode() == HloOpcode::kGather);
if (index_sharding.IsTileMaximal() || index_sharding.IsManual()) {
return index_sharding;
}
const GatherDimensionNumbers& dnums = hlo->gather_dimension_numbers();
const absl::InlinedVector<int64_t, 1> index_passthrough_dims =
GetGatherScatterIndexPassthroughIndexDims(hlo->operand(1)->shape().rank(),
dnums.index_vector_dim());
const absl::InlinedVector<int64_t, 1> output_passthrough_dims =
GetGatherScatterIndexPassthroughOutputOrUpdateDims(hlo->shape().rank(),
dnums.offset_dims());
CHECK_EQ(index_passthrough_dims.size(), output_passthrough_dims.size());
DimensionVector output_tile(hlo->shape().rank(), 1);
for (auto i = 0; i != index_passthrough_dims.size(); ++i) {
output_tile[output_passthrough_dims[i]] =
index_sharding.tile_assignment().dim(index_passthrough_dims[i]);
}
HloSharding relevant_index_sharding =
PartiallyReplicateTiledShardingOnAllDimsExcept(index_sharding,
index_passthrough_dims);
if (relevant_index_sharding.IsTileMaximal()) {
return relevant_index_sharding;
}
for (int64_t i = relevant_index_sharding.TiledDataRank();
i != relevant_index_sharding.tile_assignment().num_dimensions(); ++i) {
output_tile.push_back(relevant_index_sharding.tile_assignment().dim(i));
}
auto tile_assignment =
relevant_index_sharding.tile_assignment().Reshape(output_tile);
return relevant_index_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
index_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
relevant_index_sharding.subgroup_types(),
index_sharding.metadata());
}
HloSharding GatherIndexShardingFromOutputIndexPassthroughDimensions(
const HloSharding& output_sharding, const HloInstruction* hlo) {
CHECK(hlo->opcode() == HloOpcode::kGather);
if (output_sharding.IsTileMaximal() || output_sharding.IsManual()) {
return output_sharding;
}
const GatherDimensionNumbers& dnums = hlo->gather_dimension_numbers();
const absl::InlinedVector<int64_t, 1> index_passthrough_dims =
GetGatherScatterIndexPassthroughIndexDims(hlo->operand(1)->shape().rank(),
dnums.index_vector_dim());
const absl::InlinedVector<int64_t, 1> output_passthrough_dims =
GetGatherScatterIndexPassthroughOutputOrUpdateDims(hlo->shape().rank(),
dnums.offset_dims());
CHECK_EQ(index_passthrough_dims.size(), output_passthrough_dims.size());
DimensionVector index_tile(hlo->operand(1)->shape().rank(), 1);
for (auto i = 0; i != index_passthrough_dims.size(); ++i) {
index_tile[index_passthrough_dims[i]] =
output_sharding.tile_assignment().dim(output_passthrough_dims[i]);
}
HloSharding relevant_output_sharding =
PartiallyReplicateTiledShardingOnAllDimsExcept(output_sharding,
output_passthrough_dims);
if (relevant_output_sharding.IsTileMaximal()) {
return relevant_output_sharding;
}
for (int64_t i = relevant_output_sharding.TiledDataRank();
i != relevant_output_sharding.tile_assignment().num_dimensions(); ++i) {
index_tile.push_back(relevant_output_sharding.tile_assignment().dim(i));
}
auto tile_assignment =
relevant_output_sharding.tile_assignment().Reshape(index_tile);
return relevant_output_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
output_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
relevant_output_sharding.subgroup_types(),
output_sharding.metadata());
}
HloSharding GatherEffectiveOutputSharding(const HloInstruction& hlo) {
if (hlo.sharding().IsTileMaximal() || hlo.sharding().IsManual()) {
return hlo.sharding();
}
const GatherDimensionNumbers& dnums = hlo.gather_dimension_numbers();
DimensionVector tile_assignment_dims(hlo.shape().rank());
int64_t num_elements = 1;
for (int64_t i = 0; i < hlo.shape().rank(); ++i) {
if (!absl::c_binary_search(dnums.offset_dims(), i)) {
tile_assignment_dims[i] = hlo.sharding().tile_assignment().dim(i);
num_elements *= hlo.sharding().tile_assignment().dim(i);
} else {
tile_assignment_dims[i] = 1;
}
}
if (num_elements == hlo.sharding().tile_assignment().num_elements()) {
return hlo.sharding();
}
if (num_elements == 1) {
return HloSharding::AssignDevice(hlo.sharding().tile_assignment().first(),
hlo.sharding().metadata());
}
DimensionVector slice_starts(hlo.shape().rank(), 0LL),
slice_limits(hlo.shape().rank());
for (int64_t i = 0; i < hlo.shape().rank(); ++i) {
if (!absl::c_binary_search(dnums.offset_dims(), i)) {
slice_limits[i] = hlo.sharding().tile_assignment().dim(i);
} else {
slice_limits[i] = 1;
}
}
Array<int64_t> tile_assignment =
hlo.sharding().tile_assignment().array().Slice(slice_starts,
slice_limits);
return HloSharding::Tile(tile_assignment, hlo.sharding().metadata());
}
HloSharding ScatterIndexShardingFromUpdateIndexPassthroughDimensions(
const HloSharding& update_sharding, const HloScatterInstruction* scatter) {
if (update_sharding.IsTileMaximal() || update_sharding.IsManual()) {
return update_sharding;
}
const ScatterDimensionNumbers& dnums = scatter->scatter_dimension_numbers();
const absl::InlinedVector<int64_t, 1> index_passthrough_dims =
GetGatherScatterIndexPassthroughIndexDims(
scatter->scatter_indices()->shape().rank(), dnums.index_vector_dim());
const absl::InlinedVector<int64_t, 1> update_passthrough_dims =
GetGatherScatterIndexPassthroughOutputOrUpdateDims(
scatter->scatter_updates()[0]->shape().rank(),
dnums.update_window_dims());
CHECK_EQ(index_passthrough_dims.size(), update_passthrough_dims.size());
DimensionVector index_tile(scatter->scatter_indices()->shape().rank(), 1);
for (auto i = 0; i != index_passthrough_dims.size(); ++i) {
index_tile[index_passthrough_dims[i]] =
update_sharding.tile_assignment().dim(update_passthrough_dims[i]);
}
HloSharding relevant_update_sharding =
PartiallyReplicateTiledShardingOnAllDimsExcept(update_sharding,
update_passthrough_dims);
if (relevant_update_sharding.IsTileMaximal()) {
return relevant_update_sharding;
}
for (int64_t i = relevant_update_sharding.TiledDataRank();
i != relevant_update_sharding.tile_assignment().num_dimensions(); ++i) {
index_tile.push_back(relevant_update_sharding.tile_assignment().dim(i));
}
auto tile_assignment =
relevant_update_sharding.tile_assignment().Reshape(index_tile);
return relevant_update_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
update_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
relevant_update_sharding.subgroup_types(),
update_sharding.metadata());
}
HloSharding ScatterUpdateShardingFromIndexIndexPassthroughDimensions(
const HloSharding& index_sharding, const HloScatterInstruction* scatter) {
if (index_sharding.IsTileMaximal() || index_sharding.IsManual()) {
return index_sharding;
}
const ScatterDimensionNumbers& dnums = scatter->scatter_dimension_numbers();
const absl::InlinedVector<int64_t, 1> index_passthrough_dims =
GetGatherScatterIndexPassthroughIndexDims(
scatter->scatter_indices()->shape().rank(), dnums.index_vector_dim());
const absl::InlinedVector<int64_t, 1> update_passthrough_dims =
GetGatherScatterIndexPassthroughOutputOrUpdateDims(
scatter->scatter_updates()[0]->shape().rank(),
dnums.update_window_dims());
CHECK_EQ(index_passthrough_dims.size(), update_passthrough_dims.size());
DimensionVector update_tile(scatter->scatter_updates()[0]->shape().rank(), 1);
for (auto i = 0; i != index_passthrough_dims.size(); ++i) {
update_tile[update_passthrough_dims[i]] =
index_sharding.tile_assignment().dim(index_passthrough_dims[i]);
}
HloSharding relevant_index_sharding =
PartiallyReplicateTiledShardingOnAllDimsExcept(index_sharding,
index_passthrough_dims);
if (relevant_index_sharding.IsTileMaximal()) {
return relevant_index_sharding;
}
for (int64_t i = relevant_index_sharding.TiledDataRank();
i != relevant_index_sharding.tile_assignment().num_dimensions(); ++i) {
update_tile.push_back(relevant_index_sharding.tile_assignment().dim(i));
}
auto tile_assignment =
relevant_index_sharding.tile_assignment().Reshape(update_tile);
return relevant_index_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
index_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
relevant_index_sharding.subgroup_types(),
index_sharding.metadata());
}
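// Restricts `index_sharding` to the leading index dimensions that address the
// scatter's inserted window dimensions; tiling along the remaining index
// dimensions is sliced away (with fallbacks to the original sharding or a
// single-device assignment in degenerate cases).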
HloSharding ScatterEffectiveIndexSharding(
const HloSharding& index_sharding, const HloScatterInstruction& scatter) {
if (index_sharding.IsTileMaximal() || index_sharding.IsManual()) {
return index_sharding;
}
const ScatterDimensionNumbers& dnums = scatter.scatter_dimension_numbers();
int64_t num_elements = 1;
int64_t index_dim = 0;
for (int64_t i = 0; i < scatter.shape().rank(); ++i) {
if (absl::c_binary_search(dnums.inserted_window_dims(), i)) {
num_elements *= index_sharding.tile_assignment().dim(index_dim);
index_dim++;
}
}
if (num_elements == index_sharding.tile_assignment().num_elements()) {
return index_sharding;
}
if (num_elements == 1) {
return HloSharding::AssignDevice(index_sharding.tile_assignment().first(),
index_sharding.metadata());
}
const int64_t index_rank = scatter.scatter_indices()->shape().rank();
DimensionVector slice_starts(index_rank, 0LL), slice_limits(index_rank);
for (int64_t i = 0; i < index_rank; ++i) {
if (i < index_dim) {
slice_limits[i] = index_sharding.tile_assignment().dim(i);
} else {
slice_limits[i] = 1;
}
}
Array<int64_t> tile_assignment =
index_sharding.tile_assignment().array().Slice(slice_starts,
slice_limits);
return HloSharding::Tile(tile_assignment, index_sharding.metadata());
}
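// Restricts `data_sharding` so that only the dimensions listed in the
// scatter's inserted_window_dims stay tiled; tiling along all other update
// dimensions is sliced away.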
HloSharding ScatterEffectiveDataSharding(const HloSharding& data_sharding,
const HloScatterInstruction& scatter) {
if (data_sharding.IsTileMaximal() || data_sharding.IsManual()) {
return data_sharding;
}
const ScatterDimensionNumbers& dnums = scatter.scatter_dimension_numbers();
const int64_t data_rank = scatter.scatter_updates()[0]->shape().rank();
DimensionVector tile_assignment_dims(data_rank, 1LL);
int64_t num_elements = 1;
for (int64_t i = 0; i < scatter.shape().rank(); ++i) {
if (absl::c_binary_search(dnums.inserted_window_dims(), i)) {
CHECK_LT(i, data_rank);
tile_assignment_dims[i] = data_sharding.tile_assignment().dim(i);
num_elements *= data_sharding.tile_assignment().dim(i);
}
}
if (num_elements == data_sharding.tile_assignment().num_elements()) {
return data_sharding;
}
if (num_elements == 1) {
return HloSharding::AssignDevice(data_sharding.tile_assignment().first(),
data_sharding.metadata());
}
DimensionVector slice_starts(data_rank, 0LL);
Array<int64_t> tile_assignment =
data_sharding.tile_assignment().array().Slice(slice_starts,
tile_assignment_dims);
return HloSharding::Tile(tile_assignment, data_sharding.metadata());
}
namespace {
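// Returns the operand dimensions through which a sharding can pass a
// gather/scatter unchanged: dimensions that are neither collapsed nor
// batching, are taken in full (slice size equals the dimension size), and
// whose offset/window dimensions appear in increasing order.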
absl::InlinedVector<int64_t, 1> GetGatherScatterOperandPassthroughOperandDims(
const Shape& operand_shape,
absl::Span<const int64_t> collapsed_or_inserted_dims,
absl::Span<const int64_t> operand_batching_dims,
absl::Span<const int64_t> index_map,
absl::Span<const int64_t> offset_or_window_dims,
absl::Span<const int64_t> slice_size) {
absl::InlinedVector<int64_t, 1> passthrough_dims;
int64_t collapsed_or_batching = 0;
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (absl::c_linear_search(collapsed_or_inserted_dims, i) ||
absl::c_linear_search(operand_batching_dims, i)) {
collapsed_or_batching++;
continue;
}
if (slice_size[i] != operand_shape.dimensions(i)) {
continue;
}
if (i - collapsed_or_batching > 0 &&
offset_or_window_dims[i - collapsed_or_batching] <
offset_or_window_dims[i - collapsed_or_batching - 1]) {
continue;
}
passthrough_dims.push_back(i);
}
return passthrough_dims;
}
absl::InlinedVector<int64_t, 1>
GetGatherScatterOperandPassthroughOutputOrUpdateDims(
const int64_t output_or_update_rank, const Shape& operand_shape,
absl::Span<const int64_t> collapsed_or_inserted_dims,
absl::Span<const int64_t> operand_batching_dims,
absl::Span<const int64_t> index_map,
absl::Span<const int64_t> offset_or_window_dims,
absl::Span<const int64_t> slice_size) {
auto operand_passthrough_dims = GetGatherScatterOperandPassthroughOperandDims(
operand_shape, collapsed_or_inserted_dims, operand_batching_dims,
index_map, offset_or_window_dims, slice_size);
absl::InlinedVector<int64_t, 1> passthrough_dims;
int64_t collapsed_or_batching = 0;
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (absl::c_linear_search(collapsed_or_inserted_dims, i) ||
absl::c_linear_search(operand_batching_dims, i)) {
collapsed_or_batching++;
continue;
}
if (!absl::c_linear_search(operand_passthrough_dims, i)) {
continue;
}
int64_t offset_dim = offset_or_window_dims[i - collapsed_or_batching];
passthrough_dims.push_back(offset_dim);
}
return passthrough_dims;
}
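// Projects `operand_sharding` onto the gather output / scatter update shape
// along the operand pass-through dimensions, replicating everything else.
// Returns std::nullopt when no tiling survives the projection.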
std::optional<HloSharding> PassthroughOperandToGatherOutputOrScatterUpdate(
const Shape& operand_shape, const HloSharding& operand_sharding,
const int64_t output_or_update_rank,
absl::Span<const int64_t> collapsed_or_inserted_dims,
absl::Span<const int64_t> operand_batching_dims,
absl::Span<const int64_t> index_map,
absl::Span<const int64_t> offset_or_window_dims,
absl::Span<const int64_t> slice_size, const int64_t index_vector_dim) {
if (operand_sharding.IsTileMaximal() || operand_sharding.IsManual()) {
return std::nullopt;
}
auto operand_passthrough_dims = GetGatherScatterOperandPassthroughOperandDims(
operand_shape, collapsed_or_inserted_dims, operand_batching_dims,
index_map, offset_or_window_dims, slice_size);
DimensionVector passthrough_tile(output_or_update_rank, 1);
int64_t collapsed_or_batching = 0;
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (absl::c_linear_search(collapsed_or_inserted_dims, i) ||
absl::c_linear_search(operand_batching_dims, i)) {
collapsed_or_batching++;
continue;
}
if (!absl::c_linear_search(operand_passthrough_dims, i)) {
continue;
}
int64_t offset_dim = offset_or_window_dims[i - collapsed_or_batching];
passthrough_tile[offset_dim] = operand_sharding.tile_assignment().dim(i);
}
HloSharding replicate_non_passthrough_dims =
PartiallyReplicateTiledShardingOnAllDimsExcept(operand_sharding,
operand_passthrough_dims);
if (replicate_non_passthrough_dims.IsTileMaximal()) {
return std::nullopt;
}
for (int64_t i = replicate_non_passthrough_dims.TiledDataRank();
i < replicate_non_passthrough_dims.tile_assignment().num_dimensions();
++i) {
passthrough_tile.push_back(
replicate_non_passthrough_dims.tile_assignment().dim(i));
}
auto tile_assignment =
replicate_non_passthrough_dims.tile_assignment().Reshape(
passthrough_tile);
return replicate_non_passthrough_dims.ReplicateOnLastTileDim()
? HloSharding::PartialTile(
tile_assignment, replicate_non_passthrough_dims.metadata())
: HloSharding::Subgroup(
tile_assignment,
replicate_non_passthrough_dims.subgroup_types(),
replicate_non_passthrough_dims.metadata());
}
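// The inverse of the projection above: maps an output/update sharding back
// onto the operand shape along the operand pass-through dimensions.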
std::optional<HloSharding> PassthroughGatherOutputOrScatterUpdateToOperand(
const Shape& operand_shape, const HloSharding& output_or_update_sharding,
absl::Span<const int64_t> collapsed_or_inserted_dims,
absl::Span<const int64_t> operand_batching_dims,
absl::Span<const int64_t> index_map,
absl::Span<const int64_t> offset_or_window_dims,
absl::Span<const int64_t> slice_size) {
if (output_or_update_sharding.IsTileMaximal() ||
output_or_update_sharding.IsManual()) {
return output_or_update_sharding;
}
auto operand_passthrough_dims = GetGatherScatterOperandPassthroughOperandDims(
operand_shape, collapsed_or_inserted_dims, operand_batching_dims,
index_map, offset_or_window_dims, slice_size);
DimensionVector passthrough_tile(operand_shape.rank(), 1);
int64_t collapsed_or_batching = 0;
DimensionVector relevant_output_or_update_dims;
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (absl::c_linear_search(collapsed_or_inserted_dims, i) ||
absl::c_linear_search(operand_batching_dims, i)) {
collapsed_or_batching++;
continue;
}
if (!absl::c_linear_search(operand_passthrough_dims, i)) {
continue;
}
int64_t offset_dim = offset_or_window_dims[i - collapsed_or_batching];
passthrough_tile[i] =
output_or_update_sharding.tile_assignment().dim(offset_dim);
relevant_output_or_update_dims.push_back(offset_dim);
}
HloSharding relevant_sharding =
PartiallyReplicateTiledShardingOnAllDimsExcept(
output_or_update_sharding, relevant_output_or_update_dims);
if (relevant_sharding.IsTileMaximal()) {
return std::nullopt;
}
for (int64_t i = relevant_sharding.TiledDataRank();
i < relevant_sharding.tile_assignment().num_dimensions(); ++i) {
passthrough_tile.push_back(relevant_sharding.tile_assignment().dim(i));
}
auto tile_assignment =
relevant_sharding.tile_assignment().Reshape(passthrough_tile);
return relevant_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tile_assignment,
output_or_update_sharding.metadata())
: HloSharding::Subgroup(tile_assignment,
relevant_sharding.subgroup_types(),
output_or_update_sharding.metadata());
}
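// Infers a gather operand sharding from `output_sharding` along the parallel
// batch dimensions: the explicit operand batching dims plus any implicit
// parallel dims detected from iota-like indices via the call graph.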
std::optional<HloSharding> GatherOperandShardingFromOutputParallelDimensions(
const HloSharding& output_sharding, const HloInstruction& gather,
const CallGraph& call_graph) {
if (output_sharding.IsTileMaximal() || output_sharding.IsManual()) {
return output_sharding;
}
GatherScatterParallelDims parallel_dims;
const GatherDimensionNumbers& dnums = gather.gather_dimension_numbers();
if (!dnums.operand_batching_dims().empty()) {
parallel_dims.operand_parallel_dims.assign(
dnums.operand_batching_dims().begin(),
dnums.operand_batching_dims().end());
parallel_dims.indices_parallel_dims.assign(
dnums.start_indices_batching_dims().begin(),
dnums.start_indices_batching_dims().end());
}
if (std::optional<GatherScatterParallelDims> implicit_parallel_dims =
GetGatherParallelBatchDims(gather, call_graph)) {
parallel_dims.operand_parallel_dims.insert(
parallel_dims.operand_parallel_dims.end(),
implicit_parallel_dims->operand_parallel_dims.begin(),
implicit_parallel_dims->operand_parallel_dims.end());
parallel_dims.indices_parallel_dims.insert(
parallel_dims.indices_parallel_dims.end(),
implicit_parallel_dims->indices_parallel_dims.begin(),
implicit_parallel_dims->indices_parallel_dims.end());
}
if (parallel_dims.operand_parallel_dims.empty()) {
return std::nullopt;
}
return PropagateShardingAlongDimsAndReplicateOthers(
output_sharding, GetGatherParallelOutputDims(gather, parallel_dims),
parallel_dims.operand_parallel_dims, gather.operand(0)->shape().rank());
}
}
std::optional<HloSharding>
GatherOutputShardingFromOperandOperandPassthroughDimensions(
const HloSharding& operand_sharding, const HloInstruction& hlo) {
return GatherOutputShardingFromOperandOperandPassthroughDimensions(
hlo.operand(0)->shape(), operand_sharding, hlo, hlo.gather_slice_sizes());
}
std::optional<HloSharding>
GatherOutputShardingFromOperandOperandPassthroughDimensions(
const Shape& operand_shape, const HloSharding& operand_sharding,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes) {
const auto& dnums = hlo.gather_dimension_numbers();
return PassthroughOperandToGatherOutputOrScatterUpdate(
operand_shape, operand_sharding, hlo.shape().rank(),
dnums.collapsed_slice_dims(), dnums.operand_batching_dims(),
dnums.start_index_map(), dnums.offset_dims(), slice_sizes,
dnums.index_vector_dim());
}
std::optional<HloSharding> GatherOperandShardingFromOutput(
const HloSharding& output_sharding, const HloInstruction& hlo,
const CallGraph& call_graph) {
const auto& dnums = hlo.gather_dimension_numbers();
std::optional<HloSharding> parallel_sharding =
GatherOperandShardingFromOutputParallelDimensions(output_sharding, hlo,
call_graph);
std::optional<HloSharding> passthrough_sharding =
PassthroughGatherOutputOrScatterUpdateToOperand(
hlo.operand(0)->shape(), output_sharding,
dnums.collapsed_slice_dims(), dnums.operand_batching_dims(),
dnums.start_index_map(), dnums.offset_dims(),
hlo.gather_slice_sizes());
if (!passthrough_sharding) {
return parallel_sharding;
}
if (!parallel_sharding) {
return passthrough_sharding;
}
  if (MergeSharding(*parallel_sharding, &*passthrough_sharding,
                    /*may_combine_partial_sharding=*/true)) {
return passthrough_sharding;
}
  if (MergeSharding(*passthrough_sharding, &*parallel_sharding,
                    /*may_combine_partial_sharding=*/true)) {
return parallel_sharding;
}
return parallel_sharding;
}
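// Computes the implicit slice sizes of a scatter: 1 for inserted window and
// input batching dimensions, otherwise the size of the corresponding update
// window dimension.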
std::vector<int64_t> GetScatterSliceSize(const Shape& operand_shape,
const Shape& update_shape,
const ScatterDimensionNumbers& dnums) {
std::vector<int64_t> slice_size(operand_shape.rank(), 1);
int64_t num_update_window_dims = 0;
for (int64_t i = 0; i < operand_shape.rank(); ++i) {
if (absl::c_linear_search(dnums.inserted_window_dims(), i) ||
absl::c_linear_search(dnums.input_batching_dims(), i)) {
continue;
}
slice_size[i] = update_shape.dimensions(
dnums.update_window_dims(num_update_window_dims++));
}
CHECK_EQ(num_update_window_dims, dnums.update_window_dims_size());
return slice_size;
}
std::optional<HloSharding> ScatterOutputShardingFromUpdate(
const HloSharding& update_sharding, const HloScatterInstruction& scatter) {
const auto& dnums = scatter.scatter_dimension_numbers();
std::vector<int64_t> slice_size =
GetScatterSliceSize(scatter.scatter_operands()[0]->shape(),
scatter.scatter_updates()[0]->shape(), dnums);
return PassthroughGatherOutputOrScatterUpdateToOperand(
scatter.scatter_operands()[0]->shape(), update_sharding,
dnums.inserted_window_dims(), dnums.input_batching_dims(),
dnums.scatter_dims_to_operand_dims(), dnums.update_window_dims(),
slice_size);
}
std::optional<HloSharding> ScatterUpdateShardingFromOutput(
const HloSharding& per_output_sharding,
const HloScatterInstruction& scatter, const CallGraph& call_graph) {
std::optional<HloSharding> parallel_sharding =
ScatterUpdateShardingFromOutputParallelDimensions(per_output_sharding,
scatter, call_graph);
std::optional<HloSharding> passthrough_sharding =
ScatterUpdateShardingFromOutputOperandPassthroughDimensions(
per_output_sharding, scatter);
if (!passthrough_sharding) {
return parallel_sharding;
}
if (!parallel_sharding) {
return passthrough_sharding;
}
  if (MergeSharding(*parallel_sharding, &*passthrough_sharding,
                    /*may_combine_partial_sharding=*/true)) {
return passthrough_sharding;
}
  if (MergeSharding(*passthrough_sharding, &*parallel_sharding,
                    /*may_combine_partial_sharding=*/true)) {
return parallel_sharding;
}
return parallel_sharding;
}
std::optional<HloSharding>
ScatterUpdateShardingFromOutputOperandPassthroughDimensions(
const HloSharding& output_sharding, const HloInstruction& hlo) {
const HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(&hlo);
CHECK(scatter);
const Shape& operand_shape = scatter->scatter_operands()[0]->shape();
const Shape& update_shape = scatter->scatter_updates()[0]->shape();
const Shape& output_shape = operand_shape;
return ScatterUpdateShardingFromOutputOperandPassthroughDimensions(
output_shape, output_sharding, *scatter,
GetScatterSliceSize(operand_shape, update_shape,
scatter->scatter_dimension_numbers()));
}
std::optional<HloSharding>
ScatterUpdateShardingFromOutputOperandPassthroughDimensions(
const Shape& output_shape, const HloSharding& output_sharding,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes) {
const HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(&hlo);
CHECK(scatter);
const auto& dnums = scatter->scatter_dimension_numbers();
return PassthroughOperandToGatherOutputOrScatterUpdate(
output_shape, output_sharding,
scatter->scatter_updates()[0]->shape().rank(),
dnums.inserted_window_dims(), dnums.input_batching_dims(),
dnums.scatter_dims_to_operand_dims(), dnums.update_window_dims(),
slice_sizes, dnums.index_vector_dim());
}
std::optional<HloSharding> ScatterUpdateShardingFromOutputParallelDimensions(
const HloSharding& output_sharding, const HloScatterInstruction& scatter,
const CallGraph& call_graph) {
if (output_sharding.IsTileMaximal() || output_sharding.IsManual()) {
return output_sharding;
}
GatherScatterParallelDims parallel_dims;
const ScatterDimensionNumbers& dnums = scatter.scatter_dimension_numbers();
if (!dnums.input_batching_dims().empty()) {
parallel_dims.operand_parallel_dims.assign(
dnums.input_batching_dims().begin(), dnums.input_batching_dims().end());
parallel_dims.indices_parallel_dims.assign(
dnums.scatter_indices_batching_dims().begin(),
dnums.scatter_indices_batching_dims().end());
}
if (std::optional<GatherScatterParallelDims> implicit_parallel_dims =
GetScatterParallelBatchDims(scatter, call_graph)) {
parallel_dims.operand_parallel_dims.insert(
parallel_dims.operand_parallel_dims.end(),
implicit_parallel_dims->operand_parallel_dims.begin(),
implicit_parallel_dims->operand_parallel_dims.end());
parallel_dims.indices_parallel_dims.insert(
parallel_dims.indices_parallel_dims.end(),
implicit_parallel_dims->indices_parallel_dims.begin(),
implicit_parallel_dims->indices_parallel_dims.end());
}
if (parallel_dims.operand_parallel_dims.empty()) {
return std::nullopt;
}
return PropagateShardingAlongDimsAndReplicateOthers(
output_sharding, parallel_dims.operand_parallel_dims,
GetScatterParallelUpdateDims(scatter, parallel_dims),
scatter.scatter_updates()[0]->shape().rank());
}
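// Maps `indices_sharding` onto the gather output / scatter update shape by
// copying the tiling from each parallel indices dimension to its aligned
// output/update dimension and replicating the rest.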
HloSharding GatherOutputOrScatterUpdateShardingFromIndicesParallelDimensions(
const HloSharding& indices_sharding,
const int64_t output_or_update_shape_rank,
absl::Span<const int64_t> indices_parallel_dims,
absl::Span<const int64_t> output_or_update_parallel_dims) {
if (indices_sharding.IsTileMaximal() || indices_sharding.IsManual()) {
return indices_sharding;
}
CHECK_EQ(output_or_update_parallel_dims.size(), indices_parallel_dims.size());
absl::InlinedVector<int64_t, 4> output_or_update_tiling(
output_or_update_shape_rank, 1);
absl::InlinedVector<int64_t, 4> relevant_indices_dims;
for (int i = 0; i != output_or_update_parallel_dims.size(); ++i) {
const int output_or_update_idx = output_or_update_parallel_dims[i];
CHECK_LT(output_or_update_idx, output_or_update_shape_rank);
const int indices_idx = indices_parallel_dims[i];
output_or_update_tiling[output_or_update_idx] =
indices_sharding.tile_assignment().dim(indices_idx);
relevant_indices_dims.push_back(indices_idx);
}
HloSharding relevant_indices_sharding =
PartiallyReplicateTiledShardingOnAllDimsExcept(indices_sharding,
relevant_indices_dims);
if (relevant_indices_sharding.IsTileMaximal()) {
return relevant_indices_sharding;
}
for (int64_t i = relevant_indices_sharding.TiledDataRank();
i != relevant_indices_sharding.tile_assignment().num_dimensions(); ++i) {
output_or_update_tiling.push_back(
relevant_indices_sharding.tile_assignment().dim(i));
}
auto output_tile_assignment =
relevant_indices_sharding.tile_assignment().Reshape(
output_or_update_tiling);
return relevant_indices_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(output_tile_assignment,
indices_sharding.metadata())
: HloSharding::Subgroup(output_tile_assignment,
relevant_indices_sharding.subgroup_types(),
indices_sharding.metadata());
}
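// Recognizes simple scatter reduce computations (add/or, multiply/and, max,
// min) and returns the matching identity constant together with the root
// opcode; anything else yields an InvalidArgument error.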
absl::StatusOr<std::pair<std::unique_ptr<HloInstruction>, HloOpcode>>
IdentityValueAndHloOpcodeForScatterReduceComputation(
const HloScatterInstruction& scatter) {
auto computation = scatter.to_apply();
if (computation->instruction_count() != 3) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
"Expected scatter reduce computation with 2 parameters and only 1 "
"calculation");
}
auto root_instruction = computation->root_instruction();
if (root_instruction->opcode() == HloOpcode::kAdd ||
root_instruction->opcode() == HloOpcode::kOr) {
return std::make_pair(HloInstruction::CreateConstant(LiteralUtil::Zero(
scatter.shape().element_type())),
root_instruction->opcode());
} else if (root_instruction->opcode() == HloOpcode::kMultiply ||
root_instruction->opcode() == HloOpcode::kAnd) {
return std::make_pair(HloInstruction::CreateConstant(
LiteralUtil::One(scatter.shape().element_type())),
root_instruction->opcode());
} else if (root_instruction->opcode() == HloOpcode::kMaximum) {
return std::make_pair(HloInstruction::CreateConstant(LiteralUtil::MinValue(
scatter.shape().element_type())),
root_instruction->opcode());
} else if (root_instruction->opcode() == HloOpcode::kMinimum) {
return std::make_pair(HloInstruction::CreateConstant(LiteralUtil::MaxValue(
scatter.shape().element_type())),
root_instruction->opcode());
}
  return absl::Status(absl::StatusCode::kInvalidArgument,
                      "Expected scatter reduce computation which is "
                      "add/or/multiply/and/min/max");
}
namespace {
void DevicesForShardingInternal(
const HloSharding& sharding,
const absl::flat_hash_set<int64_t>& available_devices,
absl::flat_hash_set<int64_t>* used) {
if (sharding.IsTuple()) {
for (const auto& subsharding : sharding.tuple_elements()) {
DevicesForShardingInternal(subsharding, available_devices, used);
}
return;
}
if (sharding.IsReplicated()) {
for (int64_t device : available_devices) {
if (!HloSharding::IsReservedDevice(device)) {
used->insert(device);
}
}
return;
}
DCHECK(std::all_of(
sharding.tile_assignment().array().begin(),
sharding.tile_assignment().array().end(),
[&](int64_t device) { return available_devices.contains(device); }));
sharding.tile_assignment().Each(
      [&](absl::Span<const int64_t> /*indices*/, int64_t device) {
used->insert(device);
});
}
}
std::vector<int64_t> DevicesForSharding(
const HloSharding& sharding, absl::Span<const int64_t> available_devices) {
absl::flat_hash_set<int64_t> available_set;
for (int64_t device : available_devices) {
available_set.insert(device);
}
absl::flat_hash_set<int64_t> used_set;
DevicesForShardingInternal(sharding, available_set, &used_set);
std::vector<int64_t> devices;
for (int64_t device : available_devices) {
if (used_set.contains(device)) {
devices.push_back(device);
}
}
return devices;
}
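// Returns `sharding` with the tiles along `dims_to_replicate` folded into the
// replication (last-tile / subgroup) dimension, leaving the remaining data
// dimensions tiled as before.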
HloSharding PartiallyReplicateTiledShardingOnDims(
const HloSharding& sharding, absl::Span<const int64_t> dims_to_replicate) {
if (sharding.IsTileMaximal() || sharding.IsManual()) {
return sharding;
}
int64_t group_count = 1;
DimensionVector valid_dims_to_replicate;
for (int64_t dim : dims_to_replicate) {
if (dim >= sharding.TiledDataRank()) {
continue;
}
valid_dims_to_replicate.push_back(dim);
group_count *= sharding.tile_assignment().dim(dim);
}
if (group_count == 1) {
return sharding;
}
if (group_count == sharding.NumTiles() && sharding.subgroup_types().empty()) {
return HloSharding::Replicate(sharding.metadata());
}
DimensionVector dim_permutation(sharding.TiledDataRank());
absl::c_iota(dim_permutation, 0);
absl::c_stable_sort(dim_permutation, [&](const int64_t a, const int64_t b) {
return absl::c_linear_search(valid_dims_to_replicate, a) <
absl::c_linear_search(valid_dims_to_replicate, b);
});
auto new_tile =
TransposeSharding(sharding, dim_permutation).tile_assignment();
DimensionVector new_tile_shape(
sharding.tile_assignment().dimensions().begin(),
sharding.tile_assignment().dimensions().end());
for (int64_t dim : valid_dims_to_replicate) {
new_tile_shape[dim] = 1;
}
if (sharding.ReplicateOnLastTileDim()) {
new_tile_shape.back() *= group_count;
new_tile = new_tile.Reshape(new_tile_shape);
return HloSharding::PartialTile(new_tile, sharding.metadata());
} else {
new_tile_shape.insert(new_tile_shape.begin() + sharding.TiledDataRank(),
group_count);
new_tile = new_tile.Reshape(new_tile_shape);
std::vector<OpSharding::Type> subgroup_types;
subgroup_types.push_back(OpSharding::REPLICATED);
for (OpSharding::Type type : sharding.subgroup_types()) {
subgroup_types.push_back(type);
}
return HloSharding::Subgroup(new_tile, subgroup_types, sharding.metadata());
}
}
HloSharding PartiallyReplicateTiledShardingOnAllDimsExcept(
const HloSharding& sharding, absl::Span<const int64_t> dims_to_keep) {
if (sharding.IsTileMaximal() || sharding.IsManual()) {
return sharding;
}
DimensionVector dims_to_replicate(sharding.TiledDataRank());
absl::c_iota(dims_to_replicate, 0);
dims_to_replicate.erase(
std::remove_if(
dims_to_replicate.begin(), dims_to_replicate.end(),
[&](int64_t i) { return absl::c_linear_search(dims_to_keep, i); }),
dims_to_replicate.end());
return PartiallyReplicateTiledShardingOnDims(sharding, dims_to_replicate);
}
HloSharding ReplicateAllDataDims(const HloSharding& sharding,
int64_t data_rank) {
if (sharding.IsManual()) {
return sharding;
}
if (sharding.subgroup_types().empty()) {
return HloSharding::Replicate(sharding.metadata());
}
HloSharding result =
PartiallyReplicateTiledShardingOnAllDimsExcept(sharding, {});
if (data_rank >= 0 && data_rank != result.TiledDataRank() &&
!result.IsTileMaximal()) {
DimensionVector new_tile_shape(data_rank, 1);
for (int64_t i = result.TiledDataRank();
i < result.tile_assignment().num_dimensions(); ++i) {
new_tile_shape.push_back(result.tile_assignment().dim(i));
}
auto tile = result.tile_assignment().Reshape(new_tile_shape);
result = HloSharding::Subgroup(tile, result.subgroup_types());
}
return result;
}
HloSharding RemoveShapeDimensions(const HloSharding& sharding,
absl::Span<const int64_t> dims_to_remove) {
if (sharding.IsTileMaximal() || dims_to_remove.empty()) {
return sharding;
}
DimensionVector new_tile_shape;
new_tile_shape.reserve(sharding.tile_assignment().num_dimensions() -
dims_to_remove.size());
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (absl::c_linear_search(dims_to_remove, i)) {
CHECK_EQ(sharding.tile_assignment().dim(i), 1);
} else {
new_tile_shape.push_back(sharding.tile_assignment().dim(i));
}
}
auto new_tile = sharding.tile_assignment().Reshape(new_tile_shape);
return sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_tile, sharding.metadata())
: HloSharding::Subgroup(new_tile, sharding.subgroup_types(),
sharding.metadata());
}
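// Transposes `source` according to the src_to_tgt / tgt_to_src dimension
// maps, where -1 marks dimensions that are removed or newly introduced.
// Returns std::nullopt if a removed dimension is actually tiled.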
std::optional<HloSharding> TransposeShardingWithCollapsedDims(
const HloSharding& source, absl::Span<int64_t const> src_to_tgt,
absl::Span<int64_t const> tgt_to_src) {
if (source.IsTileMaximal() || source.IsManual()) {
return source;
}
if (src_to_tgt.size() < source.tile_assignment().num_dimensions()) {
DimensionVector new_src_to_tgt(src_to_tgt.begin(), src_to_tgt.end());
DimensionVector new_tgt_to_src(tgt_to_src.begin(), tgt_to_src.end());
for (int64_t i = 0;
i < source.tile_assignment().num_dimensions() - src_to_tgt.size();
++i) {
new_src_to_tgt.push_back(tgt_to_src.size() + i);
new_tgt_to_src.push_back(src_to_tgt.size() + i);
}
return TransposeShardingWithCollapsedDims(source, new_src_to_tgt,
new_tgt_to_src);
}
DimensionVector tgt_dims_skipping_new(tgt_to_src.size(), -1);
int64_t skipped_tgt_dims = 0;
int64_t src_non_subgroup_dims =
src_to_tgt.size() - source.subgroup_types().size();
int64_t tgt_non_subgroup_dims =
tgt_to_src.size() - source.subgroup_types().size();
for (int64_t i = 0; i < tgt_to_src.size(); ++i) {
if (tgt_to_src[i] < 0) {
CHECK_LT(i, tgt_non_subgroup_dims)
<< "Sharding transpose should not remove subgroup dims.";
skipped_tgt_dims++;
} else {
tgt_dims_skipping_new[i] = i - skipped_tgt_dims;
}
}
int64_t skipped_src_dims = absl::c_count(src_to_tgt, -1);
DimensionVector perm(src_to_tgt.size());
for (int64_t i = 0; i < src_non_subgroup_dims; ++i) {
if (src_to_tgt[i] < 0) {
if (source.tile_assignment().dim(i) > 1) {
return std::nullopt;
}
perm[src_non_subgroup_dims - skipped_src_dims] = i;
skipped_src_dims--;
} else {
perm[tgt_dims_skipping_new[src_to_tgt[i]]] = i;
}
}
skipped_src_dims = absl::c_count(src_to_tgt, -1);
for (int64_t i = src_non_subgroup_dims; i < src_to_tgt.size(); ++i) {
CHECK_GE(src_to_tgt[i], tgt_non_subgroup_dims)
<< "Sharding transpose should not move subgroup dims before data dims.";
perm[src_to_tgt[i] - skipped_tgt_dims + skipped_src_dims] = i;
}
auto tgt_sharding = TransposeSharding(source, perm);
DimensionVector tgt_tiles(tgt_to_src.size(), 1);
for (int64_t i = 0; i < tgt_tiles.size(); ++i) {
if (tgt_to_src[i] >= 0) {
int64_t dim = tgt_dims_skipping_new[i];
if (i >= tgt_non_subgroup_dims) {
dim += skipped_src_dims;
}
tgt_tiles[i] = tgt_sharding.tile_assignment().dim(dim);
}
}
auto reshape_tiles = tgt_sharding.tile_assignment().Reshape(tgt_tiles);
return source.ReplicateOnLastTileDim()
? HloSharding::PartialTile(reshape_tiles, source.metadata())
: HloSharding::Subgroup(reshape_tiles, source.subgroup_types(),
source.metadata());
}
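// Tries to identify the dimension along which `maybe_iota` behaves like an
// iota. Handles real iota instructions, S32 constants whose values match
// their indices, broadcasts of such values, and get-tuple-elements threaded
// through while/conditional callers via the call graph.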
std::optional<int64_t> GetDimensionForIota(const HloInstruction* maybe_iota,
const CallGraph& call_graph) {
if (auto* iota = DynCast<HloIotaInstruction>(maybe_iota)) {
return iota->iota_dimension();
}
if (maybe_iota->shape().element_type() != S32) {
return std::nullopt;
}
if (maybe_iota->IsConstant()) {
std::vector<bool> is_iota_dim(maybe_iota->shape().rank(), true);
maybe_iota->literal().EachCell<int32_t>(
[&](absl::Span<const int64_t> indices, int32_t val) {
for (int64_t i = 0; i < indices.size(); ++i) {
if (val != indices[i]) {
is_iota_dim[i] = false;
}
}
});
for (int64_t i = 0; i < is_iota_dim.size(); ++i) {
if (is_iota_dim[i] && maybe_iota->shape().dimensions(i) > 1) {
return i;
}
}
return std::nullopt;
}
if (maybe_iota->opcode() == HloOpcode::kBroadcast) {
auto operand_dim = GetDimensionForIota(maybe_iota->operand(0), call_graph);
if (operand_dim) {
return maybe_iota->dimensions(*operand_dim);
}
return std::nullopt;
}
if (maybe_iota->opcode() == HloOpcode::kGetTupleElement &&
maybe_iota->operand(0)->opcode() == HloOpcode::kParameter) {
const HloComputation* called_computation = maybe_iota->parent();
if (!called_computation->IsEntryComputation()) {
const HloInstruction* gte = maybe_iota;
const int64_t gte_index = gte->tuple_index();
std::vector<HloInstruction*> callers =
call_graph.GetComputationCallers(called_computation);
CHECK_EQ(callers.size(), 1);
HloInstruction* caller =
call_graph.GetComputationCallers(called_computation)[0];
if (caller->opcode() == HloOpcode::kWhile &&
caller->operand(0)->opcode() == HloOpcode::kTuple) {
HloInstruction* while_root = called_computation->root_instruction();
if (while_root->opcode() == HloOpcode::kTuple &&
while_root->operand(gte_index) == gte) {
return GetDimensionForIota(caller->operand(0)->operand(gte_index),
call_graph);
}
}
if (caller->opcode() == HloOpcode::kConditional) {
int64_t cond_comp_idx =
absl::c_find(caller->branch_computations(), called_computation) -
caller->branch_computations().begin();
CHECK(cond_comp_idx < caller->branch_computations().size());
const HloInstruction* branch_comp_arg =
caller->operand(cond_comp_idx + 1);
CHECK(branch_comp_arg->shape().IsTuple());
return GetDimensionForIota(branch_comp_arg->operand(gte_index),
call_graph);
}
}
return std::nullopt;
}
return std::nullopt;
}
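// Detects implicit batch-parallel dimensions of a gather/scatter by matching
// iota-derived index components against the operand dimensions they address;
// a dimension only qualifies if its slice size is 1 and the operand and
// indices dimensions have equal size.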
std::optional<GatherScatterParallelDims> GetGatherScatterBatchParallelDims(
const HloInstruction* operand, const HloInstruction* indices,
absl::Span<const int64_t> slice_sizes, int64_t index_vector_dim,
absl::Span<const int64_t> index_map, const CallGraph& call_graph) {
std::vector<int64_t> index_parallel_in_dim(index_map.size(), -1);
auto findConcatenate = [&](const HloInstruction* indices) {
const HloInstruction* orig_indices = indices;
while (indices->opcode() == HloOpcode::kCopy) {
indices = indices->operand(0);
}
if (indices->opcode() == HloOpcode::kConcatenate) {
return indices;
}
return orig_indices;
};
indices = findConcatenate(indices);
if (indices->opcode() == HloOpcode::kConcatenate &&
indices->concatenate_dimension() == index_vector_dim) {
int concatenated_dims = 0;
for (int i = 0; i < indices->operand_count(); ++i) {
const HloInstruction* op = indices->operand(i);
const int64_t num_indices_from_element =
op->shape().dimensions_size() > index_vector_dim
? op->shape().dimensions(index_vector_dim)
: 1;
if (std::optional<int64_t> maybe_iota_dim =
GetDimensionForIota(op, call_graph)) {
if (*maybe_iota_dim != index_vector_dim) {
for (int j = 0; j < num_indices_from_element; ++j) {
index_parallel_in_dim[concatenated_dims + j] = *maybe_iota_dim;
}
}
}
concatenated_dims += num_indices_from_element;
}
} else if (std::optional<int64_t> maybe_iota_dim =
GetDimensionForIota(indices, call_graph)) {
if (*maybe_iota_dim != index_vector_dim) {
const int64_t num_indices_from_element =
indices->shape().dimensions_size() > index_vector_dim
? indices->shape().dimensions(index_vector_dim)
: 1;
index_parallel_in_dim.assign(num_indices_from_element, *maybe_iota_dim);
}
}
absl::InlinedVector<int64_t, 1> indices_parallel_dims;
absl::InlinedVector<int64_t, 1> operand_parallel_dims;
for (int i = 0; i < index_parallel_in_dim.size(); ++i) {
int index_parallel_dim = index_parallel_in_dim[i];
if (index_parallel_dim == -1) {
continue;
}
if (absl::c_linear_search(indices_parallel_dims, index_parallel_dim)) {
return std::nullopt;
}
if (slice_sizes[index_map[i]] == 1) {
indices_parallel_dims.push_back(index_parallel_dim);
operand_parallel_dims.push_back(index_map[i]);
if (operand->shape().dimensions(operand_parallel_dims.back()) !=
indices->shape().dimensions(indices_parallel_dims.back())) {
return std::nullopt;
}
} else {
index_parallel_in_dim[i] = -1;
}
}
if (!indices_parallel_dims.empty()) {
return GatherScatterParallelDims{indices_parallel_dims,
operand_parallel_dims};
}
return std::nullopt;
}
std::optional<GatherScatterParallelDims> GetGatherParallelBatchDims(
const HloInstruction& hlo, const CallGraph& call_graph) {
CHECK(DynCast<HloGatherInstruction>(&hlo));
const HloInstruction* operand = hlo.operand(0);
const HloInstruction* indices = hlo.operand(1);
absl::Span<const int64_t> slice_sizes = hlo.gather_slice_sizes();
const auto& dnums = hlo.gather_dimension_numbers();
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& index_map = dnums.start_index_map();
return GetGatherScatterBatchParallelDims(
operand, indices, slice_sizes, index_vector_dim, index_map, call_graph);
}
std::optional<GatherScatterParallelDims> GetScatterParallelBatchDims(
const HloInstruction& hlo, const CallGraph& call_graph) {
const HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(&hlo);
CHECK(scatter);
const HloInstruction* operand = scatter->scatter_operands()[0];
const HloInstruction* indices = scatter->scatter_indices();
const auto& dnums = hlo.scatter_dimension_numbers();
std::vector<int64_t> slice_sizes =
GetScatterSliceSize(scatter->scatter_operands()[0]->shape(),
scatter->scatter_updates()[0]->shape(), dnums);
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& index_map = dnums.scatter_dims_to_operand_dims();
return GetGatherScatterBatchParallelDims(
operand, indices, slice_sizes, index_vector_dim, index_map, call_graph);
}
static absl::InlinedVector<int64_t, 1>
GetGatherOutputOrScatterUpdateParallelDims(
const Shape& shape, absl::Span<const int64_t> indices_parallel_dims,
int64_t index_vector_dim, absl::Span<const int64_t> offset_or_window_dims) {
absl::InlinedVector<int64_t, 1> output_parallel_dims;
for (int64_t indices_parallel_dim : indices_parallel_dims) {
for (int i = 0, idx_dim = 0; i < shape.dimensions_size(); ++i) {
if (absl::c_linear_search(offset_or_window_dims, i)) {
continue;
}
const int index_dim = idx_dim < index_vector_dim ? idx_dim : idx_dim + 1;
if (indices_parallel_dim == index_dim) {
output_parallel_dims.push_back(i);
break;
}
++idx_dim;
}
}
CHECK_EQ(output_parallel_dims.size(), indices_parallel_dims.size());
return output_parallel_dims;
}
absl::InlinedVector<int64_t, 1> GetGatherParallelOutputDims(
const HloInstruction& hlo, const GatherScatterParallelDims& parallel_dim) {
CHECK(DynCast<HloGatherInstruction>(&hlo));
const Shape& output_shape = hlo.shape();
const auto& dnums = hlo.gather_dimension_numbers();
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& offset_dims = dnums.offset_dims();
return GetGatherOutputOrScatterUpdateParallelDims(
output_shape, parallel_dim.indices_parallel_dims, index_vector_dim,
offset_dims);
}
absl::InlinedVector<int64_t, 1> GetScatterParallelUpdateDims(
const HloInstruction& hlo, const GatherScatterParallelDims& parallel_dim) {
const HloScatterInstruction* scatter = DynCast<HloScatterInstruction>(&hlo);
CHECK(scatter);
const Shape update_shape = scatter->scatter_updates()[0]->shape();
const auto& dnums = hlo.scatter_dimension_numbers();
int64_t index_vector_dim = dnums.index_vector_dim();
const auto& window_dims = dnums.update_window_dims();
return GetGatherOutputOrScatterUpdateParallelDims(
update_shape, parallel_dim.indices_parallel_dims, index_vector_dim,
window_dims);
}
absl::InlinedVector<int64_t, 1> GetGatherOperandPassthroughOperandDims(
const Shape& operand_shape, const HloInstruction& hlo,
absl::Span<const int64_t> slice_sizes) {
const auto& dnums = hlo.gather_dimension_numbers();
return GetGatherScatterOperandPassthroughOperandDims(
operand_shape, dnums.collapsed_slice_dims(),
dnums.operand_batching_dims(), dnums.start_index_map(),
dnums.offset_dims(), slice_sizes);
}
absl::InlinedVector<int64_t, 1> GetScatterOperandPassthroughOperandDims(
const Shape& operand_shape, const HloSharding& operand_sharding,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes) {
const auto& dnums = hlo.scatter_dimension_numbers();
return GetGatherScatterOperandPassthroughOperandDims(
operand_shape, dnums.inserted_window_dims(), dnums.input_batching_dims(),
dnums.scatter_dims_to_operand_dims(), dnums.update_window_dims(),
slice_sizes);
}
absl::InlinedVector<int64_t, 1> GetGatherOperandPassthroughOutputDims(
const Shape& output_shape, const Shape& operand_shape,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes) {
const auto& dnums = hlo.gather_dimension_numbers();
return GetGatherScatterOperandPassthroughOutputOrUpdateDims(
output_shape.rank(), operand_shape, dnums.collapsed_slice_dims(),
dnums.operand_batching_dims(), dnums.start_index_map(),
dnums.offset_dims(), slice_sizes);
}
absl::InlinedVector<int64_t, 1> GetScatterOperandPassthroughUpdateDims(
const Shape& update_shape, const Shape& operand_shape,
const HloSharding& operand_sharding, const HloInstruction& hlo,
absl::Span<const int64_t> slice_sizes) {
const auto& dnums = hlo.scatter_dimension_numbers();
return GetGatherScatterOperandPassthroughOutputOrUpdateDims(
update_shape.rank(), operand_shape, dnums.inserted_window_dims(),
dnums.input_batching_dims(), dnums.scatter_dims_to_operand_dims(),
dnums.update_window_dims(), slice_sizes);
}
absl::InlinedVector<int64_t, 1> GetGatherScatterIndexPassthroughIndexDims(
const int64_t indices_rank, const int64_t index_vector_dim) {
absl::InlinedVector<int64_t, 1> passthrough_dims;
for (int64_t i = 0; i != indices_rank; ++i) {
if (i == index_vector_dim) {
continue;
}
passthrough_dims.push_back(i);
}
return passthrough_dims;
}
absl::InlinedVector<int64_t, 1>
GetGatherScatterIndexPassthroughOutputOrUpdateDims(
const int64_t output_or_update_rank,
absl::Span<const int64_t> offset_or_window_dims) {
absl::InlinedVector<int64_t, 1> passthrough_dims;
for (int64_t i = 0; i != output_or_update_rank; ++i) {
if (!absl::c_linear_search(offset_or_window_dims, i)) {
passthrough_dims.push_back(i);
}
}
return passthrough_dims;
}
HloSharding InferGatherScatterParallelShardingFromOperandSharding(
const HloSharding& operand_sharding, const Shape& shape,
absl::Span<const int64_t> output_aligned_operand_parallel_dims,
absl::Span<const int64_t> output_parallel_dims) {
return PropagateShardingAlongDimsAndReplicateOthers(
operand_sharding, output_aligned_operand_parallel_dims,
output_parallel_dims, shape.rank());
}
std::string GroupedSharding::ToString() const {
auto result =
absl::StrCat("group dims: ", absl::StrJoin(group_dims, ","), "\n");
absl::StrAppend(
&result, "group dim sizes: ", absl::StrJoin(group_dim_sizes, ","), "\n");
absl::StrAppend(&result, "data rank: ", data_rank, "\n");
absl::StrAppend(&result, "subgroup manual: ", subgroup_manual, "\n");
absl::StrAppend(&result, "inner sharding: ", sharding.ToString(), "\n");
absl::StrAppend(&result, "device groups:", "\n");
for (auto& device_group : device_groups) {
absl::StrAppend(&result, "\t", absl::StrJoin(device_group, ","), "\n");
}
return result;
}
GroupedSharding GroupShardingOnAllDimsExcept(
const HloSharding& sharding, absl::Span<const int64_t> non_group_dims,
bool subgroup_manual) {
std::vector<int64_t> group_dims(sharding.tile_assignment().num_dimensions());
absl::c_iota(group_dims, 0);
group_dims.erase(
std::remove_if(
group_dims.begin(), group_dims.end(),
[&](int64_t i) { return absl::c_linear_search(non_group_dims, i); }),
group_dims.end());
return GroupShardingOnDims(sharding, group_dims, subgroup_manual);
}
GroupedSharding GroupShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> group_dims,
bool subgroup_manual) {
std::vector<int64_t> group_dim_shards(group_dims.size(), 1);
return GroupShardingOnDims(sharding, group_dims, group_dim_shards,
subgroup_manual);
}
GroupedSharding GroupShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> group_dims,
absl::Span<const int64_t> group_dim_shards,
bool subgroup_manual) {
CHECK(!sharding.IsTileMaximal());
std::vector<std::pair<int64_t, int64_t>> decomposed_tiling_dims(
sharding.tile_assignment().num_dimensions());
for (int64_t i = 0; i < decomposed_tiling_dims.size(); ++i) {
decomposed_tiling_dims[i] =
std::make_pair(1, sharding.tile_assignment().dim(i));
}
DimensionVector group_dim_sizes(group_dims.size());
for (int64_t i = 0; i < group_dims.size(); ++i) {
CHECK_EQ(
sharding.tile_assignment().dim(group_dims[i]) % group_dim_shards[i], 0);
group_dim_sizes[i] =
sharding.tile_assignment().dim(group_dims[i]) / group_dim_shards[i];
decomposed_tiling_dims[group_dims[i]].first = group_dim_sizes[i];
decomposed_tiling_dims[group_dims[i]].second = group_dim_shards[i];
}
DimensionVector grouped_tiling_dims(decomposed_tiling_dims.size());
for (int64_t i = 0; i < decomposed_tiling_dims.size(); ++i) {
grouped_tiling_dims[i] = decomposed_tiling_dims[i].second;
}
DimensionVector sorted_group_dims(group_dims.size());
std::partial_sort_copy(group_dims.begin(), group_dims.end(),
sorted_group_dims.begin(), sorted_group_dims.end());
absl::flat_hash_map<int64_t, int64_t> group_dim_to_index(group_dims.size());
DimensionVector reshape_dimensions(grouped_tiling_dims.begin(),
grouped_tiling_dims.end());
reshape_dimensions.reserve(decomposed_tiling_dims.size() + group_dims.size());
for (int64_t i = 0; i < sorted_group_dims.size(); ++i) {
int64_t index = sorted_group_dims[i] + i;
group_dim_to_index[sorted_group_dims[i]] = index;
reshape_dimensions.insert(
reshape_dimensions.begin() + index,
decomposed_tiling_dims[sorted_group_dims[i]].first);
}
std::vector<int> perm(reshape_dimensions.size());
absl::c_iota(perm, 0);
for (int64_t i = 0; i < group_dims.size(); ++i) {
const int64_t index = group_dim_to_index[group_dims[i]];
perm.erase(std::remove(perm.begin(), perm.end(), index), perm.end());
perm.insert(perm.begin() + i, index);
}
auto grouped_array = sharding.tile_assignment()
.Reshape(reshape_dimensions)
.Transpose(perm)
.array();
const int64_t num_device_groups = Product(group_dim_sizes);
const int64_t num_devices = sharding.tile_assignment().num_elements();
CHECK_EQ(num_devices % num_device_groups, 0);
const int64_t device_group_size = num_devices / num_device_groups;
std::vector<std::vector<int64_t>> device_groups(
num_device_groups, std::vector<int64_t>(device_group_size));
for (int64_t i = 0; i < num_device_groups; ++i) {
device_groups[i].assign(
grouped_array.begin() + i * device_group_size,
grouped_array.begin() + (i + 1) * device_group_size);
}
auto grouped = GroupedSharding(
std::move(device_groups),
DimensionVector(group_dims.begin(), group_dims.end()),
std::move(group_dim_sizes), sharding.tile_assignment().num_dimensions(),
HloSharding::Replicate(), subgroup_manual);
if (sharding.ReplicateOnLastTileDim()) {
grouped.data_rank--;
}
if (sharding.IsManualSubgroup()) {
grouped.data_rank -= sharding.subgroup_types().size();
}
if (Product(grouped_tiling_dims) == 1 ||
(sharding.ReplicateOnLastTileDim() &&
Product(grouped_tiling_dims) == grouped_tiling_dims.back())) {
return grouped;
}
if (sharding.IsManualSubgroup()) {
int64_t tile_dimensions = sharding.tile_assignment().num_dimensions();
int64_t subgroup_size = sharding.subgroup_types().size();
int64_t rank = tile_dimensions - subgroup_size;
int num_dims_erase = 0;
for (int i = 0; i < subgroup_size; i++) {
if (sharding.subgroup_types()[i] == OpSharding::MANUAL) {
grouped_tiling_dims.erase(grouped_tiling_dims.begin() + i + rank -
num_dims_erase);
num_dims_erase++;
}
}
}
if (sharding.ReplicateOnLastTileDim() && grouped_tiling_dims.back() == 1) {
grouped_tiling_dims.pop_back();
}
TileAssignment grouped_tiling(grouped_tiling_dims);
grouped.sharding =
sharding.ReplicateOnLastTileDim() &&
grouped_tiling_dims.size() ==
sharding.tile_assignment().num_dimensions()
? HloSharding::PartialTile(grouped_tiling, sharding.metadata())
: HloSharding::Tile(grouped_tiling, sharding.metadata());
return grouped;
}
namespace {
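// Returns the prime factorization of `num` (with multiplicity) by trial
// division.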
std::vector<int64_t> PrimeFactorization(int64_t num) {
std::vector<int64_t> prime_factors;
while (num % 2 == 0) {
prime_factors.push_back(2);
num /= 2;
}
for (int64_t i = 3; i <= sqrt(num); i += 2) {
while (num % i == 0) {
prime_factors.push_back(i);
num /= i;
}
}
return prime_factors;
}
}
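// Groups `sharding` into `num_groups` device groups along its replicated
// subdimension, borrowing prime factors of the tile counts on
// `replicable_dims` when the replicated dimension alone is too small; falls
// back to a plain replicated grouping of `num_tiles` devices.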
GroupedSharding GroupShardingOnReplicatedDim(
const HloSharding& sharding, int64_t num_groups, int64_t num_tiles,
int64_t data_rank, absl::Span<const int64_t> replicable_dims) {
if (sharding.ReplicateOnLastTileDim() &&
sharding.tile_assignment().dimensions().back() % num_groups == 0) {
absl::InlinedVector<int64_t, 1> group_dim_shards = {
sharding.tile_assignment().dimensions().back() / num_groups};
return GroupShardingOnDims(
sharding, {sharding.tile_assignment().num_dimensions() - 1},
group_dim_shards);
}
if (sharding.IsTiled()) {
const int64_t reps_on_last_tile_dim =
sharding.ReplicateOnLastTileDim()
? sharding.tile_assignment().dimensions().back()
: 1;
const int64_t max_replicable_dimensions = absl::c_accumulate(
replicable_dims, reps_on_last_tile_dim,
[&](int64_t product, int64_t dim) {
return product * sharding.tile_assignment().dim(dim);
});
if (max_replicable_dimensions % num_groups == 0 &&
num_groups % reps_on_last_tile_dim == 0) {
auto tile_assignment = [&]() -> std::optional<TileAssignment> {
int dimensions_to_borrow = num_groups / reps_on_last_tile_dim;
DimensionVector tile_dims(
sharding.tile_assignment().dimensions().begin(),
sharding.tile_assignment().dimensions().end());
if (!sharding.ReplicateOnLastTileDim()) {
tile_dims.push_back(1);
}
for (auto replicable_dim : replicable_dims) {
for (auto factor : PrimeFactorization(
sharding.tile_assignment().dim(replicable_dim))) {
if (dimensions_to_borrow % factor == 0) {
tile_dims[replicable_dim] /= factor;
tile_dims.back() *= factor;
dimensions_to_borrow /= factor;
if (dimensions_to_borrow == 1) {
return TileAssignment(tile_dims);
}
}
}
}
return std::nullopt;
}();
if (tile_assignment.has_value()) {
HloSharding partial_sharding = HloSharding::PartialTile(
tile_assignment.value(), sharding.metadata());
if (!partial_sharding.IsReplicated()) {
return GroupShardingOnDims(
partial_sharding,
{partial_sharding.tile_assignment().num_dimensions() - 1});
}
}
}
}
return GetGroupedReplicatedSharding(num_groups, num_tiles, data_rank);
}
GroupedSharding GetGroupedReplicatedSharding(const int64_t num_groups,
const int64_t num_tiles,
const int64_t data_rank) {
CHECK_EQ(num_tiles % num_groups, 0);
const int64_t group_size = num_tiles / num_groups;
std::vector<std::vector<int64_t>> device_groups(
num_groups, std::vector<int64_t>(group_size));
int64_t device_id = 0;
for (auto& device_group : device_groups) {
absl::c_iota(device_group, device_id);
device_id = device_group.back() + 1;
}
return GroupedSharding(std::move(device_groups), {data_rank}, {num_groups},
data_rank, HloSharding::Replicate(),
                         /*subgroup_manual=*/false);
}
GroupedSharding GetManualSubgroupSharding(const HloSharding& sharding) {
CHECK(sharding.IsManualSubgroup());
int64_t tile_dimensions = sharding.tile_assignment().num_dimensions();
int64_t subgroup_size = sharding.subgroup_types().size();
int64_t rank = tile_dimensions - subgroup_size;
DimensionVector group_dims;
bool last_tile_dim_replicate = false;
for (int64_t i = 0; i < subgroup_size; i++) {
if (sharding.subgroup_types()[i] == OpSharding::MANUAL) {
group_dims.push_back(rank + i);
} else if (sharding.subgroup_types()[i] == OpSharding::REPLICATED) {
last_tile_dim_replicate = true;
}
}
GroupedSharding group_sharding =
      GroupShardingOnDims(sharding, group_dims, /*subgroup_manual=*/true);
if (last_tile_dim_replicate ||
group_sharding.sharding.tile_assignment().num_dimensions() > rank) {
group_sharding.sharding = HloSharding::PartialTile(
group_sharding.sharding.tile_assignment(), sharding.metadata());
}
return group_sharding;
}
std::optional<GroupedSharding>
PartialReplicatedGroupShardingWithAssignedDeviceGroups(
const HloSharding& sharding, int64_t num_shards,
const std::vector<std::vector<int64_t>>& device_groups) {
if (!sharding.ReplicateOnLastTileDim() ||
sharding.tile_assignment().dimensions().back() % device_groups.size() !=
0) {
VLOG(5) << "Failed because not partial replicated or not divisible";
return std::nullopt;
}
std::vector<DimensionVector> device_to_index(
Product(sharding.tile_assignment().dimensions()),
DimensionVector(sharding.tile_assignment().num_dimensions()));
sharding.tile_assignment().Each(
[&device_to_index](absl::Span<const int64_t> indices, int64_t device) {
device_to_index[device].assign(indices.begin(), indices.end());
});
DimensionVector grouped_tiling_dims(
sharding.tile_assignment().dimensions().begin(),
sharding.tile_assignment().dimensions().end());
grouped_tiling_dims.back() /= device_groups.size();
std::optional<HloSharding> final_sharding;
const int64_t shard_size_on_replicated_dim =
sharding.tile_assignment().dimensions().back() / num_shards;
for (int64_t group_idx = 0; group_idx < device_groups.size(); ++group_idx) {
HloSharding group_sharding = HloSharding::Replicate();
Array<int64_t> grouped_tiling(grouped_tiling_dims);
Array<int64_t> stacked_pos(
absl::MakeConstSpan(grouped_tiling_dims.data(),
grouped_tiling_dims.size() - 1),
0);
for (int64_t device_idx = 0; device_idx < device_groups[group_idx].size();
++device_idx) {
VLOG(5) << "Device idx: " << device_idx;
const int64_t device = device_groups[group_idx][device_idx];
const auto& indices = device_to_index[device];
absl::Span<const int64_t> stacked_pos_idx =
absl::MakeConstSpan(indices.data(), indices.size() - 1);
int64_t& position = stacked_pos(stacked_pos_idx);
if (position == num_shards) {
VLOG(5) << "Fail because stacked position overflow " << position
<< " device_groups " << device_groups.size() << " ["
<< absl::StrJoin(indices, ",") << "]";
VLOG(5) << "Device: " << device << " "
<< device_groups[group_idx][device_idx];
VLOG(5) << "Indices: " << absl::StrJoin(indices, ",");
VLOG(5) << "Grouped tiling: " << grouped_tiling.ToString();
return std::nullopt;
}
auto stacked_indices = indices;
stacked_indices.back() = position++;
grouped_tiling(stacked_indices) = device_idx;
}
group_sharding =
HloSharding::PartialTile(grouped_tiling, sharding.metadata());
if (!final_sharding) {
final_sharding = group_sharding;
continue;
}
if (*final_sharding != group_sharding) {
VLOG(5) << "Fail because final sharding different from group sharding: "
<< final_sharding->ToString() << " vs "
<< group_sharding.ToString();
return std::nullopt;
}
}
return GroupedSharding(device_groups,
{sharding.tile_assignment().num_dimensions() - 1},
{shard_size_on_replicated_dim},
sharding.tile_assignment().num_dimensions() - 1,
                         *final_sharding, /*subgroup_manual=*/false);
}
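// Reconstructs the full HloSharding from a GroupedSharding by re-inserting
// the group dimensions into the tile assignment and restoring partial
// replication and manual subgroup types.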
HloSharding UngroupSharding(const GroupedSharding& grouped_sharding) {
DimensionVector tiling_dims;
bool partial_sharding = false;
std::vector<OpSharding::Type> subgroup_types;
auto grouped_tiling = grouped_sharding.sharding.tile_assignment();
if (grouped_sharding.sharding.IsTileMaximal()) {
tiling_dims = DimensionVector(grouped_sharding.data_rank, 1);
if (grouped_sharding.device_groups[0].size() != 1 ||
absl::c_linear_search(grouped_sharding.group_dims,
tiling_dims.size())) {
tiling_dims.push_back(grouped_sharding.device_groups[0].size());
partial_sharding = true;
}
grouped_tiling = TileAssignment(tiling_dims);
}
if (grouped_sharding.subgroup_manual) {
partial_sharding = grouped_sharding.sharding.ReplicateOnLastTileDim() ||
grouped_sharding.sharding.IsReplicated();
int64_t subgroup_dim_size = grouped_sharding.group_dims.size();
if (partial_sharding) {
subgroup_dim_size++;
}
subgroup_types = std::vector<OpSharding::Type>(subgroup_dim_size,
OpSharding::REPLICATED);
if (!grouped_sharding.sharding.IsTileMaximal()) {
tiling_dims.assign(
grouped_sharding.sharding.tile_assignment().dimensions().begin(),
grouped_sharding.sharding.tile_assignment().dimensions().end());
}
for (int i = 0; i < grouped_sharding.group_dims.size(); i++) {
subgroup_types[grouped_sharding.group_dims[i] -
grouped_sharding.data_rank] = OpSharding::MANUAL;
tiling_dims.insert(tiling_dims.begin() + grouped_sharding.group_dims[i],
1);
}
} else if (!grouped_sharding.sharding.IsTileMaximal()) {
partial_sharding = grouped_sharding.sharding.ReplicateOnLastTileDim();
tiling_dims.assign(
grouped_sharding.sharding.tile_assignment().dimensions().begin(),
grouped_sharding.sharding.tile_assignment().dimensions().end());
if (absl::c_linear_search(grouped_sharding.group_dims,
tiling_dims.size())) {
tiling_dims.push_back(1);
partial_sharding = true;
}
}
DimensionVector group_dim_sizes_and_tiling_dims(
grouped_sharding.group_dim_sizes.begin(),
grouped_sharding.group_dim_sizes.end());
group_dim_sizes_and_tiling_dims.insert(group_dim_sizes_and_tiling_dims.end(),
tiling_dims.begin(),
tiling_dims.end());
Array<int64_t> tiling(group_dim_sizes_and_tiling_dims);
DimensionVector sorted_group_dims(grouped_sharding.group_dims.size());
std::partial_sort_copy(grouped_sharding.group_dims.begin(),
grouped_sharding.group_dims.end(),
sorted_group_dims.begin(), sorted_group_dims.end());
absl::flat_hash_map<int64_t, int64_t> group_dim_to_index(
grouped_sharding.group_dims.size());
for (int64_t i = 0; i < sorted_group_dims.size(); ++i) {
group_dim_to_index[sorted_group_dims[i]] = sorted_group_dims[i] + i;
}
std::vector<int> perm(tiling_dims.size() + grouped_sharding.group_dims.size(),
-1);
for (int64_t i = 0; i < grouped_sharding.group_dims.size(); i++) {
perm[group_dim_to_index[grouped_sharding.group_dims[i]]] = i;
}
int64_t j = grouped_sharding.group_dims.size();
for (int64_t i = 0; i < perm.size(); i++) {
if (perm[i] == -1) {
perm[i] = j++;
}
}
std::vector<int64_t> flattened_device_groups;
flattened_device_groups.reserve(grouped_sharding.device_groups.size() *
grouped_sharding.device_groups[0].size());
bool same_length =
grouped_tiling.num_elements() == grouped_sharding.device_groups[0].size();
for (auto const& v : grouped_sharding.device_groups) {
if (same_length) {
for (int64_t i = 0; i < v.size(); ++i) {
flattened_device_groups.push_back(
v[*(grouped_tiling.array().begin() + i)]);
}
} else {
flattened_device_groups.insert(flattened_device_groups.end(), v.begin(),
v.end());
}
}
tiling.SetValues(flattened_device_groups);
TileAssignment tile_assignment(
std::make_shared<const Array<int64_t>>(std::move(tiling)));
for (int64_t i = 0; i < grouped_sharding.group_dims.size(); ++i) {
int64_t dim = grouped_sharding.group_dims[i];
tiling_dims[dim] *= grouped_sharding.group_dim_sizes[i];
}
tile_assignment = tile_assignment.Transpose(perm).Reshape(tiling_dims);
if (grouped_sharding.subgroup_manual) {
return HloSharding::Subgroup(tile_assignment, subgroup_types,
grouped_sharding.sharding.metadata());
}
return partial_sharding ? HloSharding::PartialTile(tile_assignment)
: HloSharding::Tile(tile_assignment);
}
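// Returns true if `lhs` and `rhs` partition the devices into the same groups.
// With `ignore_group_order`, groups only need to match as sets; otherwise
// group g of `rhs` must coincide with group g of `lhs`.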
bool DeviceGroupsAreMatch(GroupedSharding& lhs, GroupedSharding& rhs,
bool ignore_group_order) {
if (lhs.device_groups.size() != rhs.device_groups.size()) {
return false;
}
bool matching_groups = true;
std::vector<int64_t> device_to_ref_group(lhs.device_groups.size() *
lhs.device_groups[0].size());
for (int64_t g = 0; g < lhs.device_groups.size(); ++g) {
for (int64_t device : lhs.device_groups[g]) {
device_to_ref_group[device] = g;
}
}
auto unique_ref_dev_group =
[&](absl::Span<const int64_t> devices) -> int64_t {
int64_t ref_g = -1;
for (int64_t device : devices) {
if (ref_g == -1) {
ref_g = device_to_ref_group[device];
} else if (ref_g != device_to_ref_group[device]) {
return -1;
}
}
return ref_g;
};
for (int64_t g = 0; g < rhs.device_groups.size(); ++g) {
int64_t ref_g = unique_ref_dev_group(rhs.device_groups[g]);
if (ref_g < 0 || (!ignore_group_order && g != ref_g)) {
matching_groups = false;
break;
}
}
return matching_groups;
}
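// Splits tile dimension `dimension` into [new_dim_size, old_size /
// new_dim_size], mirroring a reshape that splits the corresponding data
// dimension. E.g. splitting dimension 0 of {devices=[4,2]<=[8]} with
// new_dim_size=2 yields {devices=[2,2,2]<=[8]}.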
HloSharding SplitShardingDimension(const HloSharding& sharding,
int64_t dimension, int64_t new_dim_size) {
CHECK_GT(sharding.TiledDataRank(), dimension);
CHECK_EQ(sharding.tile_assignment().dim(dimension) % new_dim_size, 0)
<< "dim size " << new_dim_size;
DimensionVector dimensions(sharding.tile_assignment().dimensions().begin(),
sharding.tile_assignment().dimensions().end());
int64_t current_dimension = dimensions[dimension];
dimensions.insert(dimensions.begin() + dimension + 1,
current_dimension / new_dim_size);
dimensions[dimension] = new_dim_size;
auto new_tile_assignment = sharding.tile_assignment().Reshape(dimensions);
return sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_tile_assignment)
: HloSharding::Subgroup(new_tile_assignment,
sharding.subgroup_types());
}
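// Inverse of SplitShardingDimension: collapses tile dimensions `dimension`
// and `dimension + 1` into a single dimension of their product.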
HloSharding MergeShardingDimension(const HloSharding& sharding,
int64_t dimension) {
CHECK_GT(sharding.TiledDataRank(), dimension);
DimensionVector dimensions(sharding.tile_assignment().dimensions().begin(),
sharding.tile_assignment().dimensions().end());
dimensions[dimension] *= dimensions[dimension + 1];
dimensions.erase(dimensions.begin() + dimension + 1);
auto new_tile_assignment = sharding.tile_assignment().Reshape(dimensions);
return sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(new_tile_assignment)
: HloSharding::Subgroup(new_tile_assignment,
sharding.subgroup_types());
}
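// Builds a tuple sharding for `shape` from the shardings of `elements`,
// substituting Replicate() for elements without a sharding. Returns nullptr
// when no element is sharded at all.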
std::shared_ptr<const HloSharding> CreateTupleSharding(
const Shape& shape, absl::Span<const HloInstruction* const> elements) {
bool any_sharding = false;
for (const HloInstruction* element : elements) {
any_sharding |= element->has_sharding();
}
if (!any_sharding) {
return nullptr;
}
std::vector<HloSharding> sub_shardings;
sub_shardings.reserve(elements.size());
for (const HloInstruction* element : elements) {
if (element->has_sharding()) {
sub_shardings.push_back(element->sharding());
} else {
sub_shardings.push_back(HloSharding::Replicate());
}
}
return std::make_shared<const HloSharding>(
HloSharding::Tuple(shape, sub_shardings));
}
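// Returns true if the sharding on a sort operand can be moved off the sort
// dimension: the operand must be tiled along `sort_dim`, and there must be at
// least one other dimension that is unsharded and larger than 1 to absorb the
// tiles.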
bool IsSortOperandShardingMovable(const HloInstruction* sort_operand,
int64_t sort_dim) {
if (sort_operand == nullptr || sort_operand->shape().rank() < 2 ||
!sort_operand->has_sharding()) {
return false;
}
const auto& sharding = sort_operand->sharding();
if (!sharding.IsTiled() || sharding.IsTileMaximal() ||
sharding.tile_assignment().dim(sort_dim) == 1) {
return false;
}
auto tile_assignment_dims = sharding.tile_assignment().dimensions();
const int rank = sort_operand->shape().rank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (dim == sort_dim || tile_assignment_dims[dim] != 1 ||
sort_operand->shape().dimensions(dim) == 1) {
continue;
}
return true;
}
return false;
}
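// Returns the sharding of `instruction`'s output, if present. For an outfeed
// with a tuple sharding, the last tuple element (the sharding of the output
// token) is returned.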
std::optional<HloSharding> GetOutputSharding(
const HloInstruction* instruction) {
if (!instruction->has_sharding()) {
return std::nullopt;
}
if (instruction->opcode() == HloOpcode::kOutfeed) {
if (!instruction->sharding().IsTuple()) {
return instruction->sharding();
}
return instruction->sharding().tuple_elements().back();
}
return instruction->sharding();
}
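// Given the per-device shard shape `shape` under `sharding`, returns the full
// (unsharded) shape. Tuple shapes are handled by recursing into the leaves.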
Shape UntileShape(const HloSharding& sharding, const Shape& shape) {
if (!sharding.IsTuple()) {
return UntileLeafShape(sharding, shape);
}
Shape result_shape = shape;
ShapeUtil::ForEachMutableSubshape(
&result_shape,
[&shape, &sharding](Shape* subshape, const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(shape, index)) {
return;
}
const HloSharding& subshape_sharding =
sharding.GetSubSharding(shape, index);
*subshape = UntileLeafShape(subshape_sharding, *subshape);
});
return result_shape;
}
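// Array-shape version of UntileShape: multiplies each tiled dimension by the
// number of tiles along it.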
Shape UntileLeafShape(const HloSharding& sharding, const Shape& shape) {
if (sharding.IsTileMaximal() || sharding.IsManual() || sharding.IsUnknown()) {
return shape;
}
if (!shape.IsArray()) {
return shape;
}
Shape result_shape = shape;
for (int64_t i = 0; i < sharding.TiledDataRank() && i < shape.rank(); ++i) {
result_shape.set_dimensions(
i, shape.dimensions(i) * sharding.tile_assignment().dim(i));
}
return result_shape;
}
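// Returns the per-device shard shape of `shape` under `sharding`; the inverse
// of UntileShape. Tuple shapes are handled by recursing into the leaves.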
Shape TileShape(const HloSharding& sharding, const Shape& shape) {
if (!sharding.IsTuple()) {
return TileLeafShape(sharding, shape);
}
Shape result_shape = shape;
ShapeUtil::ForEachMutableSubshape(
&result_shape,
[&shape, &sharding](Shape* subshape, const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(shape, index)) {
return;
}
const HloSharding& subshape_sharding =
sharding.GetSubSharding(shape, index);
*subshape = TileLeafShape(subshape_sharding, *subshape);
});
return result_shape;
}
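// Array-shape version of TileShape: divides each tiled dimension by the
// number of tiles along it, CHECK-failing if it does not divide evenly.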
Shape TileLeafShape(const HloSharding& sharding, const Shape& shape) {
if (sharding.IsTileMaximal() || sharding.IsManual() || sharding.IsUnknown()) {
return shape;
}
if (!shape.IsArray()) {
return shape;
}
Shape result_shape = shape;
for (int64_t i = 0; i < sharding.TiledDataRank() && i < shape.rank(); ++i) {
CHECK_EQ(shape.dimensions(i) % sharding.tile_assignment().dim(i), 0);
result_shape.set_dimensions(
i, shape.dimensions(i) / sharding.tile_assignment().dim(i));
}
return result_shape;
}
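// Re-runs the module's layout canonicalization callback and copies the
// resulting layouts into the entry computation's result and/or parameter
// layouts. A no-op when neither update is requested or when no callback is
// registered.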
absl::Status CanonicalizeLayoutAfterShardingPropagation(
HloModule* module, bool update_output_layout,
bool update_parameters_layout) {
if (!update_output_layout && !update_parameters_layout) {
return absl::OkStatus();
}
if (!module->layout_canonicalization_callback()) {
LOG(INFO) << "There is no registered layout_canonicalization_callback.";
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(auto shapes_with_layout,
module->layout_canonicalization_callback()(*module));
if (update_output_layout &&
module->entry_computation_layout().result_layout().LayoutIsSet()) {
TF_RETURN_IF_ERROR(module->mutable_entry_computation_layout()
->mutable_result_layout()
->CopyLayoutFromShape(shapes_with_layout.second));
}
if (update_parameters_layout) {
for (int64_t i = 0; i < module->entry_computation()->num_parameters();
++i) {
if (module->entry_computation_layout()
.parameter_layout(i)
.LayoutIsSet()) {
TF_RETURN_IF_ERROR(
module->mutable_entry_computation_layout()
->mutable_parameter_layout(i)
->CopyLayoutFromShape(shapes_with_layout.first[i]));
}
}
}
return absl::OkStatus();
}
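// Returns true unless `sharding` pins the data to a single device: both tiled
// and fully replicated shardings count as spatially partitioned here.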
bool IsSpatiallyPartitioned(const HloSharding& sharding) {
if (sharding.IsTuple()) {
return absl::c_any_of(sharding.tuple_elements(),
[](const HloSharding& sub_sharding) {
return IsSpatiallyPartitioned(sub_sharding);
});
} else {
return !sharding.IsTileMaximal() || sharding.IsReplicated();
}
}
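// Compares two tuple shardings leaf by leaf and returns a bitmask: bit 0 is
// set if some leaf of `lhs` is manual where the corresponding leaf of `rhs`
// is tile-maximal, and bit 1 for the reverse relation.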
int MaskTupleShardingStrictlyBetter(const HloSharding& lhs,
const HloSharding& rhs) {
DCHECK(lhs.IsTuple());
DCHECK(rhs.IsTuple());
const auto& lhs_shardings = lhs.tuple_elements();
const auto& rhs_shardings = rhs.tuple_elements();
CHECK_EQ(lhs_shardings.size(), rhs_shardings.size());
int mask = 0;
for (int64_t i = 0; i < lhs_shardings.size(); ++i) {
const auto& lhs_shard = lhs_shardings[i];
const auto& rhs_shard = rhs_shardings[i];
CHECK_EQ(lhs_shard.IsTuple(), rhs_shard.IsTuple());
if (lhs_shard.IsTuple()) {
mask |= MaskTupleShardingStrictlyBetter(lhs_shard, rhs_shard);
} else {
if (lhs_shard.IsManualLeaf() && rhs_shard.IsTileMaximalLeaf()) {
mask |= 1;
}
if (rhs_shard.IsManualLeaf() && lhs_shard.IsTileMaximalLeaf()) {
mask |= 2;
}
}
if (mask == 3) break;
}
return mask;
}
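// Returns true if `lhs` is strictly preferable to `rhs`: at least one leaf is
// manual in `lhs` where `rhs` is tile-maximal, and no leaf has the reverse
// relation.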
bool IsShardingStrictlyBetter(const HloSharding& lhs, const HloSharding& rhs) {
CHECK_EQ(lhs.IsTuple(), rhs.IsTuple()) << lhs << " <> " << rhs;
if (lhs.IsTuple()) {
return MaskTupleShardingStrictlyBetter(lhs, rhs) == 1;
}
return lhs.IsManualLeaf() && rhs.IsTileMaximalLeaf();
}
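// Decides whether the candidate sharding `from` should replace the existing
// sharding `to_improved` (which may be null). `from` wins when it is strictly
// better, when there is nothing to improve, or when it merges cleanly with
// the existing sharding; a merge that does not increase the number of tiles
// must preserve the existing device distribution to be accepted.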
std::optional<HloSharding> ReturnImprovedShardingImpl(
HloSharding from, const HloSharding* to_improved,
const Shape& to_improved_shape, bool may_combine_partial_sharding,
bool allow_aggressive_resharding) {
if (to_improved != nullptr && IsShardingStrictlyBetter(from, *to_improved)) {
return std::move(from);
}
if (!IsSpatiallyPartitioned(from)) {
return std::nullopt;
}
if (to_improved == nullptr) {
return std::move(from);
}
if (from.IsManual()) {
return std::nullopt;
}
int64_t sharding_tiles = from.NumTiles();
if (MergeSharding(*to_improved, &from, may_combine_partial_sharding)) {
if (!allow_aggressive_resharding && to_improved_shape.IsArray() &&
!to_improved->IsTileMaximal() && from.NumTiles() == sharding_tiles) {
if (!IsSubTilingOrEqualSharding(to_improved_shape, from, *to_improved)) {
VLOG(10) << "Not merging because of different device distribution";
VLOG(10) << "Instr sharding: " << to_improved->ToString();
VLOG(10) << "New sharding " << from.ToString();
return std::nullopt;
}
}
return std::move(from);
}
return std::nullopt;
}
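// Infers a sharding for operand `operand_index` (0 = lhs, 1 = rhs) of a dot
// from two sources: the dot's output sharding, mapped back through the batch
// and non-contracting dimensions, and optionally the other operand's
// sharding, mapped through the batch and contracting dimensions; the two are
// merged when compatible.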
HloSharding InferDotOperandSharding(
const HloSharding* dot_sharding, const HloSharding* other_operand_sharding,
int64_t operand_index,
const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,
bool consider_other_operand, bool may_combine_partial_sharding) {
CHECK(operand_index == 0 || operand_index == 1);
CHECK(dnums.conv_spatial_dims.empty());
std::vector<int64_t> output_dims_to_replicate;
std::vector<int64_t> other_operand_dims_to_replicate;
for (const auto& dim : operand_index == 0 ? dnums.rhs_non_contracting_dims
: dnums.lhs_non_contracting_dims) {
output_dims_to_replicate.push_back(dim.output);
other_operand_dims_to_replicate.push_back(operand_index == 0 ? dim.rhs
: dim.lhs);
}
for (const auto& dim : dnums.contracting_dims) {
if (dim.output >= 0) {
output_dims_to_replicate.push_back(dim.output);
}
}
for (const auto& dim : operand_index == 0 ? dnums.lhs_non_contracting_dims
: dnums.rhs_non_contracting_dims) {
int64_t other_dim = operand_index == 0 ? dim.rhs : dim.lhs;
if (other_dim >= 0) {
other_operand_dims_to_replicate.push_back(other_dim);
}
}
int64_t operand_shape_rank =
operand_index == 0 ? dnums.lhs_shape_rank : dnums.rhs_shape_rank;
int64_t other_shape_rank =
operand_index == 0 ? dnums.rhs_shape_rank : dnums.lhs_shape_rank;
HloSharding sharding = HloSharding::Replicate();
if (dot_sharding != nullptr) {
HloSharding output_other_dims_replicated =
PartiallyReplicateTiledShardingOnDims(*dot_sharding,
output_dims_to_replicate);
std::vector<int64_t> output_to_operand_dims(dnums.output_shape_rank, -1);
std::vector<int64_t> operand_to_output_dims(operand_shape_rank, -1);
for (const auto& dim : dnums.batch_dims) {
output_to_operand_dims[dim.output] =
operand_index == 0 ? dim.lhs : dim.rhs;
operand_to_output_dims[operand_index == 0 ? dim.lhs : dim.rhs] =
dim.output;
}
for (const auto& dim : operand_index == 0
? dnums.lhs_non_contracting_dims
: dnums.rhs_non_contracting_dims) {
output_to_operand_dims[dim.output] =
operand_index == 0 ? dim.lhs : dim.rhs;
operand_to_output_dims[operand_index == 0 ? dim.lhs : dim.rhs] =
dim.output;
}
sharding = std::move(*TransposeShardingWithCollapsedDims(
output_other_dims_replicated, output_to_operand_dims,
operand_to_output_dims));
}
if (consider_other_operand && other_operand_sharding != nullptr &&
IsSpatiallyPartitioned(*other_operand_sharding)) {
auto other_operand_dims_replicated = PartiallyReplicateTiledShardingOnDims(
*other_operand_sharding, other_operand_dims_to_replicate);
std::vector<int64_t> other_to_operand_dims(other_shape_rank, -1);
std::vector<int64_t> operand_to_other_dims(operand_shape_rank, -1);
for (const auto& dim : dnums.batch_dims) {
other_to_operand_dims[operand_index == 0 ? dim.rhs : dim.lhs] =
operand_index == 0 ? dim.lhs : dim.rhs;
operand_to_other_dims[operand_index == 0 ? dim.lhs : dim.rhs] =
operand_index == 0 ? dim.rhs : dim.lhs;
}
for (const auto& dim : dnums.contracting_dims) {
other_to_operand_dims[operand_index == 0 ? dim.rhs : dim.lhs] =
operand_index == 0 ? dim.lhs : dim.rhs;
operand_to_other_dims[operand_index == 0 ? dim.lhs : dim.rhs] =
operand_index == 0 ? dim.rhs : dim.lhs;
}
HloSharding sharding_from_other = *TransposeShardingWithCollapsedDims(
other_operand_dims_replicated, other_to_operand_dims,
operand_to_other_dims);
if (MergeSharding(sharding, &sharding_from_other,
may_combine_partial_sharding)) {
sharding = std::move(sharding_from_other);
}
}
return sharding;
}
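// Convenience overload that reads the shardings directly off the dot (or
// dot-like convolution) instruction and its other operand.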
HloSharding InferDotOperandSharding(
const HloInstruction* dot, int64_t operand_index,
const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,
bool consider_other_operand, bool may_combine_partial_sharding) {
CHECK(dot->opcode() == HloOpcode::kDot ||
dot->opcode() == HloOpcode::kConvolution);
const HloInstruction* other_operand = dot->operand(1 - operand_index);
return InferDotOperandSharding(
dot->has_sharding() ? &dot->sharding() : nullptr,
other_operand->has_sharding() ? &other_operand->sharding() : nullptr,
operand_index, dnums, consider_other_operand,
may_combine_partial_sharding);
}
}
} | #include "xla/hlo/utils/hlo_sharding_util.h"
#include <cstdint>
#include <initializer_list>
#include <optional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace hlo_sharding_util {
namespace {
TEST(HloShardingUtilTest, MergeShardingIfCompatible1) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}, {16, 8}, {1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 4, 2, 4}, {4, 4, 8}, {0, 2, 1})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible2) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 2, 4, 16}, {16, 8}, {1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 2, 4, 4}, {4, 4, 8}, {0, 2, 1})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible3) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({4, 2, 1, 16}, {16, 8}, {1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({1, 1, 4, 32}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 2, 4, 4}, {16, 8}, {1, 0})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible4) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}, {16, 8}, {1, 0}));
HloSharding dst =
HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}, {4, 32}, {1, 0}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 4, 2, 4}, {4, 32}, {1, 0})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible5) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}, {16, 8}, {1, 0}));
HloSharding dst =
HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}, {32, 4}, {1, 0}));
EXPECT_FALSE(MergeShardingIfCompatible(to_merge, &dst));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible6) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}));
EXPECT_FALSE(MergeShardingIfCompatible(to_merge, &dst));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible7) {
HloSharding to_merge = HloSharding::PartialTile(
TileAssignment({2, 1, 2, 2}, {2, 2, 2}, {2, 1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({1, 2, 1, 4}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, &dst));
EXPECT_EQ(dst,
HloSharding::Tile(TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible8) {
HloSharding to_merge = HloSharding::PartialTile(TileAssignment({2, 1, 4}));
HloSharding dst =
HloSharding::PartialTile(TileAssignment({1, 4, 2}, {2, 2, 2}, {2, 1, 0}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, &dst));
EXPECT_EQ(dst,
HloSharding::Tile(TileAssignment({2, 4}, {2, 2, 2}, {0, 2, 1})));
}
TEST(HloShardingUtilTest, TransposeShardingReplicated) {
EXPECT_EQ(TransposeSharding(HloSharding::Replicate(), {0, 1, 2}),
HloSharding::Replicate());
}
TEST(HloShardingUtilTest, TransposeShardingTiled) {
HloSharding input = HloSharding::IotaTile({1, 2, 1, 2});
HloSharding output = HloSharding::IotaTile({2, 1, 2, 1}, {2, 2}, {1, 0});
EXPECT_EQ(TransposeSharding(input, {3, 0, 1, 2}), output);
}
TEST(HloShardingUtilTest, TransposeShardingWithCollapsedDimsSubgroupManual) {
HloSharding input =
HloSharding::Subgroup(TileAssignment({1, 2, 4}), {OpSharding::MANUAL});
HloSharding output =
HloSharding::Subgroup(TileAssignment({1, 1, 2, 4}), {OpSharding::MANUAL});
EXPECT_EQ(TransposeShardingWithCollapsedDims(input, {-1, 2}, {-1, -1, 1}),
output);
}
TEST(HloShardingUtilTest, ReshapeShardingDimensionSizeOnePartitioned1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 2, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 16});
HloSharding input_sharding = HloSharding::IotaTile({3, 2, 2});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 2, 3}, {3, 2, 2}, {1, 2, 0}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingDimensionSizeOnePartitioned2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 1, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 16});
HloSharding input_sharding = HloSharding::IotaTile({2, 3, 2});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 2, 3}, {2, 3, 2}, {0, 2, 1}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingDimensionSizeOnePartitioned3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 1, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {32});
HloSharding input_sharding = HloSharding::IotaTile({2, 3, 2});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({4, 3}, {2, 3, 2}, {0, 2, 1}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingDimensionSizeOnePartitioned4) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 32});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 16});
HloSharding input_sharding = HloSharding::IotaTile({3, 4});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 2, 3}, {3, 4}, {1, 0}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingDimensionSizeOnePartitioned5) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 32});
Shape output_shape = ShapeUtil::MakeShape(F32, {1, 1, 2, 16});
HloSharding input_sharding = HloSharding::IotaTile({2, 3, 4});
HloSharding output_sharding = HloSharding::IotaTile({2, 3, 2, 2});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingMaximal) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 5, 2});
HloSharding sharding = HloSharding::AssignDevice(7);
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledInvalid) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 5, 2});
HloSharding sharding = HloSharding::IotaTile({1, 2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_FALSE(result.has_value());
}
TEST(HloShardingUtilTest, ReshapeShardingTiledMerge) {
Shape input_shape = ShapeUtil::MakeShape(F32, {4, 5, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {20, 7});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplit) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 4, 7});
HloSharding input_sharding = HloSharding::IotaTile({2, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplit2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 4, 7});
HloSharding input_sharding = HloSharding::IotaTile({16, 1});
HloSharding output_sharding = HloSharding::IotaTile({4, 4, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplit3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {36});
Shape output_shape = ShapeUtil::MakeShape(F32, {6, 6});
HloSharding input_sharding = HloSharding::IotaTile({4});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 2}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplitThenMerge) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 4, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 16, 7});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledArbitraryMinorDimensions) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 7, 5, 3});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 15, 2, 14});
HloSharding sharding = HloSharding::IotaTile({2, 1, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledTrivialDimensions) {
Shape input_shape = ShapeUtil::MakeShape(F32, {3, 1, 5, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 5, 1, 7});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 2, 1});
HloSharding output_sharding = HloSharding::IotaTile({1, 2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTrivialDimensionInsertedToEnd) {
Shape input_shape = ShapeUtil::MakeShape(F32, {8, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {8, 16, 1});
HloSharding input_sharding = HloSharding::IotaTile({2, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, NoopReshapeShardingEmptyTile) {
Shape shape = ShapeUtil::MakeShape(F32, {7, 1, 1});
HloSharding sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result = ReshapeSharding(shape, shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingScalar) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {});
HloSharding sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_FALSE(result.has_value());
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {64, 1, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {64, 1});
HloSharding input_sharding = HloSharding::IotaTile({4, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({4, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
result = ReshapeSharding(output_shape, input_shape, output_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), input_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {64, 1, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {64, 1});
HloSharding input_sharding = HloSharding::IotaTile({4, 2, 8});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({4, 2, 8}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {64, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {64, 1, 1});
HloSharding input_sharding = HloSharding::IotaTile({4, 2});
HloSharding output_sharding = HloSharding::IotaTile({4, 2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne4) {
Shape input_shape = ShapeUtil::MakeShape(F32, {4, 2, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 2});
HloSharding input_sharding = HloSharding::IotaTile({4, 2, 4});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({4, 2, 4}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingPrefixShapeSizeOne1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 64});
Shape output_shape = ShapeUtil::MakeShape(F32, {1, 64});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 4});
HloSharding output_sharding = HloSharding::IotaTile({1, 4});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
result = ReshapeSharding(output_shape, input_shape, output_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), input_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingPrefixShapeSizeOne2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 64});
Shape output_shape = ShapeUtil::MakeShape(F32, {1, 64});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
result = ReshapeSharding(output_shape, input_shape, output_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), input_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {6, 2, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 3, 5});
HloSharding sharding = HloSharding::IotaTile({2, 1, 5});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5, 7, 11});
Shape output_shape = ShapeUtil::MakeShape(F32, {10, 21, 11});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1, 1, 13});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 13});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 10});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 5});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_FALSE(result.has_value());
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose4) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5, 7, 11, 13, 17, 19});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 2, 55, 91, 19, 17});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 5, 1, 1, 13, 1, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({1, 1, 5, 1, 1, 1, 13}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeToTileDimension2D) {
std::vector<HloSharding> shardings = {HloSharding::IotaTile({2, 2}),
HloSharding::Tile({{0, 1}, {2, 3}})};
for (const HloSharding& sharding : shardings) {
    EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 1}).tile_assignment(),
              TileAssignment({4, 1}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1}).tile_assignment(),
              TileAssignment({1, 4}, {2, 2}, {1, 0}));
}
}
TEST(HloShardingUtilTest, ReshapeToTileDimension3D_Case1) {
std::vector<HloSharding> shardings = {
HloSharding::IotaTile({2, 2, 2}),
HloSharding::Tile({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}})};
for (const HloSharding& sharding : shardings) {
    EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 1, 2}).tile_assignment(),
              TileAssignment({8, 1, 1}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2}).tile_assignment(),
              TileAssignment({1, 8, 1}, {2, 2, 2}, {1, 0, 2}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 2, {0, 1, 2}).tile_assignment(),
              TileAssignment({1, 1, 8}, {4, 2}, {1, 0}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 2, {1, 2}).tile_assignment(),
              TileAssignment({2, 1, 4}, {2, 2, 2}, {0, 2, 1}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 2}).tile_assignment(),
              TileAssignment({4, 2, 1}, {2, 2, 2}, {1, 0, 2}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 2, {0, 2}).tile_assignment(),
              TileAssignment({1, 2, 4}, {2, 2, 2}, {1, 2, 0}));
}
}
TEST(HloShardingUtilTest, ReshapeToTileDimension3D_Case2) {
std::vector<HloSharding> shardings = {
HloSharding::IotaTile({2, 2, 2}, {4, 2}, {1, 0}),
HloSharding::Tile({{{0, 2}, {4, 6}}, {{1, 3}, {5, 7}}})};
for (const HloSharding& sharding : shardings) {
    EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 1, 2}).tile_assignment(),
              TileAssignment({8, 1, 1}, {4, 2}, {1, 0}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2}).tile_assignment(),
              TileAssignment({1, 8, 1}, {2, 2, 2}, {0, 2, 1}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 2, {0, 1, 2}).tile_assignment(),
              TileAssignment({1, 1, 8}, {2, 4}, {1, 0}));
}
}
TEST(HloShardingUtilTest, ReshapeToTileDimension4D) {
HloSharding sharding1 = HloSharding::IotaTile({2, 3, 5, 7});
HloSharding sharding2 =
HloSharding::Tile(sharding1.tile_assignment().array());
std::vector<HloSharding> shardings = {sharding1, sharding2};
for (const HloSharding& sharding : shardings) {
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1}).tile_assignment(),
              TileAssignment({1, 6, 5, 7}, {2, 3, 5, 7}, {2, 3, 1, 0}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {1, 2}).tile_assignment(),
              TileAssignment({2, 15, 1, 7}, {2, 3, 5, 7}, {0, 3, 1, 2}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {1, 3}).tile_assignment(),
              TileAssignment({2, 21, 5, 1}, {2, 3, 5, 7}, {0, 2, 1, 3}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2}).tile_assignment(),
              TileAssignment({1, 30, 1, 7}, {2, 3, 5, 7}, {3, 1, 0, 2}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 3}).tile_assignment(),
              TileAssignment({1, 42, 5, 1}, {2, 3, 5, 7}, {2, 1, 0, 3}));
    EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {1, 2, 3}).tile_assignment(),
              TileAssignment({2, 105, 1, 1}, {2, 3, 5, 7}, {0, 1, 2, 3}));
    EXPECT_EQ(
        ReshapeToTileDimension(sharding, 1, {0, 1, 2, 3}).tile_assignment(),
        TileAssignment({1, 210, 1, 1}, {2, 3, 5, 7}, {1, 0, 2, 3}));
}
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTranspose1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {6, 4});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 2, 3, 2});
HloSharding input_sharding = HloSharding::IotaTile({6, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 1, 1, 3}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTranspose2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {6, 4});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 6});
HloSharding input_sharding = HloSharding::IotaTile({6, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 3}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTranspose3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {4, 6, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 5, 3});
HloSharding input_sharding = HloSharding::IotaTile({2, 6, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 2, 1, 1, 3}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTiledSplitPartialMatch) {
Shape input_shape = ShapeUtil::MakeShape(F32, {14, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 7, 4, 4});
HloSharding input_sharding = HloSharding::IotaTile({4, 8});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({1, 1, 4, 2, 4}, {4, 8}, {1, 0}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTiledMergeSplitPartialMatch) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 14, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 2, 7, 4, 4});
HloSharding input_sharding = HloSharding::IotaTile({2, 2, 4, 8});
HloSharding output_sharding = HloSharding::PartialTile(
TileAssignment({4, 1, 1, 4, 2, 4}, {2, 2, 4, 8}, {0, 1, 3, 2}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest,
PropagateReshapeShardingTiledSplitPartialMatchManual) {
Shape input_shape = ShapeUtil::MakeShape(F32, {14, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 7, 4, 4});
HloSharding input_sharding =
HloSharding::Subgroup(TileAssignment({4, 8, 2}), {OpSharding::MANUAL});
HloSharding output_sharding = HloSharding::Subgroup(
TileAssignment({1, 1, 4, 2, 4, 2}, {4, 8, 2}, {1, 0, 2}),
{OpSharding::REPLICATED, OpSharding::MANUAL});
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, MergeManualSubgroupSharding) {
TileAssignment tile_assignment({16, 4});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
HloSharding dst = HloSharding::Subgroup(tile_assignment, subgroup_types);
HloSharding to_merge = dst;
EXPECT_FALSE(MergeShardingIfCompatible(to_merge, &dst));
}
TEST(HloShardingUtilTest, GetManualSubgroupSharding_ManualOnly) {
TileAssignment tile_assignment({1, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL};
HloSharding sharding = HloSharding::Subgroup(tile_assignment, subgroup_types);
GroupedSharding group_sharding = GetManualSubgroupSharding(sharding);
EXPECT_EQ(group_sharding.sharding.tile_assignment(), TileAssignment({1, 2}));
EXPECT_THAT(group_sharding.device_groups[0],
::testing::ElementsAreArray({0, 2}));
EXPECT_THAT(group_sharding.device_groups[1],
::testing::ElementsAreArray({1, 3}));
}
TEST(HloShardingUtilTest, GetManualSubgroupSharding_ManualAndReplicated) {
TileAssignment tile_assignment({1, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::REPLICATED,
OpSharding::MANUAL};
HloSharding sharding = HloSharding::Subgroup(tile_assignment, subgroup_types);
GroupedSharding group_sharding = GetManualSubgroupSharding(sharding);
EXPECT_EQ(group_sharding.sharding.ToString(),
"{devices=[1,2,2]<=[4] last_tile_dim_replicate}");
EXPECT_THAT(group_sharding.device_groups[0],
::testing::ElementsAreArray({0, 2, 4, 6}));
EXPECT_THAT(group_sharding.device_groups[1],
::testing::ElementsAreArray({1, 3, 5, 7}));
}
TEST(HloShardingUtilTest, GetManualSubgroupSharding_ReplicatedAndManual) {
TileAssignment tile_assignment({1, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
HloSharding sharding = HloSharding::Subgroup(tile_assignment, subgroup_types);
GroupedSharding group_sharding = GetManualSubgroupSharding(sharding);
EXPECT_EQ(group_sharding.sharding.ToString(),
"{devices=[1,2,2]<=[4] last_tile_dim_replicate}");
EXPECT_THAT(group_sharding.device_groups[0],
::testing::ElementsAreArray({0, 1, 4, 5}));
EXPECT_THAT(group_sharding.device_groups[1],
::testing::ElementsAreArray({2, 3, 6, 7}));
}
TEST(HloShardingUtilTest, UngroupSharding_ManualOnly) {
HloSharding sharding = HloSharding::IotaTile({1, 2});
std::vector<std::vector<int64_t>> device_groups = {{0, 2}, {1, 3}};
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
auto grouped = GroupedSharding(
std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes), sharding.tile_assignment().num_dimensions(),
      sharding, /*subgroup_manual=*/true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
EXPECT_EQ(ungroup_sharding.ToString(),
"{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}");
}
TEST(HloShardingUtilTest, UngroupSharding_ReplicatedAndManual) {
HloSharding sharding = HloSharding::PartialTile(TileAssignment({1, 2, 2}));
std::vector<std::vector<int64_t>> device_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
DimensionVector group_dims = {3};
DimensionVector group_dim_sizes = {2};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes),
sharding.tile_assignment().num_dimensions() - 1, sharding,
                      /*subgroup_manual=*/true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(
ungroup_sharding.ToString(),
"{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, UngroupSharding_ManualAndReplicated) {
HloSharding sharding = HloSharding::PartialTile(TileAssignment({1, 2, 2}));
std::vector<std::vector<int64_t>> device_groups = {{0, 1, 4, 5},
{2, 3, 6, 7}};
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes),
sharding.tile_assignment().num_dimensions() - 1, sharding,
                      /*subgroup_manual=*/true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(
ungroup_sharding.ToString(),
"{devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, UngroupSharding_Replicated) {
HloSharding sharding = HloSharding::Replicate();
DimensionVector group_dims = {3};
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> device_groups = {{0, 1}, {2, 3}};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
                      std::move(group_dim_sizes), /*data_rank=*/2, sharding,
                      /*subgroup_manual=*/true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(ungroup_sharding.ToString(),
"{devices=[1,1,2,2]0,1,2,3 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, UngroupSharding_Replicated2) {
HloSharding sharding = HloSharding::Replicate();
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> device_groups = {{0, 2}, {1, 3}};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
                      std::move(group_dim_sizes), /*data_rank=*/2, sharding,
                      /*subgroup_manual=*/true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(ungroup_sharding.ToString(),
"{devices=[1,1,2,2]0,2,1,3 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, GroupedAndUngroupedReplicatedSharding) {
GroupedSharding group_sharding = GetGroupedReplicatedSharding(
      /*num_groups=*/3, /*num_tiles=*/12, /*data_rank=*/2);
EXPECT_EQ(UngroupSharding(group_sharding), HloSharding::Replicate());
}
TEST(HloShardingUtilTest, GroupedAndUngroupedIotaSharding) {
std::vector<std::vector<int64_t>> device_groups = {{0, 1, 2, 3, 4, 5},
{6, 7, 8, 9, 10, 11}};
GroupedSharding group_sharding = GroupedSharding(
      device_groups, /*group_dims=*/{0}, /*group_dim_sizes=*/{2},
      /*data_rank=*/2, HloSharding::IotaTile({1, 2, 3}, {2, 3}, {1, 0}));
EXPECT_EQ(UngroupSharding(group_sharding),
HloSharding::IotaTile({2, 2, 3}, {2, 2, 3}, {0, 2, 1}));
}
TEST(HloShardingUtilTest, GroupedAndUngroupedShardingWithUnsortedGroupDims) {
HloSharding sharding = HloSharding::IotaTile({4, 3, 5, 7});
GroupedSharding group_sharding =
GroupShardingOnDims(sharding, {2, 0}, {1, 2});
EXPECT_EQ(group_sharding.sharding, HloSharding::IotaTile({2, 3, 1, 7}));
EXPECT_EQ(UngroupSharding(group_sharding), sharding);
}
TEST(HloShardingUtilTest, UngroupShardingWithUnsortedGroupDims) {
GroupedSharding group_sharding({{0}, {1}, {2}, {3}}, {1, 0}, {2, 2}, 4,
HloSharding::Replicate());
EXPECT_EQ(UngroupSharding(group_sharding),
HloSharding::IotaTile({2, 2, 1, 1}, {2, 2}, {1, 0}));
}
TEST(HloShardingUtilTest, DeviceGroupsDoesNotMatch) {
HloSharding sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> lhs_device_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
DimensionVector lhs_group_dims = {3};
auto lhs =
GroupedSharding(std::move(lhs_device_groups), std::move(lhs_group_dims),
                      group_dim_sizes, /*data_rank=*/2, sharding,
                      /*subgroup_manual=*/true);
std::vector<std::vector<int64_t>> rhs_device_groups = {{0, 1, 4, 5},
{2, 3, 6, 7}};
DimensionVector rhs_group_dims = {2};
auto rhs =
GroupedSharding(std::move(rhs_device_groups), std::move(rhs_group_dims),
                      group_dim_sizes, /*data_rank=*/2, sharding,
                      /*subgroup_manual=*/true);
EXPECT_FALSE(DeviceGroupsAreMatch(lhs, rhs));
}
TEST(HloShardingUtilTest, DeviceGroupsMatch) {
HloSharding lhs_sharding = HloSharding::Replicate();
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> device_groups = {{0, 2}, {1, 3}};
auto lhs = GroupedSharding(
device_groups, DimensionVector(group_dims.begin(), group_dims.end()),
      group_dim_sizes, /*data_rank=*/2, lhs_sharding,
      /*subgroup_manual=*/true);
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
auto rhs = GroupedSharding(
device_groups, DimensionVector(group_dims.begin(), group_dims.end()),
      group_dim_sizes, /*data_rank=*/2, rhs_sharding,
      /*subgroup_manual=*/true);
EXPECT_TRUE(DeviceGroupsAreMatch(lhs, rhs));
}
TEST(HloShardingUtilTest, IsSubShardingTiledReplicated) {
HloSharding rhs_sharding = HloSharding::Replicate();
HloSharding lhs_sharding = HloSharding::IotaTile({4, 1});
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingReplicatedTiled) {
HloSharding rhs_sharding = HloSharding::IotaTile({4, 1});
HloSharding lhs_sharding = HloSharding::Replicate();
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingTiledPartialReplicated) {
HloSharding rhs_sharding = HloSharding::Replicate();
HloSharding lhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingReplicatedTiledPartial) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
HloSharding lhs_sharding = HloSharding::Replicate();
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingPartialTiledTiled) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
HloSharding lhs_sharding = HloSharding::IotaTile({4, 1});
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingIncompatibleTiled) {
HloSharding rhs_sharding = HloSharding::IotaTile({4, 1});
HloSharding lhs_sharding = HloSharding::IotaTile({1, 4});
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingIncompatibleShapeTiledPartialTiled) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
HloSharding lhs_sharding = HloSharding::IotaTile({4, 1});
Shape shape = ShapeUtil::MakeShape(F32, {129, 253});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubShardingCompatibleShapeTiledPartialTiled) {
HloSharding rhs_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 2}));
HloSharding lhs_sharding = HloSharding::IotaTile({4, 1});
Shape shape = ShapeUtil::MakeShape(F32, {128, 253});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingNoShortcut) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
HloSharding lhs_sharding = HloSharding::IotaTile({4});
std::vector<int64_t> success = {1, 3, 4, 7, 8, 11, 12, 15, 16, 19, 20};
std::vector<int64_t> fail = {2, 5, 6, 9, 10, 13, 14, 17, 18};
for (int64_t i : success) {
Shape shape = ShapeUtil::MakeShape(F32, {i});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
for (int64_t i : fail) {
Shape shape = ShapeUtil::MakeShape(F32, {i});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut1) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
HloSharding lhs_sharding = HloSharding::IotaTile({4});
Shape shape = ShapeUtil::MakeShape(F32, {8});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut2) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
Array<int64_t> lhs_array({4});
lhs_array.SetValues({1, 0, 2, 3});
HloSharding lhs_sharding = HloSharding::Tile(lhs_array);
Shape shape = ShapeUtil::MakeShape(F32, {8});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut3) {
HloSharding rhs_sharding = HloSharding::PartialTile(TileAssignment({2, 2}));
HloSharding lhs_sharding = HloSharding::IotaTile({4}, {2, 2}, {1, 0});
Shape shape = ShapeUtil::MakeShape(F32, {8});
EXPECT_FALSE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut4) {
HloSharding rhs_sharding =
HloSharding::PartialTile(TileAssignment({2, 2}, {2, 2}, {1, 0}));
HloSharding lhs_sharding = HloSharding::IotaTile({4}, {2, 2}, {1, 0});
Shape shape = ShapeUtil::MakeShape(F32, {8});
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut5) {
HloSharding rhs_sharding =
HloSharding::PartialTile(TileAssignment({2, 3, 5, 7}));
HloSharding lhs_sharding_1 =
HloSharding::IotaTile({2, 21, 5}, {2, 3, 5, 7}, {0, 1, 3, 2});
HloSharding lhs_sharding_2 =
HloSharding::IotaTile({2, 21, 5}, {2, 3, 5, 7}, {0, 2, 3, 1});
HloSharding lhs_sharding_3 = HloSharding::IotaTile({2, 21, 5});
std::vector<Shape> shapes = {ShapeUtil::MakeShape(F32, {10, 42, 10}),
ShapeUtil::MakeShape(F32, {11, 41, 11})};
for (const auto& shape : shapes) {
EXPECT_TRUE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_1, rhs_sharding));
EXPECT_FALSE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_2, rhs_sharding));
EXPECT_FALSE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_3, rhs_sharding));
}
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut6) {
HloSharding rhs_sharding =
HloSharding::PartialTile(TileAssignment({2, 3, 5, 7 * 11 * 13}));
HloSharding lhs_sharding_1 = HloSharding::PartialTile(TileAssignment(
{2 * 7, 3, 5 * 11, 13}, {2, 3, 5, 7, 11, 13}, {0, 3, 1, 2, 4, 5}));
HloSharding lhs_sharding_2 = HloSharding::PartialTile(TileAssignment(
{2 * 7, 3, 5 * 11, 13}, {2, 3, 5, 11, 7, 13}, {0, 4, 1, 2, 3, 5}));
HloSharding lhs_sharding_3 = HloSharding::PartialTile(TileAssignment(
{2 * 7, 3, 5 * 11, 13}, {2, 3, 5, 13, 7, 11}, {0, 4, 1, 2, 5, 3}));
HloSharding lhs_sharding_4 = HloSharding::PartialTile(TileAssignment(
{2 * 7, 3, 5 * 11, 13}, {2, 3, 5, 7, 13, 11}, {0, 3, 1, 2, 5, 4}));
HloSharding lhs_sharding_5 =
HloSharding::PartialTile(TileAssignment({2 * 7, 3, 5 * 11, 13}));
std::vector<Shape> shapes = {
ShapeUtil::MakeShape(F32, {2 * 7, 9, 5 * 11}),
ShapeUtil::MakeShape(F32, {2 * 7 - 1, 4, 5 * 11 - 1})};
for (const auto& shape : shapes) {
EXPECT_TRUE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_1, rhs_sharding));
EXPECT_TRUE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_2, rhs_sharding));
EXPECT_TRUE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_3, rhs_sharding));
EXPECT_TRUE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_4, rhs_sharding));
EXPECT_FALSE(
IsSubTilingOrEqualSharding(shape, lhs_sharding_5, rhs_sharding));
}
}
TEST(HloShardingUtilTest, IsSubTilingOrEqualShardingShortcut7) {
HloSharding rhs_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 1, 3, 5 * 7 * 11}));
HloSharding lhs_sharding = HloSharding::PartialTile(
TileAssignment({5, 2, 7, 3, 11}, {2, 3, 5, 7, 11}, {2, 0, 3, 1, 4}));
std::vector<Shape> shapes = {ShapeUtil::MakeShape(F32, {5, 2, 7, 3}),
ShapeUtil::MakeShape(F32, {2, 2, 9, 3})};
for (const auto& shape : shapes) {
EXPECT_TRUE(IsSubTilingOrEqualSharding(shape, lhs_sharding, rhs_sharding));
}
}
TEST(HloShardingUtilTest, IsSortOperandShardingMovableRankTwoOneFreeDim) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {8, 128}), 1);
iota.set_sharding(HloSharding::IotaTile({1, 2}));
EXPECT_TRUE(IsSortOperandShardingMovable(&iota, 1));
}
TEST(HloShardingUtilTest,
IsSortOperandShardingMovableRankTwoOneFreeDimOfSize1) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {1, 128}), 1);
iota.set_sharding(HloSharding::IotaTile({1, 2}));
EXPECT_FALSE(IsSortOperandShardingMovable(&iota, 1));
}
TEST(HloShardingUtilTest, IsSortOperandShardingMovableRankTwoNoFreeDims) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {8, 128}), 1);
iota.set_sharding(HloSharding::IotaTile({2, 2}));
EXPECT_FALSE(IsSortOperandShardingMovable(&iota, 1));
}
TEST(HloShardingUtilTest, IsSortOperandShardingMovableRankOne) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {1024}), 1);
iota.set_sharding(
HloSharding::Tile(TileAssignment(std::initializer_list<int64_t>{2})));
EXPECT_FALSE(IsSortOperandShardingMovable(&iota, 0));
}
TEST(HloShardingUtilTest, IsSortOperandShardingMovableNoSharding) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {1024}), 1);
EXPECT_FALSE(IsSortOperandShardingMovable(&iota, 0));
}
TEST(HloShardingUtilTest, IsSortOperandShardingMovableReplicated) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {8, 128}), 1);
iota.set_sharding(HloSharding::Replicate());
EXPECT_FALSE(IsSortOperandShardingMovable(&iota, 1));
}
TEST(HloShardingUtilTest, IsSortOperandShardingMovableSortDimUnsharded) {
HloIotaInstruction iota(ShapeUtil::MakeShape(F32, {8, 128}), 1);
iota.set_sharding(HloSharding::IotaTile({1, 2}));
EXPECT_FALSE(IsSortOperandShardingMovable(&iota, 0));
}
TEST(HloShardingUtilTest, TileShape) {
HloSharding sharding = HloSharding::Tile(TileAssignment({4, 1}));
Shape shape_0 = ShapeUtil::MakeShape(F32, {80, 128});
auto tile_shape_0 = hlo_sharding_util::TileShape(sharding, shape_0);
auto expected_shape_0 = ShapeUtil::MakeShape(F32, {20, 128});
EXPECT_EQ(tile_shape_0, expected_shape_0);
Shape shape_1 = ShapeUtil::MakeShape(F32, {40, 128});
auto tile_shape_1 = hlo_sharding_util::TileShape(sharding, shape_1);
auto expected_shape_1 = ShapeUtil::MakeShape(F32, {10, 128});
EXPECT_EQ(tile_shape_1, expected_shape_1);
const Shape tuple = ShapeUtil::MakeTupleShape({tile_shape_0, tile_shape_1});
EXPECT_EQ(hlo_sharding_util::TileShape(sharding, tuple),
ShapeUtil::MakeTupleShape({expected_shape_0, expected_shape_1}));
}
TEST(HloShardingUtilTest, UntileShape) {
HloSharding sharding = HloSharding::Tile(TileAssignment({4, 1}));
Shape shape_0 = ShapeUtil::MakeShape(F32, {80, 128});
auto tile_shape_0 = hlo_sharding_util::UntileShape(sharding, shape_0);
auto expected_shape_0 = ShapeUtil::MakeShape(F32, {320, 128});
EXPECT_EQ(tile_shape_0, expected_shape_0);
Shape shape_1 = ShapeUtil::MakeShape(F32, {40, 128});
auto tile_shape_1 = hlo_sharding_util::UntileShape(sharding, shape_1);
auto expected_shape_1 = ShapeUtil::MakeShape(F32, {160, 128});
EXPECT_EQ(tile_shape_1, expected_shape_1);
const Shape tuple = ShapeUtil::MakeTupleShape({tile_shape_0, tile_shape_1});
EXPECT_EQ(hlo_sharding_util::UntileShape(sharding, tuple),
ShapeUtil::MakeTupleShape({expected_shape_0, expected_shape_1}));
}
using HloShardingUtilTestWithHlo = HloTestBase;
TEST_F(HloShardingUtilTestWithHlo, InferDotOperandShardingTest1) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %main.7 {
%p0 = bf16[32,64,128,512] parameter(0), sharding={devices=[8,1,1,4]<=[32]}
%p1 = bf16[32,64,256,512] parameter(1), sharding={devices=[1,1,1,2,16]<=[8,2,2]T(1,0,2) last_tile_dim_replicate}
ROOT %dot.3 = bf16[32,64,128,256] dot(%p0, %p1), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_contracting_dims={3}, sharding={devices=[2,2,2,2,2]<=[32] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
auto dnums = dot_as_convolution_util::ParseDotGeneralFromDot(dot);
bool consider_other_operand = true;
bool may_combine_partial_sharding = false;
EXPECT_EQ(InferDotOperandSharding(dot, 0, dnums, consider_other_operand,
may_combine_partial_sharding),
HloSharding::PartialTile(TileAssignment({2, 2, 2, 1, 4})));
EXPECT_EQ(InferDotOperandSharding(dot, 1, dnums, consider_other_operand,
may_combine_partial_sharding),
HloSharding::IotaTile({8, 1, 1, 4}));
consider_other_operand = true;
may_combine_partial_sharding = true;
EXPECT_EQ(InferDotOperandSharding(dot, 0, dnums, consider_other_operand,
may_combine_partial_sharding),
HloSharding::PartialTile(TileAssignment({2, 2, 2, 2, 2})));
EXPECT_EQ(InferDotOperandSharding(dot, 1, dnums, consider_other_operand,
may_combine_partial_sharding),
HloSharding::IotaTile({8, 1, 1, 4}));
consider_other_operand = false;
for (bool may_combine_partial_sharding : {false, true}) {
EXPECT_EQ(InferDotOperandSharding(dot, 0, dnums, consider_other_operand,
may_combine_partial_sharding),
HloSharding::PartialTile(TileAssignment({2, 2, 2, 1, 4})));
EXPECT_EQ(InferDotOperandSharding(dot, 1, dnums, consider_other_operand,
may_combine_partial_sharding),
HloSharding::PartialTile(TileAssignment(
{2, 2, 2, 1, 4}, {2, 2, 2, 2, 2}, {0, 1, 3, 2, 4})));
}
}
TEST_F(HloShardingUtilTestWithHlo, InferDotOperandShardingTest2) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %main.7 {
%p0 = bf16[32,64,128,512] parameter(0), sharding={devices=[8,1,1,4]<=[32]}
%p1 = bf16[32,64,256,512] parameter(1), sharding={devices=[1,1,1,2,16]<=[8,2,2]T(1,0,2) last_tile_dim_replicate}
ROOT %dot.3 = bf16[32,64,128,256] dot(%p0, %p1), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_contracting_dims={3}, sharding={devices=[2,2,2,2,2]<=[32] last_tile_dim_replicate}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* dot = module->entry_computation()->root_instruction();
auto dnums = dot_as_convolution_util::ParseDotGeneralFromDot(dot);
const HloSharding& lhs_sharding = dot->operand(0)->sharding();
const HloSharding& rhs_sharding = dot->operand(1)->sharding();
const HloSharding& dot_sharding = dot->sharding();
bool may_combine_partial_sharding = true;
for (int64_t i = 0; i < 2; ++i) {
EXPECT_EQ(InferDotOperandSharding(nullptr, nullptr, i, dnums, true,
may_combine_partial_sharding),
HloSharding::Replicate());
}
for (int64_t i = 0; i < 2; ++i) {
EXPECT_EQ(InferDotOperandSharding(&dot_sharding, nullptr, i, dnums, true,
may_combine_partial_sharding),
InferDotOperandSharding(dot, i, dnums, false,
may_combine_partial_sharding));
}
EXPECT_EQ(InferDotOperandSharding(nullptr, &rhs_sharding, 0, dnums, true,
may_combine_partial_sharding),
rhs_sharding);
EXPECT_EQ(InferDotOperandSharding(nullptr, &lhs_sharding, 1, dnums, true,
may_combine_partial_sharding),
lhs_sharding);
EXPECT_EQ(InferDotOperandSharding(nullptr, &rhs_sharding, 0, dnums, false,
may_combine_partial_sharding),
HloSharding::Replicate());
EXPECT_EQ(InferDotOperandSharding(nullptr, &lhs_sharding, 1, dnums, false,
may_combine_partial_sharding),
HloSharding::Replicate());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_sharding_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/utils/hlo_sharding_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94825b0c-0378-4fc1-b42d-9a415dd00c59 | cpp | tensorflow/tensorflow | hlo_parser | third_party/xla/xla/hlo/parser/hlo_parser.cc | third_party/xla/xla/hlo/parser/hlo_parser_test.cc | #include "xla/hlo/parser/hlo_parser.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_original_value.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/parser/hlo_lexer.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/name_uniquer.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
using absl::StrAppend;
using absl::StrCat;
using absl::StrFormat;
using absl::StrJoin;
using std::nullopt;
using std::optional;
const int8_t kDebugLevel = 10;
const int8_t kErrorLevel = 1;
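// Creates and returns a schedule built from the order in which each
// non-fusion computation's instructions were parsed; used when the module is
// annotated with is_scheduled=true.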
HloSchedule ScheduleFromInstructionOrder(HloModule* module) {
HloSchedule schedule(module);
for (HloComputation* computation : module->computations()) {
if (!computation->IsFusionComputation()) {
for (HloInstruction* instruction : computation->instructions()) {
schedule.GetOrCreateSequence(computation).push_back(instruction);
}
}
}
return schedule;
}
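// Returns true if the result shape of the given opcode can be inferred from
// its operands, i.e. the shape may be omitted for that opcode in the HLO
// text.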
bool CanInferShape(HloOpcode code) {
switch (code) {
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAddDependency:
case HloOpcode::kAfterAll:
case HloOpcode::kAtan2:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBroadcast:
case HloOpcode::kCall:
case HloOpcode::kCeil:
case HloOpcode::kCholesky:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kConvolution:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kDivide:
case HloOpcode::kDomain:
case HloOpcode::kDot:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFft:
case HloOpcode::kFloor:
case HloOpcode::kGather:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kGetTupleElement:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kAnd:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kMap:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kPad:
case HloOpcode::kPartitionId:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReduce:
case HloOpcode::kRemainder:
case HloOpcode::kReplicaId:
case HloOpcode::kReverse:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kScatter:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kSort:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTranspose:
case HloOpcode::kTriangularSolve:
case HloOpcode::kTuple:
case HloOpcode::kWhile:
case HloOpcode::kTopK:
return true;
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopyDone:
case HloOpcode::kCopyStart:
case HloOpcode::kDynamicReshape:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReduceScatter:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kSlice:
case HloOpcode::kBitcast:
case HloOpcode::kBitcastConvert:
case HloOpcode::kConstant:
case HloOpcode::kConvert:
case HloOpcode::kCustomCall:
case HloOpcode::kFusion:
case HloOpcode::kInfeed:
case HloOpcode::kIota:
case HloOpcode::kOutfeed:
case HloOpcode::kParameter:
case HloOpcode::kReducePrecision:
case HloOpcode::kReshape:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kStochasticConvert:
return false;
}
}
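// Parser for the HloModule::ToString() format text.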
class HloParserImpl : public HloParser {
public:
using LocTy = HloLexer::LocTy;
using BoolList = absl::InlinedVector<bool, 1>;
explicit HloParserImpl(absl::string_view str,
const HloParserOptions& options = HloParserOptions())
: lexer_(str), options_(options) {}
absl::Status Run(HloModule* module) override;
std::string GetError() const { return StrJoin(error_, "\n"); }
absl::StatusOr<Shape> ParseShapeOnly();
absl::StatusOr<Layout> ParseLayoutOnly();
absl::StatusOr<HloSharding> ParseShardingOnly();
absl::StatusOr<FrontendAttributes> ParseFrontendAttributesOnly();
absl::StatusOr<StatisticsViz> ParseStatisticsVizOnly();
absl::StatusOr<std::vector<bool>> ParseParameterReplicationOnly();
absl::StatusOr<BoolList> ParseBooleanListOrSingleBooleanOnly();
absl::StatusOr<Window> ParseWindowOnly();
absl::StatusOr<ConvolutionDimensionNumbers>
ParseConvolutionDimensionNumbersOnly();
absl::StatusOr<PaddingConfig> ParsePaddingConfigOnly();
absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly();
private:
enum class AttrTy {
kBool,
kInt64,
kInt32,
kFloat,
kString,
kLiteral,
kBracedInt64List,
kBracedInt64ListList,
kHloComputation,
kBracedHloComputationList,
kFftType,
kPaddingType,
kComparisonDirection,
kComparisonType,
kWindow,
kConvolutionDimensionNumbers,
kSharding,
kFrontendAttributes,
kStatisticsViz,
kBracedBoolListOrBool,
kParameterReplication,
kInstructionList,
kSliceRanges,
kPaddingConfig,
kMetadata,
kFusionKind,
kDistribution,
kDomain,
kPrecisionList,
kShape,
kShapeList,
kEnum,
kRandomAlgorithm,
kPrecisionAlgorithm,
kAliasing,
kBufferDonor,
kComputationLayout,
kInstructionAliasing,
kCustomCallSchedule,
kCustomCallApiVersion,
kSparsityDescriptor,
kStringOrJsonDict,
kCollectiveDeviceList,
kOriginalValue,
};
struct AttrConfig {
bool required;
AttrTy attr_type;
void* result;
};
using InstrNameTable =
absl::flat_hash_map<std::string, std::pair<HloInstruction*, LocTy>>;
InstrNameTable& current_name_table() { return scoped_name_tables_.back(); }
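  // Locates an instruction with the given name in the current name table. If
  // it is absent and the parser is in single-instruction mode, attempts to
  // create a matching parameter via create_missing_instruction_. Reports an
  // error and returns nullptr on failure.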
std::pair<HloInstruction*, LocTy>* FindInstruction(
const std::string& name, const optional<Shape>& shape = nullopt);
bool ParseSingleInstruction(HloModule* module);
bool ParseHloModule(HloModule* module,
bool parse_module_without_header = false);
bool ParseComputations(HloModule* module);
bool ParseComputation(HloComputation** entry_computation);
bool ParseInstructionList(HloComputation** computation,
const std::string& computation_name);
bool ParseInstruction(HloComputation::Builder* builder,
std::string* root_name);
bool ParseInstructionRhs(HloComputation::Builder* builder, std::string name,
LocTy name_loc, bool allow_attributes = true);
bool ParseControlPredecessors(HloInstruction* instruction);
bool ParseLiteral(Literal* literal);
bool ParseLiteral(Literal* literal, const Shape& shape);
bool ParseTupleLiteral(Literal* literal, const Shape& shape);
bool ParseNonTupleLiteral(Literal* literal, const Shape& shape);
bool ParseDenseLiteral(Literal* literal, const Shape& shape);
HloInstruction* CreateInstruction(
HloComputation::Builder* builder, absl::string_view name,
std::optional<Shape> shape, HloOpcode opcode,
std::optional<HloOpcode> async_wrapped_opcode,
absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes,
std::vector<HloInstruction*>* preset_operands = nullptr);
bool SetValueInLiteral(LocTy loc, int64_t value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, double value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, bool value, int64_t index,
Literal* literal);
bool SetValueInLiteral(LocTy loc, std::complex<double> value, int64_t index,
Literal* literal);
template <typename LiteralNativeT, typename ParsedElemT>
bool SetValueInLiteralHelper(LocTy loc, ParsedElemT value, int64_t index,
Literal* literal);
template <typename LiteralNativeT, typename ParsedElemT>
bool CheckParsedValueIsInRange(LocTy loc, ParsedElemT value);
template <typename LiteralNativeT>
bool CheckParsedValueIsInRange(LocTy loc, std::complex<double> value);
bool ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder);
bool ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder, int expected_size);
struct SliceRanges {
std::vector<int64_t> starts;
std::vector<int64_t> limits;
std::vector<int64_t> strides;
};
struct DomainData {
std::unique_ptr<DomainMetadata> entry_metadata;
std::unique_ptr<DomainMetadata> exit_metadata;
};
bool ParseAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes = true, const std::optional<Shape>& shape = {});
bool ParseSubAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs);
bool ParseAttributeHelper(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
absl::flat_hash_set<std::string>* seen_attrs,
const std::optional<Shape>& shape = {});
bool CopyAttributeToProtoMessage(
absl::flat_hash_set<std::string> non_proto_attrs,
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
tsl::protobuf::Message* message);
bool ParseAttributesAsProtoMessage(
const absl::flat_hash_map<std::string, AttrConfig>& non_proto_attrs,
tsl::protobuf::Message* message);
bool ParseComputationName(HloComputation** value);
bool ParseInstructionNames(std::vector<HloInstruction*>* instructions);
bool ParseWindow(Window* window, bool expect_outer_curlies);
bool ParseConvolutionDimensionNumbers(ConvolutionDimensionNumbers* dnums);
bool ParsePaddingConfig(PaddingConfig* padding);
bool ParseMetadata(OpMetadata& metadata);
bool ParseSingleOrListMetadata(std::vector<OpMetadata>& metadata);
bool ParseOpShardingType(OpSharding::Type* type);
bool ParseListShardingType(std::vector<OpSharding::Type>* types);
bool ParseSharding(std::optional<HloSharding>& sharding);
bool ParseCollectiveDeviceList(CollectiveDeviceList* device_list);
bool ParseFrontendAttributes(FrontendAttributes* frontend_attributes);
bool ParseStatisticsViz(StatisticsViz* statistics_viz);
bool ParseTileAssignment(std::vector<int64_t>& tile_assignment_dimensions,
std::vector<int64_t>& iota_reshape_dims,
std::vector<int>& iota_transpose_perm,
std::vector<int64_t>* devices);
bool ParseSingleSharding(std::optional<HloSharding>& sharding,
bool lbrace_pre_lexed);
bool ParseParameterReplication(ParameterReplication* parameter_replication);
bool ParseBooleanListOrSingleBoolean(BoolList* boolean_list);
bool ParseReplicaGroupsOnly(std::vector<ReplicaGroup>* replica_groups);
bool ParseDomain(DomainData* domain);
bool ParseDxD(const std::string& name, std::vector<int64_t>* result);
bool ParseWindowPad(std::vector<std::vector<int64_t>>* pad);
bool ParseSliceRanges(SliceRanges* result);
bool ParsePrecisionList(std::vector<PrecisionConfig::Precision>* result);
bool ParseHloComputation(HloComputation** result);
bool ParseHloComputationList(std::vector<HloComputation*>* result);
bool ParseShapeList(std::vector<Shape>* result);
bool ParseInt64List(TokKind start, TokKind end, TokKind delim,
std::vector<int64_t>* result);
bool ParseInt64ListList(TokKind start, TokKind end, TokKind delim,
std::vector<std::vector<int64_t>>* result);
bool ParseList(TokKind start, TokKind end, TokKind delim,
absl::FunctionRef<bool()> parse_and_add_item);
bool ParseParamListToShape(Shape* shape, LocTy* shape_loc);
bool ParseParamList();
bool ParseName(std::string* result);
bool ParseAttributeName(std::string* result);
bool ParseString(std::string* result);
bool ParseJsonDict(std::string* result);
bool ParseDimensionSizes(std::vector<int64_t>* dimension_sizes,
std::vector<bool>* dynamic_dimensions);
bool ParseShape(Shape* result);
bool ParseLayout(Layout* layout);
bool ParseLayoutIntAttribute(int64_t* attr_value,
absl::string_view attr_description);
bool ParseDimLevelTypes(
absl::InlinedVector<DimLevelType, InlineRank()>* dim_level_types,
absl::InlinedVector<bool, InlineRank()>* dim_unique,
absl::InlinedVector<bool, InlineRank()>* dim_ordered);
bool ParseTiles(std::vector<Tile>* tiles);
bool ParseSplitConfigs(std::vector<SplitConfig>& split_configs);
bool ParsePhysicalShape(Shape* physical_shape);
bool ParseOpcode(HloOpcode* opcode,
std::optional<HloOpcode>* async_wrapped_opcode);
bool ParseFftType(FftType* result);
bool ParsePaddingType(PaddingType* result);
bool ParsePrimitiveType(PrimitiveType* result);
bool ParseComparisonDirection(ComparisonDirection* result);
bool ParseComparisonType(Comparison::Type* result);
bool ParseFusionKind(HloInstruction::FusionKind* result);
bool ParseRandomDistribution(RandomDistribution* result);
bool ParseRandomAlgorithm(RandomAlgorithm* result);
bool ParsePrecision(PrecisionConfig::Precision* result);
bool ParseAlgorithm(PrecisionConfig::Algorithm* result);
bool ParseInt64(int64_t* result);
bool ParseDouble(double* result);
bool ParseComplex(std::complex<double>* result);
bool ParseBool(bool* result);
bool ParseToken(TokKind kind, const std::string& msg);
bool ParseUnsignedIntegerType(PrimitiveType* primitive_type);
bool ParseOriginalValue(
optional<std::shared_ptr<OriginalValue>>* original_value,
const Shape& shape);
using AliasingData =
absl::flat_hash_map<ShapeIndex, HloInputOutputAliasConfig::Alias>;
using BufferDonor = absl::flat_hash_set<HloBufferDonorConfig::BufferDonor>;
bool ParseAliasing(AliasingData* data);
bool ParseBufferDonor(BufferDonor* data);
bool ParseComputationLayout(ComputationLayout* computation_layout);
bool ParseInstructionOutputOperandAliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>*
aliasing_output_operand_pairs);
bool ParseCustomCallSchedule(CustomCallSchedule* result);
bool ParseCustomCallApiVersion(CustomCallApiVersion* result);
bool ParseSparsityDescriptor(std::vector<SparsityDescriptor>* result);
bool ParseShapeIndex(ShapeIndex* out);
bool CanBeShape();
bool CanBeParamListToShape();
bool TokenError(absl::string_view msg);
bool Error(LocTy loc, absl::string_view msg);
bool EatIfPresent(TokKind kind);
bool AddInstruction(const std::string& name, HloInstruction* instruction,
LocTy name_loc);
bool AddComputation(const std::string& name, HloComputation* computation,
LocTy name_loc);
HloLexer lexer_;
std::vector<InstrNameTable> scoped_name_tables_;
class Scope {
public:
explicit Scope(std::vector<InstrNameTable>* scoped_name_tables)
: scoped_name_tables_(scoped_name_tables) {
scoped_name_tables_->emplace_back();
}
~Scope() { scoped_name_tables_->pop_back(); }
private:
std::vector<InstrNameTable>* scoped_name_tables_;
};
absl::flat_hash_map<std::string, std::pair<HloComputation*, LocTy>>
computation_pool_;
std::vector<std::unique_ptr<HloComputation>> computations_;
std::vector<std::string> error_;
std::function<std::pair<HloInstruction*, LocTy>*(const std::string& name,
const Shape& shape)>
create_missing_instruction_;
NameUniquer name_uniquer_{"."};
const HloParserOptions options_;
};
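// Splits `s` on `delim` and parses each piece as an int64_t into `out`;
// returns false if any piece fails to parse.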
bool SplitToInt64s(absl::string_view s, char delim, std::vector<int64_t>* out) {
for (const auto& split : absl::StrSplit(s, delim)) {
int64_t val;
if (!absl::SimpleAtoi(split, &val)) {
return false;
}
out->push_back(val);
}
return true;
}
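// Wraps each group of replica ids in a ReplicaGroup proto.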
std::vector<ReplicaGroup> CreateReplicaGroups(
absl::Span<const std::vector<int64_t>> groups) {
std::vector<ReplicaGroup> replica_groups;
absl::c_transform(groups, std::back_inserter(replica_groups),
[](const std::vector<int64_t>& ids) {
ReplicaGroup group;
*group.mutable_replica_ids() = {ids.begin(), ids.end()};
return group;
});
return replica_groups;
}
bool HloParserImpl::Error(LocTy loc, absl::string_view msg) {
auto line_col = lexer_.GetLineAndColumn(loc);
const unsigned line = line_col.first;
const unsigned col = line_col.second;
std::vector<std::string> error_lines;
error_lines.push_back(
StrCat("was parsing ", line, ":", col, ": error: ", msg));
error_lines.emplace_back(lexer_.GetLine(loc));
error_lines.push_back(col == 0 ? "" : StrCat(std::string(col - 1, ' '), "^"));
error_.push_back(StrJoin(error_lines, "\n"));
VLOG(kErrorLevel) << "Error: " << error_.back();
return false;
}
bool HloParserImpl::TokenError(absl::string_view msg) {
return Error(lexer_.GetLoc(), msg);
}
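// Parses the text as a full HloModule when it starts with 'HloModule',
// 'ENTRY', or a name followed by '{'; otherwise falls back to parsing a
// single HloInstruction.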
absl::Status HloParserImpl::Run(HloModule* module) {
lexer_.Lex();
if ((lexer_.GetKind() == TokKind::kw_HloModule) ||
(lexer_.GetKind() == TokKind::kw_ENTRY) ||
(lexer_.LookAhead() == TokKind::kLbrace)) {
    bool parse_module_without_header =
        lexer_.GetKind() != TokKind::kw_HloModule;
if (!ParseHloModule(module, parse_module_without_header)) {
return InvalidArgument(
"Syntax error when trying to parse the text as a HloModule:\n%s",
GetError());
}
return absl::OkStatus();
}
if (!ParseSingleInstruction(module)) {
return InvalidArgument(
"Syntax error when trying to parse the text as a single "
"HloInstruction:\n%s",
GetError());
}
return absl::OkStatus();
}
std::pair<HloInstruction*, HloParserImpl::LocTy>*
HloParserImpl::FindInstruction(const std::string& name,
const optional<Shape>& shape) {
std::pair<HloInstruction*, LocTy>* instr = nullptr;
if (!name.empty()) {
instr = tsl::gtl::FindOrNull(current_name_table(), name);
}
if (instr == nullptr && create_missing_instruction_ != nullptr &&
scoped_name_tables_.size() == 1) {
if (!shape.has_value()) {
Error(lexer_.GetLoc(),
"Operand had no shape in HLO text; cannot create parameter for "
"single-instruction module.");
return nullptr;
}
return create_missing_instruction_(name, *shape);
}
if (instr != nullptr && shape.has_value() &&
!ShapeUtil::Compatible(instr->first->shape(), shape.value())) {
Error(
lexer_.GetLoc(),
StrCat("The declared operand shape ",
ShapeUtil::HumanStringWithLayout(shape.value()),
" is not compatible with the shape of the operand instruction ",
ShapeUtil::HumanStringWithLayout(instr->first->shape()), "."));
return nullptr;
}
return instr;
}
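// shape_index ::= '{' (int64 (',' int64)*)? '}'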
bool HloParserImpl::ParseShapeIndex(ShapeIndex* out) {
if (!ParseToken(TokKind::kLbrace, "Expects '{' at the start of ShapeIndex")) {
return false;
}
std::vector<int64_t> idxs;
while (lexer_.GetKind() != TokKind::kRbrace) {
int64_t idx;
if (!ParseInt64(&idx)) {
return false;
}
idxs.push_back(idx);
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(TokKind::kRbrace, "Expects '}' at the end of ShapeIndex")) {
return false;
}
*out = ShapeIndex(idxs.begin(), idxs.end());
return true;
}
bool HloParserImpl::ParseAliasing(AliasingData* data) {
if (!ParseToken(TokKind::kLbrace,
"Expects '{' at the start of aliasing description")) {
return false;
}
while (lexer_.GetKind() != TokKind::kRbrace) {
ShapeIndex out;
if (!ParseShapeIndex(&out)) {
return false;
}
std::string errmsg =
"Expected format: <output_shape_index>: (<input_param>, "
"<input_param_shape_index>) OR <output_shape_index>: <input_param>";
if (!ParseToken(TokKind::kColon, errmsg)) {
return false;
}
if (!ParseToken(TokKind::kLparen, errmsg)) {
return false;
}
int64_t param_num;
    if (!ParseInt64(&param_num)) {
      return false;
    }
if (!ParseToken(TokKind::kComma, errmsg)) {
return false;
}
ShapeIndex param_idx;
    if (!ParseShapeIndex(&param_idx)) {
return false;
}
HloInputOutputAliasConfig::AliasKind alias_kind =
HloInputOutputAliasConfig::kMayAlias;
if (EatIfPresent(TokKind::kComma)) {
std::string type;
      if (!ParseName(&type)) {
        return false;
      }
if (type == "must-alias") {
alias_kind = HloInputOutputAliasConfig::kMustAlias;
} else if (type == "may-alias") {
alias_kind = HloInputOutputAliasConfig::kMayAlias;
} else {
return TokenError("Unexpected aliasing kind; expected SYSTEM or USER");
}
}
data->emplace(std::piecewise_construct, std::forward_as_tuple(out),
std::forward_as_tuple(param_num, param_idx, alias_kind));
if (!ParseToken(TokKind::kRparen, errmsg)) {
return false;
}
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(TokKind::kRbrace,
"Expects '}' at the end of aliasing description")) {
return false;
}
return true;
}
bool HloParserImpl::ParseBufferDonor(BufferDonor* data) {
if (!ParseToken(TokKind::kLbrace,
"Expects '{' at the start of buffer donor description")) {
return false;
}
std::string errmsg =
"Expected format: (<input_param>, <input_param_shape_index>)";
while (lexer_.GetKind() != TokKind::kRbrace) {
if (!ParseToken(TokKind::kLparen, errmsg)) {
return false;
}
int64_t param_num;
    if (!ParseInt64(&param_num)) {
      return false;
    }
if (!ParseToken(TokKind::kComma, errmsg)) {
return false;
}
ShapeIndex param_idx;
    if (!ParseShapeIndex(&param_idx)) {
return false;
}
if (!ParseToken(TokKind::kRparen, errmsg)) {
return false;
}
data->emplace(param_num, param_idx);
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(TokKind::kRbrace,
"Expects '}' at the end of buffer donor description")) {
return false;
}
return true;
}
bool HloParserImpl::ParseComputationLayout(
ComputationLayout* computation_layout) {
if (!ParseToken(TokKind::kLbrace,
"Expects '{' at the start of aliasing description")) {
return false;
}
if (!ParseToken(TokKind::kLparen, "Expects ( before parameter shape list")) {
return false;
}
while (lexer_.GetKind() != TokKind::kRparen) {
Shape param;
    if (!ParseShape(&param)) {
return false;
}
computation_layout->add_parameter_layout(ShapeLayout(param));
if (lexer_.GetKind() == TokKind::kRparen) {
break;
}
if (!ParseToken(TokKind::kComma, "Expects , between parameter shapes")) {
return false;
}
}
if (!ParseToken(TokKind::kRparen,
"Expects ) at end of parameter shape list")) {
return false;
}
if (!ParseToken(TokKind::kArrow, "Expects -> before result shape")) {
return false;
}
Shape result;
if (!ParseShape(&result)) {
return false;
}
*computation_layout->mutable_result_layout() = ShapeLayout(result);
if (!ParseToken(TokKind::kRbrace,
"Expects '}' at the end of computation layouts")) {
return false;
}
return true;
}
bool HloParserImpl::ParseInstructionOutputOperandAliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>*
aliasing_output_operand_pairs) {
if (!ParseToken(
TokKind::kLbrace,
"Expects '{' at the start of instruction aliasing description")) {
return false;
}
while (lexer_.GetKind() != TokKind::kRbrace) {
ShapeIndex out;
if (!ParseShapeIndex(&out)) {
return false;
}
std::string errmsg =
"Expected format: <output_shape_index>: (<operand_index>, "
"<operand_shape_index>)";
if (!ParseToken(TokKind::kColon, errmsg)) {
return false;
}
if (!ParseToken(TokKind::kLparen, errmsg)) {
return false;
}
int64_t operand_index;
    if (!ParseInt64(&operand_index)) {
      return false;
    }
if (!ParseToken(TokKind::kComma, errmsg)) {
return false;
}
ShapeIndex operand_shape_index;
if (!ParseShapeIndex(&operand_shape_index)) {
return false;
}
aliasing_output_operand_pairs->emplace_back(
out,
std::pair<int64_t, ShapeIndex>{operand_index, operand_shape_index});
if (!ParseToken(TokKind::kRparen, errmsg)) {
return false;
}
if (!EatIfPresent(TokKind::kComma)) {
break;
}
}
if (!ParseToken(
TokKind::kRbrace,
"Expects '}' at the end of instruction aliasing description")) {
return false;
}
return true;
}
bool HloParserImpl::ParseCustomCallSchedule(CustomCallSchedule* result) {
VLOG(kDebugLevel) << "ParseCustomCallSchedule";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects custom-call schedule");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToCustomCallSchedule(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects custom-call schedule but sees: %s, error: %s", val,
status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseCustomCallApiVersion(CustomCallApiVersion* result) {
VLOG(kDebugLevel) << "ParseCustomCallApiVersion";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects custom-call API version");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToCustomCallApiVersion(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects custom-call API version but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseSparsityDescriptor(
std::vector<SparsityDescriptor>* result) {
VLOG(kDebugLevel) << "ParseSparsityDescriptor";
if (lexer_.GetKind() != TokKind::kSparsityDesc) {
return TokenError("expects sparsity descriptor, e.g. L.0@2:4");
}
std::string val = lexer_.GetStrVal();
std::vector<absl::string_view> split = absl::StrSplit(val, '_');
for (absl::string_view item : split) {
std::vector<absl::string_view> splitA = absl::StrSplit(item, '@');
std::vector<absl::string_view> splitB = absl::StrSplit(splitA[0], '.');
std::vector<absl::string_view> splitC = absl::StrSplit(splitA[1], ':');
SparsityDescriptor descriptor;
int dim, n, m;
if (!absl::SimpleAtoi(splitB[1], &dim) || dim < 0) {
return TokenError("Invalid dimension number");
}
if (!absl::SimpleAtoi(splitC[0], &n) || !absl::SimpleAtoi(splitC[1], &m) ||
n < 1 || m <= n) {
return TokenError("Invalid structured sparsity type");
}
descriptor.set_type(SparsityType::SPARSITY_STRUCTURED_N_M);
descriptor.set_index(splitB[0] == "L" ? 0 : 1);
descriptor.set_dimension(dim);
descriptor.set_n(n);
descriptor.set_m(m);
result->push_back(descriptor);
}
lexer_.Lex();
return true;
}
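// hlo_module ::= 'HloModule' name (attribute)* computations
// When parse_module_without_header is true the header is absent, and the
// module name is derived from the entry computation's name instead.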
bool HloParserImpl::ParseHloModule(HloModule* module,
bool parse_module_without_header) {
std::string name;
std::optional<bool> is_scheduled;
std::optional<int64_t> replica_count;
std::optional<int64_t> num_partitions;
std::optional<AliasingData> aliasing_data;
std::optional<BufferDonor> buffer_donor_data;
std::optional<bool> alias_passthrough_params;
absl::flat_hash_map<std::string, AttrConfig> attrs;
std::optional<ComputationLayout> entry_computation_layout;
std::optional<FrontendAttributes> frontend_attributes;
BoolList allow_spmd_sharding_propagation_to_parameters;
BoolList allow_spmd_sharding_propagation_to_output;
attrs["is_scheduled"] = {false, AttrTy::kBool, &is_scheduled};
attrs["replica_count"] = {false, AttrTy::kInt64, &replica_count};
attrs["num_partitions"] = {false, AttrTy::kInt64,
&num_partitions};
attrs["input_output_alias"] = {false, AttrTy::kAliasing,
&aliasing_data};
attrs["buffer_donor"] = {false, AttrTy::kBufferDonor,
&buffer_donor_data};
attrs["alias_passthrough_params"] = {false, AttrTy::kBool,
&alias_passthrough_params};
attrs["entry_computation_layout"] = {false,
AttrTy::kComputationLayout,
&entry_computation_layout};
attrs["frontend_attributes"] = {
false, AttrTy::kFrontendAttributes, &frontend_attributes};
attrs["allow_spmd_sharding_propagation_to_parameters"] = {
false, AttrTy::kBracedBoolListOrBool,
&allow_spmd_sharding_propagation_to_parameters};
attrs["allow_spmd_sharding_propagation_to_output"] = {
false, AttrTy::kBracedBoolListOrBool,
&allow_spmd_sharding_propagation_to_output};
if (!parse_module_without_header) {
if (lexer_.GetKind() != TokKind::kw_HloModule) {
return TokenError("expects HloModule");
}
lexer_.Lex();
if (!ParseName(&name)) {
return false;
}
if (!ParseAttributes(attrs)) {
return false;
}
}
if (!ParseComputations(module)) {
return false;
}
if (parse_module_without_header) {
name = absl::StrCat("module_", module->entry_computation()->name());
}
module->set_name(name);
if (is_scheduled.value_or(false)) {
TF_CHECK_OK(module->set_schedule(ScheduleFromInstructionOrder(module)));
}
HloModuleConfig config = module->config();
bool default_config = true;
if (alias_passthrough_params.value_or(false)) {
config.set_alias_passthrough_params(true);
default_config = false;
}
if (num_partitions.value_or(1) != 1) {
config.set_num_partitions(*num_partitions);
config.set_use_spmd_partitioning(true);
default_config = false;
}
if (replica_count.value_or(1) != 1) {
config.set_replica_count(*replica_count);
default_config = false;
}
if (entry_computation_layout.has_value()) {
*config.mutable_entry_computation_layout() = *entry_computation_layout;
default_config = false;
} else {
HloComputation* entry_computation = module->entry_computation();
for (int64_t p = 0; p < entry_computation->num_parameters(); p++) {
const Shape& param_shape =
entry_computation->parameter_instruction(p)->shape();
TF_CHECK_OK(module->mutable_entry_computation_layout()
->mutable_parameter_layout(p)
->CopyLayoutFromShape(param_shape));
}
const Shape& result_shape = entry_computation->root_instruction()->shape();
TF_CHECK_OK(module->mutable_entry_computation_layout()
->mutable_result_layout()
->CopyLayoutFromShape(result_shape));
}
if (frontend_attributes) {
module->set_frontend_attributes(frontend_attributes.value());
}
if (!allow_spmd_sharding_propagation_to_parameters.empty()) {
config.set_allow_spmd_sharding_propagation_to_parameters(
allow_spmd_sharding_propagation_to_parameters);
default_config = false;
}
if (!allow_spmd_sharding_propagation_to_output.empty()) {
config.set_allow_spmd_sharding_propagation_to_output(
allow_spmd_sharding_propagation_to_output);
default_config = false;
}
if (!default_config) {
module->set_config(config);
}
if (aliasing_data) {
HloInputOutputAliasConfig alias_config(module->result_shape());
for (auto& p : *aliasing_data) {
absl::Status st =
alias_config.SetUpAlias(p.first, p.second.parameter_number,
p.second.parameter_index, p.second.kind);
if (!st.ok()) {
return TokenError(st.message());
}
}
module->input_output_alias_config() = alias_config;
}
if (buffer_donor_data) {
HloBufferDonorConfig buffer_donor_config;
for (auto& p : *buffer_donor_data) {
absl::Status st =
buffer_donor_config.AddBufferDonor(p.param_number, p.param_index);
if (!st.ok()) {
return TokenError(st.message());
}
}
module->buffer_donor_config() = buffer_donor_config;
}
return true;
}
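// computations ::= (computation)+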
bool HloParserImpl::ParseComputations(HloModule* module) {
HloComputation* entry_computation = nullptr;
do {
if (!ParseComputation(&entry_computation)) {
return false;
}
} while (lexer_.GetKind() != TokKind::kEof);
for (int i = 0; i < computations_.size(); i++) {
if ((entry_computation != nullptr &&
computations_[i].get() != entry_computation) ||
(entry_computation == nullptr && i != computations_.size() - 1)) {
module->AddEmbeddedComputation(std::move(computations_[i]));
continue;
}
module->AddEntryComputation(std::move(computations_[i]));
}
return true;
}
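// computation ::= ('ENTRY')? name (param_list_to_shape)? instruction_list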
bool HloParserImpl::ParseComputation(HloComputation** entry_computation) {
LocTy maybe_entry_loc = lexer_.GetLoc();
const bool is_entry_computation = EatIfPresent(TokKind::kw_ENTRY);
std::string name;
LocTy name_loc = lexer_.GetLoc();
if (!ParseName(&name)) {
return false;
}
LocTy shape_loc = nullptr;
Shape shape;
if (CanBeParamListToShape() && !ParseParamListToShape(&shape, &shape_loc)) {
return false;
}
HloComputation* computation = nullptr;
if (!ParseInstructionList(&computation, name)) {
return false;
}
if (shape_loc != nullptr &&
!ShapeUtil::Compatible(computation->root_instruction()->shape(), shape)) {
return Error(
shape_loc,
StrCat(
"Shape of computation ", name, ", ", ShapeUtil::HumanString(shape),
", is not compatible with that of its root instruction ",
computation->root_instruction()->name(), ", ",
ShapeUtil::HumanString(computation->root_instruction()->shape())));
}
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<std::string> execution_thread = HloInstruction::kMainExecutionThread;
attrs["execution_thread"] = {false, AttrTy::kString,
&execution_thread};
if (!ParseAttributes(attrs)) {
return false;
}
computation->SetExecutionThread(*execution_thread);
if (is_entry_computation) {
if (*entry_computation != nullptr) {
return Error(maybe_entry_loc, "expects only one ENTRY");
}
*entry_computation = computation;
}
return AddComputation(name, computation, name_loc);
}
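// instruction_list ::= '{' instruction_list1 '}'
// instruction_list1 ::= (instruction)+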
bool HloParserImpl::ParseInstructionList(HloComputation** computation,
const std::string& computation_name) {
Scope scope(&scoped_name_tables_);
HloComputation::Builder builder(computation_name);
if (!ParseToken(TokKind::kLbrace,
"expects '{' at the beginning of instruction list.")) {
return false;
}
std::string root_name;
do {
if (!ParseInstruction(&builder, &root_name)) {
return false;
}
} while (lexer_.GetKind() != TokKind::kRbrace);
if (!ParseToken(TokKind::kRbrace,
"expects '}' at the end of instruction list.")) {
return false;
}
HloInstruction* root = nullptr;
if (!root_name.empty()) {
std::pair<HloInstruction*, LocTy>* root_node =
tsl::gtl::FindOrNull(current_name_table(), root_name);
if (root_node == nullptr) {
LOG(FATAL) << "instruction " << root_name
<< " was marked as ROOT but the parser has not seen it before";
}
root = root_node->first;
}
computations_.emplace_back(builder.Build(root));
*computation = computations_.back().get();
return true;
}
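// instruction ::= ('ROOT')? name '=' shape opcode operands (attribute)*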
bool HloParserImpl::ParseInstruction(HloComputation::Builder* builder,
std::string* root_name) {
std::string name;
LocTy maybe_root_loc = lexer_.GetLoc();
bool is_root = EatIfPresent(TokKind::kw_ROOT);
const LocTy name_loc = lexer_.GetLoc();
if (!ParseName(&name) ||
!ParseToken(TokKind::kEqual, "expects '=' in instruction")) {
return false;
}
if (is_root) {
if (!root_name->empty()) {
return Error(maybe_root_loc, "one computation should have only one ROOT");
}
*root_name = name;
}
return ParseInstructionRhs(builder, name, name_loc);
}
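// Parses everything to the right of 'name =' and registers the resulting
// instruction in the current name table under `name`.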
bool HloParserImpl::ParseInstructionRhs(HloComputation::Builder* builder,
std::string name, LocTy name_loc,
bool allow_attributes) {
Shape shape;
HloOpcode opcode;
std::optional<HloOpcode> async_wrapped_opcode;
std::vector<HloInstruction*> operands;
const bool parse_shape = CanBeShape();
if ((parse_shape && !ParseShape(&shape)) ||
!ParseOpcode(&opcode, &async_wrapped_opcode)) {
return false;
}
if (!parse_shape && !CanInferShape(opcode)) {
return TokenError(StrFormat("cannot infer shape for opcode: %s",
HloOpcodeString(opcode)));
}
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<HloSharding> sharding;
optional<FrontendAttributes> frontend_attributes;
optional<StatisticsViz> statistics_viz;
attrs["sharding"] = {false, AttrTy::kSharding, &sharding};
attrs["frontend_attributes"] = {
false, AttrTy::kFrontendAttributes, &frontend_attributes};
attrs["statistics"] = {false, AttrTy::kStatisticsViz,
&statistics_viz};
optional<ParameterReplication> parameter_replication;
attrs["parameter_replication"] = {false,
AttrTy::kParameterReplication,
¶meter_replication};
optional<std::vector<HloInstruction*>> predecessors;
attrs["control-predecessors"] = {false, AttrTy::kInstructionList,
&predecessors};
optional<std::shared_ptr<OriginalValue>> original_value;
attrs["origin"] = {false, AttrTy::kOriginalValue,
&original_value};
optional<OpMetadata> metadata;
attrs["metadata"] = {false, AttrTy::kMetadata, &metadata};
optional<std::string> backend_config;
attrs["backend_config"] = {false, AttrTy::kStringOrJsonDict,
&backend_config};
std::optional<Shape> maybe_shape;
if (parse_shape) {
maybe_shape = shape;
}
HloInstruction* instruction =
CreateInstruction(builder, name, maybe_shape, opcode,
async_wrapped_opcode, attrs, allow_attributes);
if (instruction == nullptr) {
return false;
}
if (name.empty()) {
name = name_uniquer_.GetUniqueName(
absl::StrCat(HloOpcodeString(instruction->opcode()), ".anon"));
} else {
name_uniquer_.GetUniqueName(name);
}
instruction->SetAndSanitizeName(name);
if (instruction->name() != name) {
return Error(name_loc,
StrCat("illegal instruction name: ", name,
"; suggest renaming to: ", instruction->name()));
}
if (sharding) {
instruction->set_sharding(
sharding->NormalizeTupleSharding(instruction->shape()));
}
if (parameter_replication) {
int leaf_count = ShapeUtil::GetLeafCount(instruction->shape());
const auto& replicated =
parameter_replication->replicated_at_leaf_buffers();
if (leaf_count != replicated.size()) {
return Error(lexer_.GetLoc(),
StrCat("parameter has ", leaf_count,
" leaf buffers, but parameter_replication has ",
replicated.size(), " elements."));
}
instruction->set_parameter_replicated_at_leaf_buffers(replicated);
}
if (predecessors) {
for (auto* pre : *predecessors) {
absl::Status status = pre->AddControlDependencyTo(instruction);
if (!status.ok()) {
return Error(name_loc, StrCat("error adding control dependency for: ",
name, " status: ", status.ToString()));
}
}
}
if (metadata) {
instruction->set_metadata(*metadata);
}
if (original_value) {
instruction->set_original_value(*original_value);
}
if (backend_config) {
instruction->set_raw_backend_config_string(std::move(*backend_config));
}
if (frontend_attributes) {
instruction->set_frontend_attributes(*frontend_attributes);
}
if (statistics_viz) {
instruction->set_statistics_viz(*statistics_viz);
}
return AddInstruction(name, instruction, name_loc);
}
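// Parses the operands and opcode-specific attributes of an instruction and
// adds it to the builder; returns nullptr (after reporting an error) on
// failure. When preset_operands is given, those operands are used instead of
// being parsed from the text (as when building async-wrapped computations).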
HloInstruction* HloParserImpl::CreateInstruction(
HloComputation::Builder* builder, absl::string_view name,
std::optional<Shape> shape, HloOpcode opcode,
std::optional<HloOpcode> async_wrapped_opcode,
absl::flat_hash_map<std::string, AttrConfig>& attrs, bool allow_attributes,
std::vector<HloInstruction*>* preset_operands) {
std::vector<HloInstruction*> operands;
if (preset_operands) {
operands = *preset_operands;
}
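  // Invokes the given shape-inference callback only when no explicit shape
  // was parsed, and converts an inference failure into a parser error.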
const auto maybe_infer_shape =
[&](absl::FunctionRef<absl::StatusOr<Shape>()> infer) {
if (shape.has_value()) {
return true;
}
auto inferred = infer();
if (!inferred.ok()) {
return TokenError(
StrFormat("failed to infer shape for opcode: %s, error: %s",
HloOpcodeString(opcode), inferred.status().message()));
}
shape = std::move(inferred).value();
return true;
};
switch (opcode) {
case HloOpcode::kParameter: {
int64_t parameter_number;
if (!ParseToken(TokKind::kLparen,
"expects '(' before parameter number") ||
          !ParseInt64(&parameter_number)) {
return nullptr;
}
const LocTy loc = lexer_.GetLoc();
if (parameter_number < 0) {
Error(loc, "parameter number must be >= 0");
return nullptr;
}
if (!ParseToken(TokKind::kRparen, "expects ')' after parameter number") ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
std::string param_name(name);
auto result = builder->AddParameter(HloInstruction::CreateParameter(
parameter_number, *shape, param_name));
if (!result.ok()) {
Error(loc, result.status().message());
return nullptr;
}
return result.value();
}
case HloOpcode::kConstant: {
Literal literal;
if (!ParseToken(TokKind::kLparen,
"expects '(' before constant literal") ||
!ParseLiteral(&literal, *shape) ||
!ParseToken(TokKind::kRparen, "expects ')' after constant literal") ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
}
case HloOpcode::kIota: {
optional<int64_t> iota_dimension;
attrs["iota_dimension"] = {true, AttrTy::kInt64,
&iota_dimension};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateIota(*shape, *iota_dimension));
}
case HloOpcode::kTopK: {
optional<int64_t> k;
attrs["k"] = {true, AttrTy::kInt64, &k};
optional<bool> largest;
attrs["largest"] = {false, AttrTy::kBool, &largest};
      if ((!preset_operands &&
           !ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTopKShape(operands[0]->shape(), *k);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateTopK(
*shape, operands[0], *k, (largest.has_value() ? *largest : true)));
}
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kClz:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopy:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kFloor:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kLogistic:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferUnaryOpShape(opcode, operands[0]);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateUnary(*shape, opcode, operands[0]));
}
case HloOpcode::kAdd:
case HloOpcode::kDivide:
case HloOpcode::kMultiply:
case HloOpcode::kSubtract:
case HloOpcode::kAtan2:
case HloOpcode::kComplex:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBinaryOpShape(opcode, operands[0],
operands[1]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBinary(
*shape, opcode, operands[0], operands[1]));
}
case HloOpcode::kClamp:
case HloOpcode::kSelect: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 3)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTernaryOpShape(
opcode, operands[0], operands[1], operands[2]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateTernary(
*shape, opcode, operands[0], operands[1], operands[2]));
}
case HloOpcode::kConvert: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateConvert(*shape, operands[0]));
}
case HloOpcode::kBitcastConvert: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateBitcastConvert(*shape, operands[0]));
}
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart: {
CollectiveDeviceList device_list;
optional<int64_t> channel_id;
optional<std::vector<int64_t>> dimensions;
optional<bool> constrain_layout;
optional<bool> use_global_device_ids;
attrs["replica_groups"] = {false,
AttrTy::kCollectiveDeviceList, &device_list};
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
attrs["constrain_layout"] = {false, AttrTy::kBool,
&constrain_layout};
attrs["use_global_device_ids"] = {false, AttrTy::kBool,
&use_global_device_ids};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (opcode == HloOpcode::kAllGather) {
return builder->AddInstruction(HloInstruction::CreateAllGather(
*shape, operands, dimensions->at(0), device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
}
return builder->AddInstruction(HloInstruction::CreateAllGatherStart(
*shape, operands, dimensions->at(0), device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
}
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kReduceScatter: {
CollectiveDeviceList device_list;
optional<HloComputation*> to_apply;
optional<int64_t> channel_id;
optional<bool> constrain_layout;
optional<bool> use_global_device_ids;
optional<std::vector<int64_t>> dimensions;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
attrs["replica_groups"] = {false,
AttrTy::kCollectiveDeviceList, &device_list};
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
attrs["constrain_layout"] = {false, AttrTy::kBool,
&constrain_layout};
attrs["use_global_device_ids"] = {false, AttrTy::kBool,
&use_global_device_ids};
if (opcode == HloOpcode::kReduceScatter) {
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
}
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (opcode == HloOpcode::kAllReduce) {
return builder->AddInstruction(HloInstruction::CreateAllReduce(
*shape, operands, *to_apply, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
} else if (opcode == HloOpcode::kReduceScatter) {
return builder->AddInstruction(HloInstruction::CreateReduceScatter(
*shape, operands, *to_apply, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false,
dimensions->at(0)));
}
return builder->AddInstruction(HloInstruction::CreateAllReduceStart(
*shape, operands, *to_apply, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
use_global_device_ids ? *use_global_device_ids : false));
}
case HloOpcode::kAllToAll: {
CollectiveDeviceList device_list;
attrs["replica_groups"] = {false,
AttrTy::kCollectiveDeviceList, &device_list};
optional<int64_t> channel_id;
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {false, AttrTy::kBracedInt64List,
&dimensions};
optional<bool> constrain_layout;
attrs["constrain_layout"] = {false, AttrTy::kBool,
&constrain_layout};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape) ||
(dimensions && dimensions->size() != 1)) {
return nullptr;
}
optional<int64_t> split_dimension;
if (dimensions) {
split_dimension = dimensions->at(0);
}
return builder->AddInstruction(HloInstruction::CreateAllToAll(
*shape, operands, device_list,
constrain_layout ? *constrain_layout : false, channel_id,
split_dimension));
}
case HloOpcode::kCollectiveBroadcast: {
CollectiveDeviceList device_list;
attrs["replica_groups"] = {true,
AttrTy::kCollectiveDeviceList, &device_list};
optional<int64_t> channel_id;
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateCollectiveBroadcast(
*shape, operands, device_list, false, channel_id));
}
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart: {
optional<std::vector<std::vector<int64_t>>> source_targets;
attrs["source_target_pairs"] = {
true, AttrTy::kBracedInt64ListList, &source_targets};
optional<int64_t> channel_id;
attrs["channel_id"] = {false, AttrTy::kInt64, &channel_id};
optional<std::vector<std::vector<int64_t>>> slice_sizes;
attrs["slice_sizes"] = {false, AttrTy::kBracedInt64ListList,
&slice_sizes};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
std::vector<std::pair<int64_t, int64_t>> pairs(source_targets->size());
for (int i = 0; i < pairs.size(); i++) {
if ((*source_targets)[i].size() != 2) {
TokenError("expects 'source_target_pairs=' to be a list of pairs");
return nullptr;
}
pairs[i].first = (*source_targets)[i][0];
pairs[i].second = (*source_targets)[i][1];
}
if (!slice_sizes.has_value()) {
if (operands.size() != 1) {
TokenError(
"CollectivePermute and CollectivePermuteStart must have exactly "
"one operand (input buffer) unless it performs dynamic-slice and "
"in-place update.");
return nullptr;
}
if (opcode == HloOpcode::kCollectivePermute) {
return builder->AddInstruction(
HloInstruction::CreateCollectivePermute(*shape, operands[0],
pairs, channel_id));
}
if (opcode == HloOpcode::kCollectivePermuteStart) {
return builder->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(*shape, operands[0],
pairs, channel_id));
}
LOG(FATAL) << "Expect opcode to be CollectivePermute or "
"CollectivePermuteStart, but got "
<< opcode;
}
if (operands.size() != 4) {
TokenError(
"CollectivePermute and CollectivePermuteStart must "
"have exactly four operands for dynamic-slice and "
"in-place update.");
return nullptr;
}
if (opcode == HloOpcode::kCollectivePermute) {
return builder->AddInstruction(HloInstruction::CreateCollectivePermute(
*shape, operands[0], operands[1], operands[2], operands[3], pairs,
*slice_sizes, channel_id));
}
if (opcode == HloOpcode::kCollectivePermuteStart) {
return builder->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
*shape, operands[0], operands[1], operands[2], operands[3],
pairs, *slice_sizes, channel_id));
}
LOG(FATAL) << "Expect opcode to be CollectivePermute or "
"CollectivePermuteStart, but got "
<< opcode;
}
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone: {
std::optional<HloComputation*> async_computation;
if (!preset_operands && !ParseOperands(&operands, builder)) {
return nullptr;
}
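      // Async ops carry the shape ((async-operands), async-outputs, state):
      // a tuple whose first element is itself a tuple of operand shapes.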
auto is_async_shape_correct = [](const Shape& shape) {
return shape.IsTuple() && shape.tuple_shapes_size() >= 2 &&
shape.tuple_shapes(0).IsTuple();
};
if (opcode == HloOpcode::kAsyncUpdate ||
opcode == HloOpcode::kAsyncDone) {
if (operands.size() != 1 ||
!is_async_shape_correct(operands[0]->shape())) {
TokenError(
"AsyncUpdate and AsyncDone expect a single operand in the form "
"of ((async-operands), async-outputs, state).");
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncStart ||
opcode == HloOpcode::kAsyncUpdate) {
if (!is_async_shape_correct(*shape)) {
TokenError(
"AsyncStart and AsyncUpdate expect the op shape to be in the "
"form of "
"((async-operands), async-outputs, state).");
return nullptr;
}
}
      if (opcode == HloOpcode::kAsyncUpdate ||
          opcode == HloOpcode::kAsyncDone) {
        if (!operands[0]->IsAsynchronous()) {
          TokenError(
              "AsyncUpdate and AsyncDone expect their operand to be the "
              "previous async op.");
          return nullptr;
        }
      }
optional<std::string> async_execution_thread;
attrs["async_execution_thread"] = {false, AttrTy::kString,
&async_execution_thread};
if (async_wrapped_opcode) {
if (opcode == HloOpcode::kAsyncStart) {
std::vector<HloInstruction*> async_wrapped_operands;
std::vector<Shape> async_wrapped_operand_shapes;
Shape async_wrapped_root_shape;
async_wrapped_operand_shapes.reserve(operands.size());
for (const HloInstruction* operand : operands) {
async_wrapped_operand_shapes.push_back(operand->shape());
}
async_wrapped_root_shape = shape->tuple_shapes(1);
HloComputation::Builder async_wrapped_builder("async_wrapped");
async_wrapped_operands.reserve(async_wrapped_operand_shapes.size());
for (int i = 0; i < async_wrapped_operand_shapes.size(); ++i) {
async_wrapped_operands.push_back(
async_wrapped_builder.AddInstruction(
HloInstruction::CreateParameter(
i, async_wrapped_operand_shapes.at(i), "async_param")));
}
HloInstruction* root =
CreateInstruction(&async_wrapped_builder, "async_op",
async_wrapped_root_shape, *async_wrapped_opcode,
std::nullopt, attrs,
allow_attributes, &async_wrapped_operands);
if (!root) {
return nullptr;
}
computations_.emplace_back(async_wrapped_builder.Build(root));
async_computation = computations_.back().get();
} else {
if (operands[0]->async_wrapped_opcode() != *async_wrapped_opcode) {
TokenError(
StrFormat("Expect async wrapped opcode to be %s, but got %s",
HloOpcodeString(operands[0]->async_wrapped_opcode()),
HloOpcodeString(*async_wrapped_opcode)));
return nullptr;
}
}
} else {
attrs["calls"] = {opcode == HloOpcode::kAsyncStart,
AttrTy::kHloComputation, &async_computation};
}
if (!(async_wrapped_opcode && opcode == HloOpcode::kAsyncStart)) {
if (!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncUpdate ||
opcode == HloOpcode::kAsyncDone) {
if (async_execution_thread &&
operands[0]->async_execution_thread() != *async_execution_thread) {
TokenError(StrFormat(
"Expect async_execution_thread to be %s, but got %s",
operands[0]->async_execution_thread(), *async_execution_thread));
return nullptr;
}
if (async_computation &&
operands[0]->async_wrapped_computation() != *async_computation) {
TokenError(
StrFormat("Expect async_wrapped_computation to be %s, but got %s",
operands[0]->async_wrapped_computation()->name(),
(*async_computation)->name()));
return nullptr;
}
}
if (opcode == HloOpcode::kAsyncStart &&
(*async_computation)->IsAsyncComputation()) {
TokenError(StrFormat(
"Computation %s is already referenced by another async op",
(*async_computation)->name()));
return nullptr;
}
if (opcode == HloOpcode::kAsyncStart) {
if (!async_execution_thread) {
async_execution_thread = HloInstruction::kMainExecutionThread;
}
return builder->AddInstruction(HloInstruction::CreateAsyncStart(
*shape, operands, *async_computation, *async_execution_thread));
}
if (opcode == HloOpcode::kAsyncUpdate) {
return builder->AddInstruction(
HloInstruction::CreateAsyncUpdate(*shape, operands[0]));
}
return builder->AddInstruction(
HloInstruction::CreateAsyncDone(*shape, operands[0]));
}
case HloOpcode::kCopyStart: {
optional<int> cross_program_prefetch_index = std::nullopt;
attrs["cross_program_prefetch_index"] = {
false, AttrTy::kInt32, &cross_program_prefetch_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateCopyStart(
*shape, operands[0], cross_program_prefetch_index));
}
case HloOpcode::kReplicaId: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (shape.has_value()) {
return builder->AddInstruction(HloInstruction::CreateReplicaId(*shape));
}
return builder->AddInstruction(HloInstruction::CreateReplicaId());
}
case HloOpcode::kPartitionId: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (shape.has_value()) {
return builder->AddInstruction(
HloInstruction::CreatePartitionId(*shape));
}
return builder->AddInstruction(HloInstruction::CreatePartitionId());
}
case HloOpcode::kDynamicReshape: {
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDynamicReshape(
*shape, operands[0],
absl::Span<HloInstruction* const>(operands).subspan(1)));
}
case HloOpcode::kReshape: {
optional<int64_t> inferred_dimension;
attrs["inferred_dimension"] = {false, AttrTy::kInt64,
&inferred_dimension};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReshape(
*shape, operands[0], inferred_dimension.value_or(-1)));
}
case HloOpcode::kAfterAll: {
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
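      // A zero-operand after-all degenerates to creating a fresh token.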
if (operands.empty()) {
return builder->AddInstruction(HloInstruction::CreateToken());
}
return builder->AddInstruction(HloInstruction::CreateAfterAll(operands));
}
case HloOpcode::kAddDependency: {
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateAddDependency(operands[0], operands[1]));
}
case HloOpcode::kSort: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
optional<bool> is_stable = false;
attrs["is_stable"] = {false, AttrTy::kBool, &is_stable};
optional<HloComputation*> to_apply;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape) ||
dimensions->size() != 1) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferVariadicOpShape(opcode, arg_shapes);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateSort(*shape, dimensions->at(0), operands,
to_apply.value(), is_stable.value()));
}
case HloOpcode::kTuple: {
if ((!preset_operands &&
!(shape.has_value()
? ParseOperands(&operands, builder, shape->tuple_shapes_size())
: ParseOperands(&operands, builder))) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferVariadicOpShape(opcode, arg_shapes);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateVariadic(*shape, HloOpcode::kTuple, operands));
}
case HloOpcode::kWhile: {
optional<HloComputation*> condition;
optional<HloComputation*> body;
attrs["condition"] = {true, AttrTy::kHloComputation,
&condition};
attrs["body"] = {true, AttrTy::kHloComputation, &body};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferWhileShape(
condition.value()->ComputeProgramShape(),
body.value()->ComputeProgramShape(), operands[0]->shape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateWhile(
*shape, *condition, *body, operands[0]));
}
case HloOpcode::kRecv: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateRecv(
shape->tuple_shapes(0), operands[0], *channel_id, *is_host_transfer));
}
case HloOpcode::kRecvDone: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
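      // When the operand is itself a channel instruction, the channel ids
      // must agree.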
if (dynamic_cast<const HloChannelInstruction*>(operands[0]) != nullptr) {
if (channel_id != operands[0]->channel_id()) {
return nullptr;
}
}
return builder->AddInstruction(HloInstruction::CreateRecvDone(
operands[0], channel_id.value(), *is_host_transfer));
}
case HloOpcode::kSend: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSend(
operands[0], operands[1], *channel_id, *is_host_transfer));
}
case HloOpcode::kSendDone: {
optional<int64_t> channel_id;
optional<bool> is_host_transfer = false;
attrs["channel_id"] = {true, AttrTy::kInt64, &channel_id};
attrs["is_host_transfer"] = {false, AttrTy::kBool,
&is_host_transfer};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (dynamic_cast<const HloChannelInstruction*>(operands[0]) != nullptr) {
if (channel_id != operands[0]->channel_id()) {
return nullptr;
}
}
return builder->AddInstruction(HloInstruction::CreateSendDone(
operands[0], channel_id.value(), *is_host_transfer));
}
case HloOpcode::kGetTupleElement: {
optional<int64_t> index;
attrs["index"] = {true, AttrTy::kInt64, &index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeUtil::GetTupleElementShape(operands[0]->shape(),
*index);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateGetTupleElement(*shape, operands[0], *index));
}
case HloOpcode::kCall: {
optional<HloComputation*> to_apply;
optional<bool> is_composite = false;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
attrs["is_composite"] = {false, AttrTy::kBool,
&is_composite};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferCallShape(
arg_shapes, to_apply.value()->ComputeProgramShape());
})) {
return nullptr;
}
auto call_op = HloInstruction::CreateCall(*shape, operands, *to_apply);
call_op->set_is_composite(is_composite.value());
return builder->AddInstruction(std::move(call_op));
}
case HloOpcode::kReduceWindow: {
optional<HloComputation*> reduce_computation;
optional<Window> window;
attrs["window"] = {false, AttrTy::kWindow, &window};
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&reduce_computation};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!window) {
window.emplace();
}
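      // reduce-window pairs N inputs with N init values, so the operand
      // count must be even.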
      if (operands.size() % 2 != 0) {
TokenError(StrCat("expects an even number of operands, but has ",
operands.size(), " operands"));
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferReduceWindowShape(
operands[0]->shape(), operands[1]->shape(), *window,
reduce_computation.value()->ComputeProgramShape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReduceWindow(
*shape,
absl::Span<HloInstruction* const>(operands).subspan(
0, operands.size() / 2),
absl::Span<HloInstruction* const>(operands).subspan(operands.size() /
2),
*window, *reduce_computation));
}
case HloOpcode::kConvolution: {
optional<Window> window;
optional<ConvolutionDimensionNumbers> dnums;
optional<int64_t> feature_group_count;
optional<int64_t> batch_group_count;
attrs["window"] = {false, AttrTy::kWindow, &window};
attrs["dim_labels"] = {true,
AttrTy::kConvolutionDimensionNumbers, &dnums};
attrs["feature_group_count"] = {false, AttrTy::kInt64,
&feature_group_count};
attrs["batch_group_count"] = {false, AttrTy::kInt64,
&batch_group_count};
optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {false, AttrTy::kPrecisionList,
&operand_precision};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
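      // Missing attributes default to a trivial window and group counts
      // of 1.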
if (!window) {
window.emplace();
}
if (!feature_group_count) {
feature_group_count = 1;
}
if (!batch_group_count) {
batch_group_count = 1;
}
PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
operands.size(), PrecisionConfig::DEFAULT);
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferConvolveShape(
operands[0]->shape(), operands[1]->shape(),
*feature_group_count, *batch_group_count, *window, *dnums,
std::nullopt);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateConvolve(
*shape, operands[0], operands[1],
feature_group_count.value(), batch_group_count.value(), *window,
*dnums, precision_config));
}
case HloOpcode::kFft: {
optional<FftType> fft_type;
optional<std::vector<int64_t>> fft_length;
attrs["fft_type"] = {true, AttrTy::kFftType, &fft_type};
attrs["fft_length"] = {true, AttrTy::kBracedInt64List,
&fft_length};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferFftShape(operands[0]->shape(),
*fft_type, *fft_length);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateFft(
*shape, operands[0], *fft_type, *fft_length));
}
case HloOpcode::kTriangularSolve: {
TriangularSolveOptions options;
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
(allow_attributes && !ParseAttributesAsProtoMessage(
attrs, &options))) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTriangularSolveShape(
operands[0]->shape(), operands[1]->shape(), options);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateTriangularSolve(
*shape, operands[0], operands[1], options));
}
case HloOpcode::kCompare: {
optional<ComparisonDirection> direction;
optional<Comparison::Type> type;
attrs["direction"] = {true, AttrTy::kComparisonDirection,
&direction};
attrs["type"] = {false, AttrTy::kComparisonType, &type};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBinaryOpShape(opcode, operands[0],
operands[1]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateCompare(
*shape, operands[0], operands[1], *direction, type));
}
case HloOpcode::kCholesky: {
CholeskyOptions options;
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
(allow_attributes && !ParseAttributesAsProtoMessage(
attrs, &options))) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferCholeskyShape(operands[0]->shape());
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateCholesky(*shape, operands[0], options));
}
case HloOpcode::kBroadcast: {
if (!preset_operands &&
!ParseOperands(&operands, builder, 1)) {
return nullptr;
}
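      // dimensions= is required unless the operand is a scalar, in which
      // case it defaults to the empty list.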
bool operand_is_scalar = ShapeUtil::IsScalar(operands[0]->shape());
optional<std::vector<int64_t>> broadcast_dimensions;
attrs["dimensions"] = {!operand_is_scalar,
AttrTy::kBracedInt64List, &broadcast_dimensions};
if (!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operand_is_scalar && !broadcast_dimensions.has_value()) {
broadcast_dimensions.emplace();
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBroadcastShape(operands[0]->shape(),
*broadcast_dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBroadcast(
*shape, operands[0], *broadcast_dimensions));
}
case HloOpcode::kConcatenate: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape) ||
dimensions->size() != 1) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferConcatOpShape(arg_shapes,
dimensions->at(0));
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateConcatenate(
*shape, operands, dimensions->at(0)));
}
case HloOpcode::kMap: {
optional<HloComputation*> to_apply;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&to_apply};
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {false, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferMapShape(
arg_shapes, to_apply.value()->ComputeProgramShape(),
*dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateMap(*shape, operands, *to_apply));
}
case HloOpcode::kReduce: {
optional<HloComputation*> reduce_computation;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&reduce_computation};
optional<std::vector<int64_t>> dimensions_to_reduce;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions_to_reduce};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
      if (operands.size() % 2 != 0) {
TokenError(StrCat("expects an even number of operands, but has ",
operands.size(), " operands"));
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 2> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferReduceShape(
arg_shapes, *dimensions_to_reduce,
reduce_computation.value()->ComputeProgramShape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReduce(
*shape,
absl::Span<HloInstruction* const>(operands).subspan(
0, operands.size() / 2),
absl::Span<HloInstruction* const>(operands).subspan(operands.size() /
2),
*dimensions_to_reduce, *reduce_computation));
}
case HloOpcode::kReverse: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferReverseShape(operands[0]->shape(),
*dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateReverse(*shape, operands[0], *dimensions));
}
case HloOpcode::kSelectAndScatter: {
optional<HloComputation*> select;
attrs["select"] = {true, AttrTy::kHloComputation, &select};
optional<HloComputation*> scatter;
attrs["scatter"] = {true, AttrTy::kHloComputation, &scatter};
optional<Window> window;
attrs["window"] = {false, AttrTy::kWindow, &window};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 3)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!window) {
window.emplace();
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferSelectAndScatterShape(
operands[0]->shape(), select.value()->ComputeProgramShape(),
*window, operands[1]->shape(), operands[2]->shape(),
scatter.value()->ComputeProgramShape());
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSelectAndScatter(
*shape, operands[0], *select, *window,
operands[1], operands[2], *scatter));
}
case HloOpcode::kSlice: {
optional<SliceRanges> slice_ranges;
attrs["slice"] = {true, AttrTy::kSliceRanges, &slice_ranges};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSlice(
*shape, operands[0], slice_ranges->starts, slice_ranges->limits,
slice_ranges->strides));
}
case HloOpcode::kDynamicSlice: {
optional<std::vector<int64_t>> dynamic_slice_sizes;
attrs["dynamic_slice_sizes"] = {
true, AttrTy::kBracedInt64List, &dynamic_slice_sizes};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.empty()) {
TokenError("Expected at least one operand.");
return nullptr;
}
if (!(operands.size() == 2 && operands[1]->shape().rank() == 1) &&
operands.size() != 1 + operands[0]->shape().rank()) {
TokenError("Wrong number of operands.");
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDynamicSlice(
*shape, operands[0],
absl::MakeSpan(operands).subspan(1),
*dynamic_slice_sizes));
}
case HloOpcode::kDynamicUpdateSlice: {
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (operands.size() < 2) {
TokenError("Expected at least two operands.");
return nullptr;
}
if (!(operands.size() == 3 && operands[2]->shape().rank() == 1) &&
operands.size() != 2 + operands[0]->shape().rank()) {
TokenError("Wrong number of operands.");
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
*shape, operands[0], operands[1],
absl::MakeSpan(operands).subspan(2)));
}
case HloOpcode::kTranspose: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferTransposeShape(operands[0]->shape(),
*dimensions);
})) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateTranspose(*shape, operands[0], *dimensions));
}
case HloOpcode::kBatchNormTraining: {
optional<float> epsilon;
attrs["epsilon"] = {true, AttrTy::kFloat, &epsilon};
optional<int64_t> feature_index;
attrs["feature_index"] = {true, AttrTy::kInt64,
&feature_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 3)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBatchNormTrainingShape(
operands[0]->shape(), operands[1]->shape(),
operands[2]->shape(), *feature_index);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBatchNormTraining(
*shape, operands[0], operands[1],
operands[2], *epsilon, *feature_index));
}
case HloOpcode::kBatchNormInference: {
optional<float> epsilon;
attrs["epsilon"] = {true, AttrTy::kFloat, &epsilon};
optional<int64_t> feature_index;
attrs["feature_index"] = {true, AttrTy::kInt64,
&feature_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 5)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBatchNormInferenceShape(
operands[0]->shape(), operands[1]->shape(),
operands[2]->shape(), operands[3]->shape(),
operands[4]->shape(), *feature_index);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBatchNormInference(
*shape, operands[0], operands[1],
operands[2], operands[3],
operands[4], *epsilon, *feature_index));
}
case HloOpcode::kBatchNormGrad: {
optional<float> epsilon;
attrs["epsilon"] = {true, AttrTy::kFloat, &epsilon};
optional<int64_t> feature_index;
attrs["feature_index"] = {true, AttrTy::kInt64,
&feature_index};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 5)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferBatchNormGradShape(
operands[0]->shape(), operands[1]->shape(),
operands[2]->shape(), operands[3]->shape(),
operands[4]->shape(), *feature_index);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateBatchNormGrad(
*shape, operands[0], operands[1],
operands[2], operands[3],
operands[4], *epsilon, *feature_index));
}
case HloOpcode::kPad: {
optional<PaddingConfig> padding;
attrs["padding"] = {true, AttrTy::kPaddingConfig, &padding};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferPadShape(
operands[0]->shape(), operands[1]->shape(), *padding);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreatePad(
*shape, operands[0], operands[1], *padding));
}
case HloOpcode::kFusion: {
optional<HloComputation*> fusion_computation;
attrs["calls"] = {true, AttrTy::kHloComputation,
&fusion_computation};
optional<HloInstruction::FusionKind> fusion_kind;
attrs["kind"] = {true, AttrTy::kFusionKind, &fusion_kind};
optional<
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>>
output_to_operand_aliasing;
attrs["output_to_operand_aliasing"] = {false,
AttrTy::kInstructionAliasing,
&output_to_operand_aliasing};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
auto instr = builder->AddInstruction(HloInstruction::CreateFusion(
*shape, *fusion_kind, operands, *fusion_computation));
auto fusion_instr = Cast<HloFusionInstruction>(instr);
if (output_to_operand_aliasing.has_value()) {
fusion_instr->set_output_to_operand_aliasing(
std::move(*output_to_operand_aliasing));
}
return instr;
}
case HloOpcode::kInfeed: {
optional<std::string> config;
attrs["infeed_config"] = {false, AttrTy::kString, &config};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
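      // The parsed shape is the (data, token) result tuple; element 0 is
      // the infeed data shape handed to CreateInfeed.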
if (!shape->IsTuple() && !ShapeUtil::IsEmptyTuple(*shape)) {
TokenError("infeed must have a non-empty tuple shape");
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::GetTupleElementShape(*shape, 0), operands[0],
config ? *config : ""));
}
case HloOpcode::kOutfeed: {
optional<std::string> config;
optional<Shape> outfeed_shape;
attrs["outfeed_config"] = {false, AttrTy::kString, &config};
attrs["outfeed_shape"] = {false, AttrTy::kShape,
&outfeed_shape};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
HloInstruction* const outfeed_input = operands[0];
HloInstruction* const outfeed_token = operands[1];
const Shape shape =
outfeed_shape.has_value() ? *outfeed_shape : outfeed_input->shape();
return builder->AddInstruction(HloInstruction::CreateOutfeed(
shape, outfeed_input, outfeed_token, config ? *config : ""));
}
case HloOpcode::kRng: {
optional<RandomDistribution> distribution;
attrs["distribution"] = {true, AttrTy::kDistribution,
&distribution};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateRng(*shape, *distribution, operands));
}
case HloOpcode::kRngGetAndUpdateState: {
optional<int64_t> delta;
attrs["delta"] = {true, AttrTy::kInt64, &delta};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 0)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(
HloInstruction::CreateRngGetAndUpdateState(*shape, *delta));
}
case HloOpcode::kRngBitGenerator: {
optional<RandomAlgorithm> algorithm;
attrs["algorithm"] = {true, AttrTy::kRandomAlgorithm,
&algorithm};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateRngBitGenerator(
*shape, operands[0], *algorithm));
}
case HloOpcode::kReducePrecision: {
optional<int64_t> exponent_bits;
optional<int64_t> mantissa_bits;
attrs["exponent_bits"] = {true, AttrTy::kInt64,
&exponent_bits};
attrs["mantissa_bits"] = {true, AttrTy::kInt64,
&mantissa_bits};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateReducePrecision(
*shape, operands[0], static_cast<int>(*exponent_bits),
static_cast<int>(*mantissa_bits)));
}
case HloOpcode::kConditional: {
optional<HloComputation*> true_computation;
optional<HloComputation*> false_computation;
optional<std::vector<HloComputation*>> branch_computations;
if (!preset_operands && !ParseOperands(&operands, builder)) {
return nullptr;
}
if (!ShapeUtil::IsScalar(operands[0]->shape())) {
TokenError("The first operand must be a scalar");
return nullptr;
}
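      // A PRED branch index selects between true_computation and
      // false_computation; an S32 index selects from branch_computations.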
const bool branch_index_is_bool =
operands[0]->shape().element_type() == PRED;
if (branch_index_is_bool) {
attrs["true_computation"] = {true, AttrTy::kHloComputation,
&true_computation};
attrs["false_computation"] = {
true, AttrTy::kHloComputation, &false_computation};
} else {
if (operands[0]->shape().element_type() != S32) {
TokenError("The first operand must be a scalar of PRED or S32");
return nullptr;
}
attrs["branch_computations"] = {true,
AttrTy::kBracedHloComputationList,
&branch_computations};
}
if (!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (branch_index_is_bool) {
branch_computations.emplace({*true_computation, *false_computation});
}
if (branch_computations->empty() ||
operands.size() != branch_computations->size() + 1) {
return nullptr;
}
if (!maybe_infer_shape([&] {
absl::InlinedVector<ProgramShape, 2> branch_computation_shapes;
branch_computation_shapes.reserve(branch_computations->size());
for (auto* computation : *branch_computations) {
branch_computation_shapes.push_back(
computation->ComputeProgramShape());
}
absl::InlinedVector<Shape, 2> branch_operand_shapes;
branch_operand_shapes.reserve(operands.size() - 1);
for (int i = 1; i < operands.size(); ++i) {
branch_operand_shapes.push_back(operands[i]->shape());
}
return ShapeInference::InferConditionalShape(
operands[0]->shape(), branch_computation_shapes,
branch_operand_shapes);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateConditional(
*shape, operands[0],
absl::MakeSpan(*branch_computations),
absl::MakeSpan(operands).subspan(1)));
}
case HloOpcode::kCustomCall: {
optional<std::string> custom_call_target;
optional<Window> window;
optional<ConvolutionDimensionNumbers> dnums;
optional<int64_t> feature_group_count;
optional<int64_t> batch_group_count;
optional<std::vector<Shape>> operand_layout_constraints;
optional<bool> custom_call_has_side_effect;
optional<HloComputation*> to_apply;
optional<
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>>
output_to_operand_aliasing;
optional<PaddingType> padding_type;
optional<std::vector<HloComputation*>> called_computations;
optional<CustomCallSchedule> custom_call_schedule;
optional<CustomCallApiVersion> api_version;
attrs["custom_call_target"] = {true, AttrTy::kString,
&custom_call_target};
attrs["window"] = {false, AttrTy::kWindow, &window};
attrs["dim_labels"] = {false,
AttrTy::kConvolutionDimensionNumbers, &dnums};
attrs["feature_group_count"] = {false, AttrTy::kInt64,
&feature_group_count};
attrs["batch_group_count"] = {false, AttrTy::kInt64,
&batch_group_count};
attrs["operand_layout_constraints"] = {
false, AttrTy::kShapeList, &operand_layout_constraints};
attrs["custom_call_has_side_effect"] = {false, AttrTy::kBool,
&custom_call_has_side_effect};
attrs["to_apply"] = {false, AttrTy::kHloComputation,
&to_apply};
attrs["called_computations"] = {false,
AttrTy::kBracedHloComputationList,
&called_computations};
attrs["output_to_operand_aliasing"] = {false,
AttrTy::kInstructionAliasing,
&output_to_operand_aliasing};
attrs["padding_type"] = {false, AttrTy::kPaddingType,
&padding_type};
optional<Literal> literal;
attrs["literal"] = {false, AttrTy::kLiteral, &literal};
optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {false, AttrTy::kPrecisionList,
&operand_precision};
HloInstruction* instruction;
if (called_computations.has_value() && to_apply.has_value()) {
        TokenError(
            "A single instruction can't have both to_apply and "
            "called_computations fields");
return nullptr;
}
attrs["schedule"] = {false, AttrTy::kCustomCallSchedule,
&custom_call_schedule};
attrs["api_version"] = {false, AttrTy::kCustomCallApiVersion,
&api_version};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (api_version.has_value() &&
*api_version == CustomCallApiVersion::API_VERSION_UNSPECIFIED) {
TokenError(StrCat("Invalid API version: ",
CustomCallApiVersion_Name(*api_version)));
return nullptr;
}
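      // Layout-constrained custom calls must carry layouts on the result
      // shape and on every operand constraint, and each constraint must be
      // compatible with the corresponding operand shape.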
if (operand_layout_constraints.has_value()) {
if (!LayoutUtil::HasLayout(*shape)) {
TokenError("Layout must be set on layout-constrained custom call");
return nullptr;
}
if (operands.size() != operand_layout_constraints->size()) {
TokenError(StrCat("Expected ", operands.size(),
" operand layout constraints, ",
operand_layout_constraints->size(), " given"));
return nullptr;
}
for (int64_t i = 0; i < operands.size(); ++i) {
const Shape& operand_shape_with_layout =
(*operand_layout_constraints)[i];
if (!LayoutUtil::HasLayout(operand_shape_with_layout)) {
TokenError(StrCat(
"Operand layout constraint shape ",
ShapeUtil::HumanStringWithLayout(operand_shape_with_layout),
" for operand ", i, " does not have a layout"));
return nullptr;
}
if (!ShapeUtil::Compatible(operand_shape_with_layout,
operands[i]->shape())) {
TokenError(StrCat(
"Operand layout constraint shape ",
ShapeUtil::HumanStringWithLayout(operand_shape_with_layout),
" for operand ", i, " is not compatible with operand shape ",
ShapeUtil::HumanStringWithLayout(operands[i]->shape())));
return nullptr;
}
}
instruction = builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *custom_call_target, *operand_layout_constraints,
""));
} else {
if (to_apply.has_value()) {
instruction =
builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *to_apply, *custom_call_target, ""));
} else if (called_computations.has_value()) {
instruction =
builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *called_computations, *custom_call_target,
""));
} else {
instruction =
builder->AddInstruction(HloInstruction::CreateCustomCall(
*shape, operands, *custom_call_target, ""));
}
}
auto custom_call_instr = Cast<HloCustomCallInstruction>(instruction);
if (window.has_value()) {
custom_call_instr->set_window(*window);
}
if (dnums.has_value()) {
custom_call_instr->set_convolution_dimension_numbers(*dnums);
}
if (feature_group_count.has_value()) {
custom_call_instr->set_feature_group_count(*feature_group_count);
}
if (batch_group_count.has_value()) {
custom_call_instr->set_batch_group_count(*batch_group_count);
}
if (padding_type.has_value()) {
custom_call_instr->set_padding_type(*padding_type);
}
if (custom_call_has_side_effect.has_value()) {
custom_call_instr->set_custom_call_has_side_effect(
*custom_call_has_side_effect);
}
if (custom_call_schedule.has_value()) {
custom_call_instr->set_custom_call_schedule(*custom_call_schedule);
}
if (api_version.has_value()) {
custom_call_instr->set_api_version(*api_version);
}
if (output_to_operand_aliasing.has_value()) {
custom_call_instr->set_output_to_operand_aliasing(
std::move(*output_to_operand_aliasing));
}
if (literal.has_value()) {
custom_call_instr->set_literal(std::move(*literal));
}
PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
operands.size(), PrecisionConfig::DEFAULT);
}
*custom_call_instr->mutable_precision_config() = precision_config;
return instruction;
}
case HloOpcode::kDot: {
optional<std::vector<int64_t>> lhs_contracting_dims;
attrs["lhs_contracting_dims"] = {
false, AttrTy::kBracedInt64List, &lhs_contracting_dims};
optional<std::vector<int64_t>> rhs_contracting_dims;
attrs["rhs_contracting_dims"] = {
false, AttrTy::kBracedInt64List, &rhs_contracting_dims};
optional<std::vector<int64_t>> lhs_batch_dims;
attrs["lhs_batch_dims"] = {false, AttrTy::kBracedInt64List,
&lhs_batch_dims};
optional<std::vector<int64_t>> rhs_batch_dims;
attrs["rhs_batch_dims"] = {false, AttrTy::kBracedInt64List,
&rhs_batch_dims};
optional<std::vector<PrecisionConfig::Precision>> operand_precision;
attrs["operand_precision"] = {false, AttrTy::kPrecisionList,
&operand_precision};
std::vector<SparsityDescriptor> sparsity;
attrs["sparsity"] = {false, AttrTy::kSparsityDescriptor,
&sparsity};
optional<PrecisionConfig::Algorithm> algorithm;
attrs["algorithm"] = {false, AttrTy::kPrecisionAlgorithm,
&algorithm};
LocTy loc = lexer_.GetLoc();
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
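      // Each sparsity descriptor contributes one extra metadata operand on
      // top of the usual LHS and RHS.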
int expected_size = HloDotInstruction::kOperands + sparsity.size();
if (sparsity.size() > HloDotInstruction::kOperands) {
Error(loc,
StrCat("too many sparse dot descriptors: ", sparsity.size()));
return nullptr;
}
if (operands.size() != expected_size) {
Error(loc, StrCat("expects ", expected_size, " operands, but has ",
operands.size(), " operands"));
return nullptr;
}
DotDimensionNumbers dnum;
if (lhs_contracting_dims) {
*dnum.mutable_lhs_contracting_dimensions() = {
lhs_contracting_dims->begin(), lhs_contracting_dims->end()};
}
if (rhs_contracting_dims) {
*dnum.mutable_rhs_contracting_dimensions() = {
rhs_contracting_dims->begin(), rhs_contracting_dims->end()};
}
if (lhs_batch_dims) {
*dnum.mutable_lhs_batch_dimensions() = {lhs_batch_dims->begin(),
lhs_batch_dims->end()};
}
if (rhs_batch_dims) {
*dnum.mutable_rhs_batch_dimensions() = {rhs_batch_dims->begin(),
rhs_batch_dims->end()};
}
PrecisionConfig precision_config;
if (operand_precision) {
*precision_config.mutable_operand_precision() = {
operand_precision->begin(), operand_precision->end()};
} else {
precision_config.mutable_operand_precision()->Resize(
HloDotInstruction::kOperands, PrecisionConfig::DEFAULT);
}
if (algorithm) {
precision_config.set_algorithm(*algorithm);
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferDotOpShape(
operands[0]->shape(), operands[1]->shape(), dnum,
std::nullopt, sparsity);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDot(
*shape, operands[0], operands[1], dnum, precision_config, sparsity,
absl::MakeSpan(operands).subspan(HloDotInstruction::kOperands)));
}
case HloOpcode::kGather: {
optional<std::vector<int64_t>> offset_dims;
attrs["offset_dims"] = {true, AttrTy::kBracedInt64List,
&offset_dims};
optional<std::vector<int64_t>> collapsed_slice_dims;
attrs["collapsed_slice_dims"] = {
true, AttrTy::kBracedInt64List, &collapsed_slice_dims};
optional<std::vector<int64_t>> start_index_map;
attrs["start_index_map"] = {true, AttrTy::kBracedInt64List,
&start_index_map};
optional<int64_t> index_vector_dim;
attrs["index_vector_dim"] = {true, AttrTy::kInt64,
&index_vector_dim};
optional<std::vector<int64_t>> slice_sizes;
attrs["slice_sizes"] = {true, AttrTy::kBracedInt64List,
&slice_sizes};
optional<bool> indices_are_sorted = false;
attrs["indices_are_sorted"] = {false, AttrTy::kBool,
&indices_are_sorted};
optional<std::vector<int64_t>> operand_batching_dims;
attrs["operand_batching_dims"] = {
false, AttrTy::kBracedInt64List, &operand_batching_dims};
optional<std::vector<int64_t>> start_indices_batching_dims;
attrs["start_indices_batching_dims"] = {false,
AttrTy::kBracedInt64List,
&start_indices_batching_dims};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
GatherDimensionNumbers dim_numbers =
HloGatherInstruction::MakeGatherDimNumbers(
*offset_dims,
*collapsed_slice_dims,
*start_index_map,
*index_vector_dim,
operand_batching_dims ? *operand_batching_dims
: std::vector<int64_t>(),
start_indices_batching_dims ? *start_indices_batching_dims
: std::vector<int64_t>());
if (!maybe_infer_shape([&] {
return ShapeInference::InferGatherShape(operands[0]->shape(),
operands[1]->shape(),
dim_numbers, *slice_sizes);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateGather(
*shape, operands[0], operands[1],
dim_numbers, *slice_sizes, indices_are_sorted.value()));
}
case HloOpcode::kScatter: {
optional<std::vector<int64_t>> update_window_dims;
attrs["update_window_dims"] = {
true, AttrTy::kBracedInt64List, &update_window_dims};
optional<std::vector<int64_t>> inserted_window_dims;
attrs["inserted_window_dims"] = {
true, AttrTy::kBracedInt64List, &inserted_window_dims};
optional<std::vector<int64_t>> scatter_dims_to_operand_dims;
attrs["scatter_dims_to_operand_dims"] = {true,
AttrTy::kBracedInt64List,
&scatter_dims_to_operand_dims};
optional<int64_t> index_vector_dim;
attrs["index_vector_dim"] = {true, AttrTy::kInt64,
&index_vector_dim};
optional<HloComputation*> update_computation;
attrs["to_apply"] = {true, AttrTy::kHloComputation,
&update_computation};
optional<bool> indices_are_sorted = false;
attrs["indices_are_sorted"] = {false, AttrTy::kBool,
&indices_are_sorted};
optional<bool> unique_indices = false;
attrs["unique_indices"] = {false, AttrTy::kBool,
&unique_indices};
optional<std::vector<int64_t>> input_batching_dims;
attrs["input_batching_dims"] = {
false, AttrTy::kBracedInt64List, &input_batching_dims};
optional<std::vector<int64_t>> scatter_indices_batching_dims;
attrs["scatter_indices_batching_dims"] = {false,
AttrTy::kBracedInt64List,
&scatter_indices_batching_dims};
if ((!preset_operands && !ParseOperands(&operands, builder)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
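      // scatter takes N inputs, a single indices operand, and N updates,
      // so the operand count must be odd.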
if (operands.size() % 2 == 0) {
TokenError(StrCat("expects an odd number of operands, but has ",
operands.size(), " operands"));
return nullptr;
}
ScatterDimensionNumbers dim_numbers =
HloScatterInstruction::MakeScatterDimNumbers(
*update_window_dims,
*inserted_window_dims,
*scatter_dims_to_operand_dims,
*index_vector_dim,
input_batching_dims ? *input_batching_dims
: std::vector<int64_t>(),
scatter_indices_batching_dims ? *scatter_indices_batching_dims
: std::vector<int64_t>());
if (!maybe_infer_shape([&] {
absl::InlinedVector<const Shape*, 3> arg_shapes;
arg_shapes.reserve(operands.size());
for (auto* operand : operands) {
arg_shapes.push_back(&operand->shape());
}
return ShapeInference::InferScatterShape(
arg_shapes, update_computation.value()->ComputeProgramShape(),
dim_numbers);
})) {
return nullptr;
}
auto input_count = operands.size() / 2;
auto operand_span = absl::MakeConstSpan(operands);
return builder->AddInstruction(HloInstruction::CreateScatter(
*shape, operand_span.first(input_count), operands[input_count],
operand_span.last(input_count), *update_computation, dim_numbers,
indices_are_sorted.value(), unique_indices.value()));
}
case HloOpcode::kDomain: {
DomainData domain;
attrs["domain"] = {true, AttrTy::kDomain, &domain};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferUnaryOpShape(opcode, operands[0]);
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateDomain(
*shape, operands[0], std::move(domain.exit_metadata),
std::move(domain.entry_metadata)));
}
case HloOpcode::kGetDimensionSize: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 1)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferGetDimensionSizeShape(
operands[0]->shape(), dimensions->at(0));
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateGetDimensionSize(
*shape, operands[0], (*dimensions)[0]));
}
case HloOpcode::kSetDimensionSize: {
optional<std::vector<int64_t>> dimensions;
attrs["dimensions"] = {true, AttrTy::kBracedInt64List,
&dimensions};
if ((!preset_operands &&
!ParseOperands(&operands, builder, 2)) ||
!ParseAttributes(attrs, allow_attributes, shape)) {
return nullptr;
}
if (!maybe_infer_shape([&] {
return ShapeInference::InferSetDimensionSizeShape(
operands[0]->shape(), operands[1]->shape(), dimensions->at(0));
})) {
return nullptr;
}
return builder->AddInstruction(HloInstruction::CreateSetDimensionSize(
*shape, operands[0], operands[1], (*dimensions)[0]));
}
default:
return nullptr;
}
}
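// A collective device list is either an explicit {{...}, ...} list of
// replica groups or an iota-style [dims]<=[reshape_dims]T(perm) form.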
bool HloParserImpl::ParseCollectiveDeviceList(
CollectiveDeviceList* device_list) {
if (lexer_.GetKind() == TokKind::kLbrace) {
std::vector<ReplicaGroup> replica_groups;
if (!ParseReplicaGroupsOnly(&replica_groups)) {
return false;
}
*device_list = CollectiveDeviceList(replica_groups);
return true;
}
std::vector<int64_t> tile_assignment_dimensions;
std::vector<int64_t> iota_reshape_dims;
std::vector<int> iota_transpose_perm;
if (!ParseTileAssignment(tile_assignment_dimensions, iota_reshape_dims,
iota_transpose_perm, nullptr)) {
return false;
}
if (tile_assignment_dimensions.size() != 2) {
VLOG(kErrorLevel)
<< "Expected tile assignment to have 2 dimensions for collective "
"device list but got "
<< tile_assignment_dimensions.size();
return false;
}
*device_list = CollectiveDeviceList(IotaReplicaGroupList(
tile_assignment_dimensions[0], tile_assignment_dimensions[1],
iota_reshape_dims, iota_transpose_perm));
return true;
}
bool HloParserImpl::ParseSharding(std::optional<HloSharding>& sharding) {
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding attribute")) {
return false;
}
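  // Anything other than '{' or '}' here begins a single (non-tuple)
  // sharding; otherwise parse a comma-separated tuple of shardings.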
if (lexer_.GetKind() != TokKind::kLbrace &&
lexer_.GetKind() != TokKind::kRbrace) {
return ParseSingleSharding(sharding, true);
}
std::vector<HloSharding> tuple_shardings;
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
std::optional<HloSharding> tuple_sharding;
if (!ParseSingleSharding(tuple_sharding,
false)) {
return false;
}
tuple_shardings.push_back(std::move(*tuple_sharding));
} while (EatIfPresent(TokKind::kComma));
}
sharding = HloSharding::FlatTuple(std::move(tuple_shardings));
return ParseToken(TokKind::kRbrace, "expected '}' to end sharding attribute");
}
bool HloParserImpl::ParseFrontendAttributes(
FrontendAttributes* frontend_attributes) {
CHECK(frontend_attributes != nullptr);
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start frontend attributes")) {
return false;
}
  if (lexer_.GetKind() == TokKind::kRbrace) {
    // Empty frontend_attributes list.
  } else {
do {
std::string attribute;
if (!ParseAttributeName(&attribute)) {
return false;
}
std::string result;
if (lexer_.GetKind() == TokKind::kString) {
if (!ParseString(&result)) {
return false;
}
} else if (lexer_.GetKind() == TokKind::kLbrace) {
if (!ParseJsonDict(&result)) {
return false;
}
} else {
return false;
}
(*frontend_attributes->mutable_map())[attribute] = result;
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace,
"expects '}' at the end of frontend attributes");
}
bool HloParserImpl::ParseStatisticsViz(StatisticsViz* statistics_viz) {
CHECK(statistics_viz != nullptr);
if (!ParseToken(TokKind::kLbrace, "expected '{' to start statistics")) {
return false;
}
  if (lexer_.GetKind() == TokKind::kRbrace) {
    // Empty statistics list.
  } else {
std::string visualizing_index_attr_name;
if (!ParseAttributeName(&visualizing_index_attr_name)) {
return false;
}
if (lexer_.GetKind() != TokKind::kInt) {
return false;
}
statistics_viz->set_stat_index_to_visualize(lexer_.GetInt64Val());
lexer_.Lex();
while (EatIfPresent(TokKind::kComma)) {
std::string stat_name;
if (!ParseAttributeName(&stat_name)) {
return false;
}
if (lexer_.GetKind() != TokKind::kDecimal &&
lexer_.GetKind() != TokKind::kInt) {
return false;
}
Statistic statistic;
statistic.set_stat_name(stat_name);
statistic.set_stat_val(lexer_.GetKind() == TokKind::kDecimal
? lexer_.GetDecimalVal()
: lexer_.GetInt64Val());
lexer_.Lex();
*statistics_viz->add_statistics() = std::move(statistic);
}
}
return ParseToken(TokKind::kRbrace, "expects '}' at the end of statistics");
}
bool HloParserImpl::ParseTileAssignment(
std::vector<int64_t>& tile_assignment_dimensions,
std::vector<int64_t>& iota_reshape_dims,
std::vector<int>& iota_transpose_perm, std::vector<int64_t>* devices) {
if (!ParseToken(TokKind::kLsquare,
"expected '[' to start sharding devices shape")) {
return false;
}
do {
int64_t dim;
if (!ParseInt64(&dim)) {
return false;
}
tile_assignment_dimensions.push_back(dim);
} while (EatIfPresent(TokKind::kComma));
if (!ParseToken(TokKind::kRsquare,
"expected ']' to end sharding devices shape")) {
return false;
}
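  // A "<=[...]" suffix introduces an iota tile assignment; otherwise an
  // explicit flat device list follows.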
if (lexer_.GetKind() == TokKind::kLeq) {
lexer_.Lex();
if (!ParseToken(TokKind::kLsquare,
"expected '[' to start sharding iota_reshape_dims")) {
return false;
}
do {
int64_t dim;
if (!ParseInt64(&dim)) {
return false;
}
iota_reshape_dims.push_back(dim);
} while (EatIfPresent(TokKind::kComma));
if (iota_reshape_dims.empty()) {
return TokenError("expected non-empty iota_reshape_dims");
}
if (!ParseToken(TokKind::kRsquare,
"expected ']' to end sharding iota_reshape_dims")) {
return false;
}
if (iota_reshape_dims.size() == 1) {
iota_transpose_perm.push_back(0);
} else {
if (lexer_.GetKind() != TokKind::kIdent || lexer_.GetStrVal() != "T") {
return TokenError(
"expected 'T(' to start sharding devices "
"iota_transpose_perm");
}
lexer_.Lex();
if (!ParseToken(TokKind::kLparen,
"expected 'T(' to start sharding devices "
"iota_transpose_perm")) {
return false;
}
do {
int64_t dim;
if (!ParseInt64(&dim)) {
return false;
}
if (dim >= iota_reshape_dims.size()) {
          return TokenError(absl::StrFormat(
              "Out of range iota_transpose_perm value %lld.", dim));
}
iota_transpose_perm.push_back(dim);
} while (EatIfPresent(TokKind::kComma));
if (!ParseToken(TokKind::kRparen,
"expected ')' to end sharding devices "
"iota_transpose_perm")) {
return false;
}
}
} else {
if (!devices) {
      return TokenError(
          "expected an iota tile assignment; explicit device entries are "
          "not allowed here");
}
do {
int64_t device;
if (!ParseInt64(&device)) {
return false;
}
devices->push_back(device);
} while (EatIfPresent(TokKind::kComma));
}
return true;
}
bool HloParserImpl::ParseSingleSharding(std::optional<HloSharding>& sharding,
bool lbrace_pre_lexed) {
if (!lbrace_pre_lexed &&
!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding attribute")) {
return false;
}
LocTy loc = lexer_.GetLoc();
bool maximal = false;
bool replicated = false;
bool manual = false;
bool unknown = false;
bool last_tile_dim_replicate = false;
bool last_tile_dims = false;
bool shard_like = false;
bool shard_as = false;
int64_t shard_group_id;
std::vector<int64_t> devices;
std::vector<int64_t> tile_assignment_dimensions;
std::vector<int64_t> iota_reshape_dims;
std::vector<int> iota_transpose_perm;
std::vector<OpSharding::Type> subgroup_types;
std::vector<OpMetadata> metadata;
while (lexer_.GetKind() != TokKind::kRbrace) {
switch (lexer_.GetKind()) {
case TokKind::kw_maximal:
maximal = true;
lexer_.Lex();
break;
case TokKind::kw_replicated:
replicated = true;
lexer_.Lex();
break;
case TokKind::kw_manual:
manual = true;
lexer_.Lex();
break;
case TokKind::kw_unknown:
unknown = true;
lexer_.Lex();
break;
case TokKind::kAttributeName: {
if (lexer_.GetStrVal() == "device") {
if (lexer_.Lex() != TokKind::kInt) {
return TokenError("device= attribute must be an integer");
}
devices = {lexer_.GetInt64Val()};
lexer_.Lex();
} else if (lexer_.GetStrVal() == "devices") {
lexer_.Lex();
if (!ParseTileAssignment(tile_assignment_dimensions,
iota_reshape_dims, iota_transpose_perm,
&devices)) {
return false;
}
} else if (lexer_.GetStrVal() == "metadata") {
lexer_.Lex();
if (!ParseSingleOrListMetadata(metadata)) {
return false;
}
} else if (lexer_.GetStrVal() == "last_tile_dims") {
last_tile_dims = true;
lexer_.Lex();
if (!ParseListShardingType(&subgroup_types)) {
return false;
}
} else {
          return TokenError(
              "unknown attribute in sharding: expected device=, devices=, "
              "metadata=, or last_tile_dims=");
}
break;
}
case TokKind::kw_last_tile_dim_replicate:
last_tile_dim_replicate = true;
lexer_.Lex();
break;
case TokKind::kw_shard_as: {
shard_as = true;
lexer_.Lex();
if (!ParseInt64(&shard_group_id)) {
return false;
}
break;
}
case TokKind::kw_shard_like: {
shard_like = true;
lexer_.Lex();
if (!ParseInt64(&shard_group_id)) {
return false;
}
break;
}
case TokKind::kRbrace:
break;
default:
return TokenError("unexpected token");
}
}
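  // Translate the parsed flags and lists into a concrete HloSharding.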
if (replicated) {
if (!devices.empty()) {
return Error(loc,
"replicated shardings should not have any devices assigned");
}
sharding = HloSharding::Replicate(metadata);
} else if (maximal) {
if (devices.size() != 1) {
return Error(loc,
"maximal shardings should have exactly one device assigned");
}
sharding = HloSharding::AssignDevice(devices[0], metadata);
} else if (manual) {
if (!devices.empty()) {
return Error(loc,
"manual shardings should not have any devices assigned");
}
sharding = HloSharding::Manual(metadata);
} else if (unknown) {
if (!devices.empty()) {
return Error(loc,
"unknown shardings should not have any devices assigned");
}
sharding = HloSharding::Unknown(metadata);
} else {
if (tile_assignment_dimensions.empty()) {
return Error(
loc,
"non-maximal shardings must have a tile assignment list including "
"dimensions");
}
if (iota_transpose_perm.size() != iota_reshape_dims.size()) {
return Error(loc,
absl::StrFormat(
"iota_transpose_perm should have the same rank as "
"iota_reshape_dims : expected %lld, saw %lld.",
iota_reshape_dims.size(), iota_transpose_perm.size()));
}
if (last_tile_dim_replicate) {
CHECK(subgroup_types.empty());
subgroup_types.push_back(OpSharding::REPLICATED);
}
if (!iota_reshape_dims.empty()) {
CHECK(devices.empty());
sharding =
subgroup_types.empty()
? HloSharding::IotaTile(tile_assignment_dimensions,
iota_reshape_dims, iota_transpose_perm,
metadata)
: HloSharding::Subgroup(
TileAssignment(tile_assignment_dimensions,
iota_reshape_dims, iota_transpose_perm),
subgroup_types, metadata);
} else {
if (devices.size() <= 1) {
return Error(
loc,
"non-maximal shardings must have more than one device assigned");
}
auto tiles = std::make_shared<Array<int64_t>>(tile_assignment_dimensions);
absl::c_copy(devices, tiles->begin());
sharding =
subgroup_types.empty()
? HloSharding::Tile(TileAssignment(std::move(tiles)), metadata)
: HloSharding::Subgroup(TileAssignment(std::move(tiles)),
subgroup_types, metadata);
}
}
if (shard_as || shard_like) {
sharding = sharding->SetShardGroup(
shard_as ? HloSharding::ShardAs(shard_group_id)
: HloSharding::ShardLike(shard_group_id));
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseParameterReplication(
ParameterReplication* parameter_replication) {
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start parameter_replication attribute")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
if (lexer_.GetKind() == TokKind::kw_true) {
parameter_replication->add_replicated_at_leaf_buffers(true);
} else if (lexer_.GetKind() == TokKind::kw_false) {
parameter_replication->add_replicated_at_leaf_buffers(false);
} else {
return false;
}
lexer_.Lex();
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace,
"expected '}' to end parameter_replication attribute");
}
bool HloParserImpl::ParseBooleanListOrSingleBoolean(BoolList* boolean_list) {
if (lexer_.GetKind() != TokKind::kLbrace &&
lexer_.GetKind() != TokKind::kw_true &&
lexer_.GetKind() != TokKind::kw_false) {
TokenError("Expected list of booleans or true/false value");
return false;
}
auto parse_boolean = [this, boolean_list]() {
if (lexer_.GetKind() == TokKind::kw_true) {
boolean_list->push_back(true);
lexer_.Lex();
return true;
} else if (lexer_.GetKind() == TokKind::kw_false) {
boolean_list->push_back(false);
lexer_.Lex();
return true;
}
return false;
};
if (parse_boolean()) {
return true;
}
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start boolean list attribute")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
if (!parse_boolean()) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace,
"expected '}' to end boolean list attribute");
}
bool HloParserImpl::ParseReplicaGroupsOnly(
std::vector<ReplicaGroup>* replica_groups) {
std::vector<std::vector<int64_t>> result;
if (!ParseInt64ListList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
&result)) {
return false;
}
*replica_groups = CreateReplicaGroups(result);
return true;
}
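// Parses a domain body of the form {kind="...", entry=<sharding>,
// exit=<sharding>}. Only the sharding domain kind is supported here.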
bool HloParserImpl::ParseDomain(DomainData* domain) {
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<std::string> kind;
optional<HloSharding> entry_sharding;
optional<HloSharding> exit_sharding;
attrs["kind"] = {true, AttrTy::kString, &kind};
attrs["entry"] = {true, AttrTy::kSharding, &entry_sharding};
attrs["exit"] = {true, AttrTy::kSharding, &exit_sharding};
if (!ParseSubAttributes(attrs)) {
return false;
}
if (*kind == ShardingMetadata::KindName()) {
auto entry_sharding_ptr =
std::make_unique<HloSharding>(std::move(*entry_sharding));
auto exit_sharding_ptr =
std::make_unique<HloSharding>(std::move(*exit_sharding));
domain->entry_metadata =
std::make_unique<ShardingMetadata>(std::move(entry_sharding_ptr));
domain->exit_metadata =
std::make_unique<ShardingMetadata>(std::move(exit_sharding_ptr));
} else {
return TokenError(StrCat("unsupported domain kind: ", *kind));
}
return true;
}
bool HloParserImpl::ParseInstructionNames(
std::vector<HloInstruction*>* instructions) {
if (!ParseToken(TokKind::kLbrace,
"expects '{' at the beginning of instruction name list")) {
return false;
}
LocTy loc = lexer_.GetLoc();
do {
std::string name;
if (!ParseName(&name)) {
return Error(loc, "expects a instruction name");
}
std::pair<HloInstruction*, LocTy>* instr = FindInstruction(name);
if (!instr) {
return TokenError(StrFormat("instruction '%s' is not defined", name));
}
instructions->push_back(instr->first);
} while (EatIfPresent(TokKind::kComma));
return ParseToken(TokKind::kRbrace,
"expects '}' at the end of instruction name list");
}
template <typename T>
std::string StringifyValue(T val) {
if constexpr (is_complex_v<T>) {
return StrFormat("(%f, %f)", val.real(), val.imag());
} else {
return StrCat(val);
}
}
template <class T>
uint64_t GetNanPayload(T val) {
if constexpr (std::is_same_v<T, double>) {
auto rep = absl::bit_cast<uint64_t>(val);
if (auto payload = rep & NanPayloadBitMask<double>()) {
return payload;
}
return QuietNanWithoutPayload<double>();
} else {
static_assert(!std::numeric_limits<T>::has_quiet_NaN);
static_assert(!std::numeric_limits<T>::has_signaling_NaN);
return 0;
}
}
template <typename LiteralNativeT, typename LiteralComponentT>
LiteralNativeT LiteralNativeFromRealImag(LiteralComponentT real,
LiteralComponentT imag) {
if constexpr (std::is_same_v<LiteralNativeT,
std::complex<LiteralComponentT>>) {
return LiteralNativeT(real, imag);
} else {
return real;
}
}
template <typename T>
struct ComponentType {
using Type = T;
};
template <typename T>
struct ComponentType<std::complex<T>> {
using Type = T;
};
template <typename T>
T GetReal(T value) {
return value;
}
template <typename T>
T GetReal(std::complex<T> value) {
return value.real();
}
template <typename T>
T GetImag(T value) {
return 0;
}
template <typename T>
T GetImag(std::complex<T> value) {
return value.imag();
}
template <typename T>
struct MinMaxFiniteValue {
static constexpr T max() { return std::numeric_limits<T>::max(); }
static constexpr T min() { return std::numeric_limits<T>::lowest(); }
};
template <typename T>
bool IsFinite(T val) {
if constexpr (std::numeric_limits<T>::has_infinity ||
std::numeric_limits<T>::has_quiet_NaN ||
std::numeric_limits<T>::has_signaling_NaN) {
return Eigen::numext::isfinite(val);
} else {
return true;
}
}
template <typename LiteralNativeT, typename ParsedElemT>
bool HloParserImpl::CheckParsedValueIsInRange(LocTy loc, ParsedElemT value) {
if constexpr (std::is_floating_point_v<ParsedElemT>) {
auto value_as_native_t = static_cast<LiteralNativeT>(value);
auto value_double_converted = static_cast<ParsedElemT>(value_as_native_t);
if (!IsFinite(value) || IsFinite(value_double_converted)) {
value = value_double_converted;
}
}
PrimitiveType literal_ty =
primitive_util::NativeToPrimitiveType<LiteralNativeT>();
if (!IsFinite(value)) {
} else if constexpr (std::is_unsigned<LiteralNativeT>::value) {
static_assert(std::is_same_v<ParsedElemT, int64_t> ||
std::is_same_v<ParsedElemT, bool>,
"Unimplemented checking for ParsedElemT");
const uint64_t unsigned_value = value;
const uint64_t upper_bound =
static_cast<uint64_t>(std::numeric_limits<LiteralNativeT>::max());
if (unsigned_value > upper_bound) {
return Error(loc, StrCat("value ", value,
" is out of range for literal's primitive type ",
PrimitiveType_Name(literal_ty), " namely [0, ",
upper_bound, "]."));
}
} else if (value > static_cast<ParsedElemT>(
MinMaxFiniteValue<LiteralNativeT>::max()) ||
value < static_cast<ParsedElemT>(
MinMaxFiniteValue<LiteralNativeT>::min())) {
return Error(
loc,
StrCat(
"value ", value, " is out of range for literal's primitive type ",
PrimitiveType_Name(literal_ty), " namely [",
static_cast<ParsedElemT>(MinMaxFiniteValue<LiteralNativeT>::min()),
", ",
static_cast<ParsedElemT>(MinMaxFiniteValue<LiteralNativeT>::max()),
"]."));
}
return true;
}
template <typename LiteralNativeT>
bool HloParserImpl::CheckParsedValueIsInRange(LocTy loc,
std::complex<double> value) {
using LiteralComplexComponentT =
decltype(std::real(std::declval<LiteralNativeT>()));
auto check_component = [&](absl::string_view name, double v) {
if (!std::isfinite(v)) {
return true;
}
double min = MinMaxFiniteValue<LiteralComplexComponentT>::min();
double max = MinMaxFiniteValue<LiteralComplexComponentT>::max();
if (v < min || v > max) {
return Error(
loc,
StrCat(name, " part ", v,
" is out of range for literal's primitive type ",
PrimitiveType_Name(
primitive_util::NativeToPrimitiveType<LiteralNativeT>()),
", namely [", min, ", ", max, "]."));
}
return true;
};
return check_component("real", std::real(value)) &&
check_component("imaginary", std::imag(value));
}
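// Writes one parsed element into `literal` at linear position `index`,
// after range-checking it against LiteralNativeT. NaN payloads are
// preserved when the target type can represent them and rejected
// otherwise; for complex values the real and imaginary components are
// handled independently.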
template <typename LiteralNativeT, typename ParsedElemT>
bool HloParserImpl::SetValueInLiteralHelper(LocTy loc, ParsedElemT value,
int64_t index, Literal* literal) {
if (!CheckParsedValueIsInRange<LiteralNativeT>(loc, value)) {
return false;
}
if (index >= ShapeUtil::ElementsIn(literal->shape())) {
return Error(loc, StrCat("tries to set value ", StringifyValue(value),
" to a literal in shape ",
ShapeUtil::HumanString(literal->shape()),
" at linear index ", index,
", but the index is out of range"));
}
using ParsedElemComponentT = typename ComponentType<ParsedElemT>::Type;
using LiteralNativeComponentT = typename ComponentType<LiteralNativeT>::Type;
const auto handle_nan =
[this, literal, index, loc](
ParsedElemComponentT parsed_value_component,
LiteralNativeComponentT* literal_value_component) {
if (!std::isnan(static_cast<double>(parsed_value_component))) {
return true;
}
auto nan_payload = GetNanPayload(parsed_value_component);
if constexpr (NanPayloadBits<LiteralNativeComponentT>() > 0) {
if (nan_payload == QuietNanWithoutPayload<double>()) {
nan_payload = QuietNanWithoutPayload<LiteralNativeComponentT>();
}
const auto kLargestPayload =
NanPayloadBitMask<LiteralNativeComponentT>();
if (nan_payload > kLargestPayload) {
return Error(
loc, StrCat("tries to set NaN payload 0x",
absl::Hex(nan_payload), " to a literal in shape ",
ShapeUtil::HumanString(literal->shape()),
" at linear index ", index,
", but the NaN payload is out of range (0x",
absl::Hex(kLargestPayload), ")"));
}
*literal_value_component =
NanWithSignAndPayload<LiteralNativeComponentT>(
std::signbit(
static_cast<double>(parsed_value_component)),
nan_payload);
} else {
if (nan_payload != QuietNanWithoutPayload<double>()) {
return Error(
loc, StrCat("tries to set NaN payload 0x",
absl::Hex(nan_payload), " to a literal in shape ",
ShapeUtil::HumanString(literal->shape()),
" at linear index ", index, ", but ",
primitive_util::LowercasePrimitiveTypeName(
literal->shape().element_type()),
" does not support payloads"));
}
}
return true;
};
const ParsedElemComponentT parsed_real_value = GetReal(value);
auto literal_real_value =
static_cast<LiteralNativeComponentT>(parsed_real_value);
if (std::is_floating_point_v<ParsedElemT> ||
std::is_same_v<ParsedElemT, std::complex<double>>) {
if (!handle_nan(parsed_real_value, &literal_real_value)) {
return false;
}
}
const ParsedElemComponentT parsed_imag_value = GetImag(value);
auto literal_imag_value =
static_cast<LiteralNativeComponentT>(parsed_imag_value);
if constexpr (std::is_same_v<ParsedElemT, std::complex<double>>) {
if (!handle_nan(parsed_imag_value, &literal_imag_value)) {
return false;
}
}
literal->data<LiteralNativeT>().at(index) =
LiteralNativeFromRealImag<LiteralNativeT>(literal_real_value,
literal_imag_value);
return true;
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, int64_t value, int64_t index,
Literal* literal) {
const Shape& shape = literal->shape();
return primitive_util::PrimitiveTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
if constexpr (primitive_type_constant == PRED) {
return SetValueInLiteralHelper<bool>(loc, static_cast<bool>(value),
index, literal);
}
if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return SetValueInLiteralHelper<NativeT>(loc, value, index, literal);
}
LOG(FATAL) << "unknown integral primitive type "
<< PrimitiveType_Name(shape.element_type());
},
shape.element_type());
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, double value, int64_t index,
Literal* literal) {
const Shape& shape = literal->shape();
return primitive_util::PrimitiveTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
if constexpr (primitive_util::IsFloatingPointType(
primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return SetValueInLiteralHelper<NativeT>(loc, value, index, literal);
}
LOG(FATAL) << "unknown floating point primitive type "
<< PrimitiveType_Name(shape.element_type());
},
shape.element_type());
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, bool value, int64_t index,
Literal* literal) {
const Shape& shape = literal->shape();
switch (shape.element_type()) {
case PRED:
return SetValueInLiteralHelper<bool>(loc, value, index, literal);
default:
LOG(FATAL) << PrimitiveType_Name(shape.element_type())
<< " is not PRED type";
}
}
bool HloParserImpl::SetValueInLiteral(LocTy loc, std::complex<double> value,
int64_t index, Literal* literal) {
const Shape& shape = literal->shape();
return primitive_util::PrimitiveTypeSwitch<bool>(
[&](auto primitive_type_constant) -> bool {
if constexpr (primitive_util::IsComplexType(primitive_type_constant)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type_constant>;
return SetValueInLiteralHelper<NativeT>(loc, value, index, literal);
}
LOG(FATAL) << PrimitiveType_Name(shape.element_type())
<< " is not a complex type";
},
shape.element_type());
}
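// Parses a literal. Tuple literals are parenthesized element literals,
// e.g. "( f32[] 1, s32[2] {1, 2} )"; non-tuple literals are a shape
// followed by its values, e.g. "f32[2,2] {{1, 2}, {3, 4}}". (Examples are
// illustrative of the grammar accepted below.)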
bool HloParserImpl::ParseLiteral(Literal* literal) {
if (lexer_.GetKind() == TokKind::kLparen) {
lexer_.Lex();
std::vector<Literal> elements;
while (lexer_.GetKind() != TokKind::kRparen) {
Literal element;
if (!ParseLiteral(&element)) {
return TokenError("Fails when parsing tuple element");
}
elements.emplace_back(std::move(element));
if (lexer_.GetKind() != TokKind::kRparen) {
ParseToken(TokKind::kComma, "expects ',' to separate tuple elements");
}
}
*literal = LiteralUtil::MakeTupleOwned(std::move(elements));
return ParseToken(TokKind::kRparen, "expects ')' to close a tuple literal");
}
Shape literal_shape;
if (!ParseShape(&literal_shape)) {
return false;
}
return ParseLiteral(literal, literal_shape);
}
bool HloParserImpl::ParseLiteral(Literal* literal, const Shape& shape) {
return shape.IsTuple() ? ParseTupleLiteral(literal, shape)
: ParseNonTupleLiteral(literal, shape);
}
bool HloParserImpl::ParseTupleLiteral(Literal* literal, const Shape& shape) {
if (!ParseToken(TokKind::kLparen, "expects '(' in front of tuple elements")) {
return false;
}
std::vector<Literal> elements(ShapeUtil::TupleElementCount(shape));
if (lexer_.GetKind() == TokKind::kRparen) {
} else {
for (int i = 0; i < elements.size(); i++) {
if (i > 0) {
ParseToken(TokKind::kComma, "expects ',' to separate tuple elements");
}
if (!ParseLiteral(&elements[i],
ShapeUtil::GetTupleElementShape(shape, i))) {
return TokenError(StrCat("expects the ", i, "th element"));
}
}
}
*literal = LiteralUtil::MakeTupleOwned(std::move(elements));
return ParseToken(TokKind::kRparen,
StrCat("expects ')' at the end of the tuple with ",
ShapeUtil::TupleElementCount(shape), " elements"));
}
bool HloParserImpl::ParseNonTupleLiteral(Literal* literal, const Shape& shape) {
CHECK(LayoutUtil::IsDenseArray(shape)) << shape.ToString(/*print_layout=*/true);
return ParseDenseLiteral(literal, shape);
}
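// Parses a dense array literal as nested braces matching the shape's rank,
// e.g. "{{1, 2, 3}, {4, 5, 6}}" for a [2,3] array. A `...` token at nest
// level 1 fills the rest of the literal with deterministic garbage values,
// which lets large constants be elided in hand-written HLO.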
bool HloParserImpl::ParseDenseLiteral(Literal* literal, const Shape& shape) {
const int rank = static_cast<int>(shape.rank());
*literal = LiteralUtil::CreateFromDimensions(shape.element_type(),
shape.dimensions());
int64_t nest_level = 0;
int64_t linear_index = 0;
std::vector<int64_t> elems_seen_per_dim(rank);
auto get_index_str = [&elems_seen_per_dim](int dim) -> std::string {
std::vector<int64_t> elems_seen_until_dim(elems_seen_per_dim.begin(),
elems_seen_per_dim.begin() + dim);
return StrCat("[",
StrJoin(elems_seen_until_dim, ",",
[](std::string* out, const int64_t num_elems) {
StrAppend(out, num_elems - 1);
}),
"]");
};
auto add_one_elem_seen = [&] {
if (rank > 0) {
if (nest_level != rank) {
return TokenError(absl::StrFormat(
"expects nested array in rank %d, but sees %d", rank, nest_level));
}
elems_seen_per_dim[rank - 1]++;
if (elems_seen_per_dim[rank - 1] > shape.dimensions(rank - 1)) {
return TokenError(absl::StrFormat(
"expects %d elements on the minor-most dimension, but "
"sees more",
shape.dimensions(rank - 1)));
}
}
return true;
};
do {
switch (lexer_.GetKind()) {
default:
return TokenError("unexpected token type in a literal");
case TokKind::kLbrace: {
nest_level++;
if (nest_level > rank) {
return TokenError(absl::StrFormat(
"expects nested array in rank %d, but sees larger", rank));
}
if (nest_level > 1) {
elems_seen_per_dim[nest_level - 2]++;
if (elems_seen_per_dim[nest_level - 2] >
shape.dimensions(nest_level - 2)) {
return TokenError(absl::StrFormat(
"expects %d elements in the %sth element, but sees more",
shape.dimensions(nest_level - 2),
get_index_str(nest_level - 2)));
}
}
lexer_.Lex();
break;
}
case TokKind::kRbrace: {
if (nest_level == 0) {
return TokenError("unexpected '}' token");
}
nest_level--;
if (elems_seen_per_dim[nest_level] != shape.dimensions(nest_level)) {
return TokenError(absl::StrFormat(
"expects %d elements in the %sth element, but sees %d",
shape.dimensions(nest_level), get_index_str(nest_level),
elems_seen_per_dim[nest_level]));
}
elems_seen_per_dim[nest_level] = 0;
lexer_.Lex();
break;
}
case TokKind::kLparen: {
if (!primitive_util::IsComplexType(shape.element_type())) {
return TokenError(
absl::StrFormat("unexpected '(' in literal. Parens are only "
"valid for complex literals"));
}
std::complex<double> value;
LocTy loc = lexer_.GetLoc();
if (!add_one_elem_seen() || !ParseComplex(&value) ||
!SetValueInLiteral(loc, value, linear_index++, literal)) {
return false;
}
break;
}
case TokKind::kDots: {
if (nest_level != 1) {
return TokenError(absl::StrFormat(
"expects `...` at nest level 1, but sees it at nest level %d",
nest_level));
}
elems_seen_per_dim[0] = shape.dimensions(0);
lexer_.Lex();
static uint32_t data = 0;
static_assert(sizeof(bool) == 1);
constexpr uint32_t kBooleanMask = 0x01010101;
constexpr uint32_t kNoMask = 0xFFFFFFFF;
const uint32_t mask =
(shape.element_type() == PRED) ? kBooleanMask : kNoMask;
uint32_t* raw_data = static_cast<uint32_t*>(literal->untyped_data());
for (int64_t i = 0; i < literal->size_bytes() / 4; ++i) {
raw_data[i] = data++ & mask;
}
uint8_t* raw_data_int8 = static_cast<uint8_t*>(literal->untyped_data());
static uint8_t data_int8 = 0;
for (int64_t i = 0; i < literal->size_bytes() % 4; ++i) {
raw_data_int8[literal->size_bytes() / 4 + i] = data_int8++ & mask;
}
break;
}
case TokKind::kComma:
lexer_.Lex();
break;
case TokKind::kw_true:
case TokKind::kw_false:
case TokKind::kInt:
case TokKind::kDecimal:
case TokKind::kw_inf:
case TokKind::kNegInf: {
add_one_elem_seen();
if (lexer_.GetKind() == TokKind::kw_true ||
lexer_.GetKind() == TokKind::kw_false) {
if (!SetValueInLiteral(lexer_.GetLoc(),
lexer_.GetKind() == TokKind::kw_true,
linear_index++, literal)) {
return false;
}
lexer_.Lex();
} else if (primitive_util::IsIntegralType(shape.element_type()) ||
shape.element_type() == PRED) {
LocTy loc = lexer_.GetLoc();
int64_t value;
if (!ParseInt64(&value)) {
return Error(loc, StrCat("expects integer for primitive type: ",
PrimitiveType_Name(shape.element_type())));
}
if (!SetValueInLiteral(loc, value, linear_index++, literal)) {
return false;
}
} else if (primitive_util::IsFloatingPointType(shape.element_type())) {
LocTy loc = lexer_.GetLoc();
double value;
if (!ParseDouble(&value)) {
return Error(
loc, StrCat("expect floating point value for primitive type: ",
PrimitiveType_Name(shape.element_type())));
}
if (!SetValueInLiteral(loc, value, linear_index++, literal)) {
return false;
}
} else {
return TokenError(StrCat("unsupported primitive type ",
PrimitiveType_Name(shape.element_type())));
}
break;
}
}
} while (nest_level > 0);
*literal = literal->Relayout(shape.layout());
return true;
}
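// Parses an operand list. Each operand is either a (possibly
// shape-prefixed) instruction name, e.g. "f32[8] %x" (illustrative), or a
// full nested instruction; if neither parse succeeds, the errors from both
// attempts are reported together.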
bool HloParserImpl::ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder) {
CHECK(operands != nullptr);
if (!ParseToken(TokKind::kLparen,
"expects '(' at the beginning of operands")) {
return false;
}
if (lexer_.GetKind() == TokKind::kRparen) {
} else {
do {
HloLexer lexer_copy = lexer_;
std::vector<std::string> saved_errors;
std::swap(saved_errors, error_);
bool is_normal_operand = [&] {
LocTy loc = lexer_.GetLoc();
std::string name;
optional<Shape> shape;
if (CanBeShape()) {
shape.emplace();
if (!ParseShape(&shape.value())) {
return false;
}
}
if (!ParseName(&name)) {
if (shape.has_value() && create_missing_instruction_ != nullptr &&
scoped_name_tables_.size() == 1) {
name = "";
} else {
return false;
}
}
std::pair<HloInstruction*, LocTy>* instruction =
FindInstruction(name, shape);
if (instruction == nullptr) {
return Error(loc, StrCat("instruction does not exist: ", name));
}
auto next = lexer_.GetKind();
if (next != TokKind::kComma && next != TokKind::kRparen) {
return false;
}
operands->push_back(instruction->first);
return true;
}();
if (is_normal_operand) {
error_ = std::move(saved_errors);
continue;
}
std::vector<std::string> normal_operand_errors;
std::swap(error_, normal_operand_errors);
lexer_ = lexer_copy;
LocTy loc = lexer_.GetLoc();
bool is_nested_instruction = ParseInstructionRhs(
builder, /*name=*/"", loc, /*allow_attributes=*/false);
if (is_nested_instruction) {
operands->push_back(builder->last_added_instruction());
error_ = std::move(saved_errors);
continue;
}
std::vector<std::string> nested_instruction_errors;
std::swap(error_, nested_instruction_errors);
error_ = std::move(saved_errors);
Error(loc,
"cannot parse as an instruction name or as a nested instruction:");
error_.insert(error_.end(),
std::make_move_iterator(normal_operand_errors.begin()),
std::make_move_iterator(normal_operand_errors.end()));
error_.insert(error_.end(),
std::make_move_iterator(nested_instruction_errors.begin()),
std::make_move_iterator(nested_instruction_errors.end()));
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRparen, "expects ')' at the end of operands");
}
bool HloParserImpl::ParseOperands(std::vector<HloInstruction*>* operands,
HloComputation::Builder* builder,
const int expected_size) {
CHECK(operands != nullptr);
LocTy loc = lexer_.GetLoc();
if (!ParseOperands(operands, builder)) {
return false;
}
if (expected_size != operands->size()) {
return Error(loc, StrCat("expects ", expected_size, " operands, but has ",
operands->size(), " operands"));
}
return true;
}
bool HloParserImpl::ParseSubAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs) {
LocTy loc = lexer_.GetLoc();
if (!ParseToken(TokKind::kLbrace, "expects '{' to start sub attributes")) {
return false;
}
absl::flat_hash_set<std::string> seen_attrs;
if (lexer_.GetKind() == TokKind::kRbrace) {
} else {
do {
EatIfPresent(TokKind::kComma);
if (!ParseAttributeHelper(attrs, &seen_attrs)) {
return false;
}
} while (lexer_.GetKind() != TokKind::kRbrace);
}
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, StrFormat("sub-attribute %s is expected but not seen",
attr_it.first));
}
}
return ParseToken(TokKind::kRbrace, "expects '}' to end sub attributes");
}
bool HloParserImpl::ParseAttributes(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
bool allow_attributes, const std::optional<Shape>& shape) {
LocTy loc = lexer_.GetLoc();
absl::flat_hash_set<std::string> seen_attrs;
if (allow_attributes) {
while (EatIfPresent(TokKind::kComma)) {
if (!ParseAttributeHelper(attrs, &seen_attrs, shape)) {
return false;
}
}
}
for (const auto& attr_it : attrs) {
if (attr_it.second.required &&
seen_attrs.find(attr_it.first) == seen_attrs.end()) {
return Error(loc, StrFormat("attribute %s is expected but not seen",
attr_it.first));
}
}
return true;
}
bool HloParserImpl::ParseAttributeHelper(
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
absl::flat_hash_set<std::string>* seen_attrs,
const std::optional<Shape>& shape) {
LocTy loc = lexer_.GetLoc();
std::string name;
if (!ParseAttributeName(&name)) {
return Error(loc, "error parsing attributes");
}
VLOG(kDebugLevel) << "Parsing attribute " << name;
if (!seen_attrs->insert(name).second) {
return Error(loc, StrFormat("attribute %s already exists", name));
}
auto attr_it = attrs.find(name);
if (attr_it == attrs.end()) {
std::string allowed_attrs;
if (attrs.empty()) {
allowed_attrs = "No attributes are allowed here.";
} else {
allowed_attrs =
StrCat("Allowed attributes: ",
StrJoin(attrs, ", ",
[&](std::string* out,
const std::pair<std::string, AttrConfig>& kv) {
StrAppend(out, kv.first);
}));
}
return Error(
loc, StrFormat("unexpected attribute \"%s\". %s", name, allowed_attrs));
}
AttrTy attr_type = attr_it->second.attr_type;
void* attr_out_ptr = attr_it->second.result;
bool success = [&] {
LocTy attr_loc = lexer_.GetLoc();
switch (attr_type) {
case AttrTy::kBool: {
bool result;
if (!ParseBool(&result)) {
return false;
}
static_cast<optional<bool>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kBracedBoolListOrBool: {
if (!ParseBooleanListOrSingleBoolean(
static_cast<BoolList*>(attr_out_ptr))) {
return false;
}
return true;
}
case AttrTy::kInt64: {
int64_t result;
if (!ParseInt64(&result)) {
return false;
}
static_cast<optional<int64_t>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kInt32: {
int64_t result;
if (!ParseInt64(&result)) {
return false;
}
if (result != static_cast<int32_t>(result)) {
return Error(attr_loc, "value out of range for int32_t");
}
static_cast<optional<int32_t>*>(attr_out_ptr)
->emplace(static_cast<int32_t>(result));
return true;
}
case AttrTy::kFloat: {
double result;
if (!ParseDouble(&result)) {
return false;
}
if (result > std::numeric_limits<float>::max() ||
result < std::numeric_limits<float>::lowest()) {
return Error(attr_loc, "value out of range for float");
}
static_cast<optional<float>*>(attr_out_ptr)
->emplace(static_cast<float>(result));
return true;
}
case AttrTy::kHloComputation: {
HloComputation* result = nullptr;
if (!ParseHloComputation(&result)) {
return false;
}
static_cast<optional<HloComputation*>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kBracedHloComputationList: {
std::vector<HloComputation*> result;
if (!ParseHloComputationList(&result)) {
return false;
}
static_cast<optional<std::vector<HloComputation*>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kFftType: {
FftType result;
if (!ParseFftType(&result)) {
return false;
}
static_cast<optional<FftType>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kPaddingType: {
PaddingType result;
if (!ParsePaddingType(&result)) {
return false;
}
static_cast<optional<PaddingType>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kComparisonDirection: {
ComparisonDirection result;
if (!ParseComparisonDirection(&result)) {
return false;
}
static_cast<optional<ComparisonDirection>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kComparisonType: {
Comparison::Type result;
if (!ParseComparisonType(&result)) {
return false;
}
static_cast<optional<Comparison::Type>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kEnum: {
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects an enumeration value");
}
std::string result = lexer_.GetStrVal();
lexer_.Lex();
static_cast<optional<std::string>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kWindow: {
Window result;
if (!ParseWindow(&result, /*expect_outer_curlies=*/true)) {
return false;
}
static_cast<optional<Window>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kConvolutionDimensionNumbers: {
ConvolutionDimensionNumbers result;
if (!ParseConvolutionDimensionNumbers(&result)) {
return false;
}
static_cast<optional<ConvolutionDimensionNumbers>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kSharding: {
std::optional<HloSharding> sharding;
if (!ParseSharding(sharding)) {
return false;
}
static_cast<optional<HloSharding>*>(attr_out_ptr)
->emplace(std::move(*sharding));
return true;
}
case AttrTy::kCollectiveDeviceList: {
CollectiveDeviceList device_list;
if (!ParseCollectiveDeviceList(&device_list)) {
return false;
}
*(static_cast<CollectiveDeviceList*>(attr_out_ptr)) = device_list;
return true;
}
case AttrTy::kFrontendAttributes: {
FrontendAttributes frontend_attributes;
if (!ParseFrontendAttributes(&frontend_attributes)) {
return false;
}
static_cast<optional<FrontendAttributes>*>(attr_out_ptr)
->emplace(frontend_attributes);
return true;
}
case AttrTy::kStatisticsViz: {
StatisticsViz statistics_viz;
if (!ParseStatisticsViz(&statistics_viz)) {
return false;
}
static_cast<optional<StatisticsViz>*>(attr_out_ptr)
->emplace(statistics_viz);
return true;
}
case AttrTy::kParameterReplication: {
ParameterReplication parameter_replication;
if (!ParseParameterReplication(¶meter_replication)) {
return false;
}
static_cast<optional<ParameterReplication>*>(attr_out_ptr)
->emplace(parameter_replication);
return true;
}
case AttrTy::kInstructionList: {
std::vector<HloInstruction*> result;
if (!ParseInstructionNames(&result)) {
return false;
}
static_cast<optional<std::vector<HloInstruction*>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kFusionKind: {
HloInstruction::FusionKind result;
if (!ParseFusionKind(&result)) {
return false;
}
static_cast<optional<HloInstruction::FusionKind>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kBracedInt64List: {
std::vector<int64_t> result;
if (!ParseInt64List(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
&result)) {
return false;
}
static_cast<optional<std::vector<int64_t>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kBracedInt64ListList: {
std::vector<std::vector<int64_t>> result;
if (!ParseInt64ListList(TokKind::kLbrace, TokKind::kRbrace,
TokKind::kComma, &result)) {
return false;
}
static_cast<optional<std::vector<std::vector<int64_t>>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kSliceRanges: {
SliceRanges result;
if (!ParseSliceRanges(&result)) {
return false;
}
static_cast<optional<SliceRanges>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kPaddingConfig: {
PaddingConfig result;
if (!ParsePaddingConfig(&result)) {
return false;
}
static_cast<optional<PaddingConfig>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kString: {
std::string result;
if (!ParseString(&result)) {
return false;
}
static_cast<optional<std::string>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kStringOrJsonDict: {
std::string result;
if (lexer_.GetKind() == TokKind::kString) {
if (!ParseString(&result)) {
return false;
}
} else if (lexer_.GetKind() == TokKind::kLbrace) {
if (!ParseJsonDict(&result)) {
return false;
}
} else {
return false;
}
static_cast<optional<std::string>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kOriginalValue: {
if (!shape) {
return TokenError("expects instruction shape");
}
return ParseOriginalValue(
static_cast<optional<std::shared_ptr<OriginalValue>>*>(
attr_out_ptr),
*shape);
}
case AttrTy::kMetadata: {
OpMetadata result;
if (!ParseMetadata(result)) {
return false;
}
static_cast<optional<OpMetadata>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kDistribution: {
RandomDistribution result;
if (!ParseRandomDistribution(&result)) {
return false;
}
static_cast<optional<RandomDistribution>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kDomain: {
return ParseDomain(static_cast<DomainData*>(attr_out_ptr));
}
case AttrTy::kPrecisionList: {
std::vector<PrecisionConfig::Precision> result;
if (!ParsePrecisionList(&result)) {
return false;
}
static_cast<optional<std::vector<PrecisionConfig::Precision>>*>(
attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kShape: {
Shape result;
if (!ParseShape(&result)) {
return false;
}
static_cast<optional<Shape>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kShapeList: {
std::vector<Shape> result;
if (!ParseShapeList(&result)) {
return false;
}
static_cast<optional<std::vector<Shape>>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kRandomAlgorithm: {
RandomAlgorithm result;
if (!ParseRandomAlgorithm(&result)) {
return false;
}
static_cast<optional<RandomAlgorithm>*>(attr_out_ptr)->emplace(result);
return true;
}
case AttrTy::kPrecisionAlgorithm: {
PrecisionConfig::Algorithm result;
if (!ParseAlgorithm(&result)) {
return false;
}
static_cast<optional<PrecisionConfig::Algorithm>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kAliasing: {
AliasingData aliasing_data;
if (!ParseAliasing(&aliasing_data)) {
return false;
}
static_cast<optional<AliasingData>*>(attr_out_ptr)
->emplace(aliasing_data);
return true;
}
case AttrTy::kBufferDonor: {
BufferDonor buffer_donor;
if (!ParseBufferDonor(&buffer_donor)) {
return false;
}
static_cast<optional<BufferDonor>*>(attr_out_ptr)
->emplace(buffer_donor);
return true;
}
case AttrTy::kComputationLayout: {
ComputationLayout computation_layout(ShapeLayout(Shape{}));
if (!ParseComputationLayout(&computation_layout)) {
return false;
}
static_cast<optional<ComputationLayout>*>(attr_out_ptr)
->emplace(computation_layout);
return true;
}
case AttrTy::kInstructionAliasing: {
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
aliasing_output_operand_pairs;
if (!ParseInstructionOutputOperandAliasing(
&aliasing_output_operand_pairs)) {
return false;
}
static_cast<optional<std::vector<
std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>>*>(
attr_out_ptr)
->emplace(std::move(aliasing_output_operand_pairs));
return true;
}
case AttrTy::kLiteral: {
Literal result;
if (!ParseLiteral(&result)) {
return false;
}
static_cast<optional<Literal>*>(attr_out_ptr)
->emplace(std::move(result));
return true;
}
case AttrTy::kCustomCallSchedule: {
CustomCallSchedule result;
if (!ParseCustomCallSchedule(&result)) {
return false;
}
static_cast<optional<CustomCallSchedule>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kCustomCallApiVersion: {
CustomCallApiVersion result;
if (!ParseCustomCallApiVersion(&result)) {
return false;
}
static_cast<optional<CustomCallApiVersion>*>(attr_out_ptr)
->emplace(result);
return true;
}
case AttrTy::kSparsityDescriptor: {
std::vector<SparsityDescriptor> result;
if (!ParseSparsityDescriptor(&result)) {
return false;
}
*static_cast<std::vector<SparsityDescriptor>*>(attr_out_ptr) =
std::move(result);
return true;
}
}
}();
if (!success) {
return Error(loc, StrFormat("error parsing attribute %s", name));
}
return true;
}
bool HloParserImpl::CopyAttributeToProtoMessage(
absl::flat_hash_set<std::string> non_proto_attrs,
const absl::flat_hash_map<std::string, AttrConfig>& attrs,
tsl::protobuf::Message* message) {
const tsl::protobuf::Descriptor* descriptor = message->GetDescriptor();
const tsl::protobuf::Reflection* reflection = message->GetReflection();
for (const auto& p : attrs) {
const std::string& name = p.first;
if (non_proto_attrs.find(name) != non_proto_attrs.end()) {
continue;
}
const tsl::protobuf::FieldDescriptor* fd =
descriptor->FindFieldByName(name);
if (!fd) {
std::string allowed_attrs = "Allowed attributes: ";
for (int i = 0; i < descriptor->field_count(); ++i) {
if (i == 0) {
absl::StrAppend(&allowed_attrs, descriptor->field(i)->name());
} else {
absl::StrAppend(&allowed_attrs, ", ", descriptor->field(i)->name());
}
}
return TokenError(
StrFormat("unexpected attribute \"%s\". %s", name, allowed_attrs));
}
CHECK(!fd->is_repeated());
bool success = [&] {
switch (fd->type()) {
case tsl::protobuf::FieldDescriptor::TYPE_BOOL: {
auto attr_value = static_cast<optional<bool>*>(p.second.result);
if (attr_value->has_value()) {
reflection->SetBool(message, fd, **attr_value);
}
return true;
}
case tsl::protobuf::FieldDescriptor::TYPE_ENUM: {
auto attr_value =
static_cast<optional<std::string>*>(p.second.result);
if (attr_value->has_value()) {
const tsl::protobuf::EnumValueDescriptor* evd =
fd->enum_type()->FindValueByName(**attr_value);
reflection->SetEnum(message, fd, evd);
}
return true;
}
default:
return false;
}
}();
if (!success) {
return TokenError(StrFormat("error parsing attribute %s", name));
}
}
return true;
}
bool HloParserImpl::ParseAttributesAsProtoMessage(
const absl::flat_hash_map<std::string, AttrConfig>& non_proto_attrs,
tsl::protobuf::Message* message) {
const tsl::protobuf::Descriptor* descriptor = message->GetDescriptor();
absl::flat_hash_map<std::string, AttrConfig> attrs;
std::vector<optional<bool>> bool_params;
std::vector<optional<std::string>> string_params;
bool_params.reserve(descriptor->field_count());
string_params.reserve(descriptor->field_count());
for (int field_idx = 0; field_idx < descriptor->field_count(); field_idx++) {
const tsl::protobuf::FieldDescriptor* fd = descriptor->field(field_idx);
absl::string_view field_name = fd->name();
switch (fd->type()) {
case tsl::protobuf::FieldDescriptor::TYPE_BOOL: {
bool_params.emplace_back(std::nullopt);
attrs[field_name] = {/*required=*/false, AttrTy::kBool,
&bool_params.back()};
break;
}
case tsl::protobuf::FieldDescriptor::TYPE_ENUM: {
string_params.emplace_back(std::nullopt);
attrs[field_name] = {/*required=*/false, AttrTy::kEnum,
&string_params.back()};
break;
}
default:
return TokenError(absl::StrFormat(
"Unexpected protocol buffer type: %s ", fd->DebugString()));
}
}
absl::flat_hash_set<std::string> non_proto_attrs_names;
non_proto_attrs_names.reserve(non_proto_attrs.size());
for (const auto& p : non_proto_attrs) {
const std::string& attr_name = p.first;
if (attrs.find(attr_name) == attrs.end()) {
non_proto_attrs_names.insert(attr_name);
attrs[attr_name] = p.second;
}
}
if (!ParseAttributes(attrs)) {
return false;
}
return CopyAttributeToProtoMessage(non_proto_attrs_names, attrs, message);
}
bool HloParserImpl::ParseComputationName(HloComputation** value) {
std::string name;
LocTy loc = lexer_.GetLoc();
if (!ParseName(&name)) {
return Error(loc, "expects computation name");
}
std::pair<HloComputation*, LocTy>* computation =
tsl::gtl::FindOrNull(computation_pool_, name);
if (computation == nullptr) {
return Error(loc, StrCat("computation does not exist: ", name));
}
*value = computation->first;
return true;
}
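// Parses a window description such as
//   {size=2x2 stride=1x1 pad=0_0x0_0 lhs_dilate=1x1 rhs_dilate=1x1}
// (illustrative). `size` determines the rank; the other fields, when
// present, must match it and otherwise default to stride/dilation 1, zero
// padding, and no reversal. Each DxD-style field uses the "ixj..." syntax
// handled by ParseDxD.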
bool HloParserImpl::ParseWindow(Window* window, bool expect_outer_curlies) {
LocTy loc = lexer_.GetLoc();
if (expect_outer_curlies &&
!ParseToken(TokKind::kLbrace, "expected '{' to start window attribute")) {
return false;
}
std::vector<int64_t> size;
std::vector<int64_t> stride;
std::vector<std::vector<int64_t>> pad;
std::vector<int64_t> lhs_dilate;
std::vector<int64_t> rhs_dilate;
std::vector<int64_t> rhs_reversal;
const auto end_token =
expect_outer_curlies ? TokKind::kRbrace : TokKind::kEof;
while (lexer_.GetKind() != end_token) {
LocTy attr_loc = lexer_.GetLoc();
std::string field_name;
if (!ParseAttributeName(&field_name)) {
return Error(attr_loc, "expects sub-attributes in window");
}
bool ok = [&] {
if (field_name == "size") {
return ParseDxD("size", &size);
}
if (field_name == "stride") {
return ParseDxD("stride", &stride);
}
if (field_name == "lhs_dilate") {
return ParseDxD("lhs_dilate", &lhs_dilate);
}
if (field_name == "rhs_dilate") {
return ParseDxD("rls_dilate", &rhs_dilate);
}
if (field_name == "pad") {
return ParseWindowPad(&pad);
}
if (field_name == "rhs_reversal") {
return ParseDxD("rhs_reversal", &rhs_reversal);
}
return Error(attr_loc, StrCat("unexpected attribute name: ", field_name));
}();
if (!ok) {
return false;
}
}
if (!stride.empty() && stride.size() != size.size()) {
return Error(loc, "expects 'stride=' has the same size as 'size='");
}
if (!lhs_dilate.empty() && lhs_dilate.size() != size.size()) {
return Error(loc, "expects 'lhs_dilate=' has the same size as 'size='");
}
if (!rhs_dilate.empty() && rhs_dilate.size() != size.size()) {
return Error(loc, "expects 'rhs_dilate=' has the same size as 'size='");
}
if (!pad.empty() && pad.size() != size.size()) {
return Error(loc, "expects 'pad=' has the same size as 'size='");
}
for (int i = 0; i < size.size(); i++) {
window->add_dimensions()->set_size(size[i]);
if (!pad.empty()) {
window->mutable_dimensions(i)->set_padding_low(pad[i][0]);
window->mutable_dimensions(i)->set_padding_high(pad[i][1]);
}
window->mutable_dimensions(i)->set_stride(stride.empty() ? 1 : stride[i]);
window->mutable_dimensions(i)->set_base_dilation(
lhs_dilate.empty() ? 1 : lhs_dilate[i]);
window->mutable_dimensions(i)->set_window_dilation(
rhs_dilate.empty() ? 1 : rhs_dilate[i]);
window->mutable_dimensions(i)->set_window_reversal(
rhs_reversal.empty() ? false : (rhs_reversal[i] == 1));
}
return !expect_outer_curlies ||
ParseToken(TokKind::kRbrace, "expected '}' to end window attribute");
}
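// Parses convolution dimension numbers written as
// <lhs>_<rhs>-><output>, e.g. "b0f_0io->b0f" (illustrative): `b`/`f` mark
// the batch and feature dimensions, `i`/`o` mark the kernel input/output
// feature dimensions, digits number the spatial dimensions, and `?` marks
// an ignored position.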
bool HloParserImpl::ParseConvolutionDimensionNumbers(
ConvolutionDimensionNumbers* dnums) {
if (lexer_.GetKind() != TokKind::kDimLabels) {
return TokenError("expects dim labels pattern, e.g., 'bf0_0io->0bf'");
}
std::string str = lexer_.GetStrVal();
std::vector<std::string> split1 = absl::StrSplit(str, '_');
if (split1.size() != 2) {
LOG(FATAL) << "expects 3 items: lhs, rhs, and output dims, but sees "
<< str;
}
std::vector<std::string> split2 = absl::StrSplit(split1[1], "->");
if (split2.size() != 2) {
LOG(FATAL) << "expects 3 items: lhs, rhs, and output dims, but sees "
<< str;
}
absl::string_view lhs = split1[0];
absl::string_view rhs = split2[0];
absl::string_view out = split2[1];
auto is_unique = [](absl::string_view str) -> bool {
absl::flat_hash_set<char> chars;
for (char c : str) {
if (c == '?') {
continue;
}
if (!chars.insert(c).second) {
return false;
}
}
return true;
};
{
if (!is_unique(lhs)) {
return TokenError(
StrCat("expects unique lhs dimension numbers, but sees ", lhs));
}
for (char c : lhs) {
if (c != 'b' && c != 'f' && c != '?') {
dnums->add_input_spatial_dimensions(-1);
}
}
for (int i = 0; i < lhs.size(); i++) {
char c = lhs[i];
if (c == '?') {
continue;
} else if (c == 'b') {
dnums->set_input_batch_dimension(i);
} else if (c == 'f') {
dnums->set_input_feature_dimension(i);
} else if (c < '0' + lhs.size() && c >= '0') {
dnums->set_input_spatial_dimensions(c - '0', i);
} else {
return TokenError(StrFormat(
"expects [0-%dbf?] in lhs dimension numbers", lhs.size() - 1));
}
}
}
{
if (!is_unique(rhs)) {
return TokenError(
StrCat("expects unique rhs dimension numbers, but sees ", rhs));
}
for (char c : rhs) {
if (c != 'i' && c != 'o' && c != '?') {
dnums->add_kernel_spatial_dimensions(-1);
}
}
for (int i = 0; i < rhs.size(); i++) {
char c = rhs[i];
if (c == '?') {
continue;
} else if (c == 'i') {
dnums->set_kernel_input_feature_dimension(i);
} else if (c == 'o') {
dnums->set_kernel_output_feature_dimension(i);
} else if (c < '0' + rhs.size() && c >= '0') {
dnums->set_kernel_spatial_dimensions(c - '0', i);
} else {
return TokenError(StrFormat(
"expects [0-%dio?] in rhs dimension numbers", rhs.size() - 1));
}
}
}
{
if (!is_unique(out)) {
return TokenError(
StrCat("expects unique output dimension numbers, but sees ", out));
}
for (char c : out) {
if (c != 'b' && c != 'f' && c != '?') {
dnums->add_output_spatial_dimensions(-1);
}
}
for (int i = 0; i < out.size(); i++) {
char c = out[i];
if (c == '?') {
continue;
} else if (c == 'b') {
dnums->set_output_batch_dimension(i);
} else if (c == 'f') {
dnums->set_output_feature_dimension(i);
} else if (c < '0' + out.size() && c >= '0') {
dnums->set_output_spatial_dimensions(c - '0', i);
} else {
return TokenError(StrFormat(
"expects [0-%dbf?] in output dimension numbers", out.size() - 1));
}
}
}
if (dnums->input_spatial_dimensions_size() !=
dnums->output_spatial_dimensions_size() ||
dnums->input_spatial_dimensions_size() !=
dnums->kernel_spatial_dimensions_size()) {
return TokenError(
StrFormat("input, kernel, and output must have same number of spatial "
"dimensions, but got %d, %d, %d, respectively.",
dnums->input_spatial_dimensions_size(),
dnums->kernel_spatial_dimensions_size(),
dnums->output_spatial_dimensions_size()));
}
lexer_.Lex();
return true;
}
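// Parses slice ranges of the form {[start:limit:step], ...}; the step is
// optional and defaults to 1, e.g. "{[0:8:2], [1:4]}".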
bool HloParserImpl::ParseSliceRanges(SliceRanges* result) {
if (!ParseToken(TokKind::kLbrace, "expects '{' to start ranges")) {
return false;
}
std::vector<std::vector<int64_t>> ranges;
if (lexer_.GetKind() == TokKind::kRbrace) {
return ParseToken(TokKind::kRbrace, "expects '}' to end ranges");
}
do {
LocTy loc = lexer_.GetLoc();
ranges.emplace_back();
if (!ParseInt64List(TokKind::kLsquare, TokKind::kRsquare, TokKind::kColon,
&ranges.back())) {
return false;
}
const auto& range = ranges.back();
if (range.size() != 2 && range.size() != 3) {
return Error(loc,
StrFormat("expects [start:limit:step] or [start:limit], "
"but sees %d elements.",
range.size()));
}
} while (EatIfPresent(TokKind::kComma));
for (const auto& range : ranges) {
result->starts.push_back(range[0]);
result->limits.push_back(range[1]);
result->strides.push_back(range.size() == 3 ? range[2] : 1);
}
return ParseToken(TokKind::kRbrace, "expects '}' to end ranges");
}
bool HloParserImpl::ParsePrecisionList(
std::vector<PrecisionConfig::Precision>* result) {
auto parse_and_add_item = [&]() {
PrecisionConfig::Precision item;
if (!ParsePrecision(&item)) {
return false;
}
result->push_back(item);
return true;
};
return ParseList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseHloComputation(HloComputation** result) {
if (lexer_.GetKind() == TokKind::kLbrace) {
return ParseInstructionList(result, "_");
}
return ParseComputationName(result);
}
bool HloParserImpl::ParseHloComputationList(
std::vector<HloComputation*>* result) {
auto parse_and_add_item = [&]() {
HloComputation* computation;
if (!ParseHloComputation(&computation)) {
return false;
}
VLOG(kDebugLevel) << "parsed computation " << computation->name();
result->push_back(computation);
return true;
};
return ParseList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseShapeList(std::vector<Shape>* result) {
auto parse_and_add_item = [&]() {
Shape shape;
if (!ParseShape(&shape)) {
return false;
}
result->push_back(std::move(shape));
return true;
};
return ParseList(TokKind::kLbrace, TokKind::kRbrace, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseInt64List(const TokKind start, const TokKind end,
const TokKind delim,
std::vector<int64_t>* result) {
auto parse_and_add_item = [&]() {
int64_t i;
if (!ParseInt64(&i)) {
return false;
}
result->push_back(i);
return true;
};
return ParseList(start, end, delim, parse_and_add_item);
}
bool HloParserImpl::ParseInt64ListList(
const TokKind start, const TokKind end, const TokKind delim,
std::vector<std::vector<int64_t>>* result) {
auto parse_and_add_item = [&]() {
std::vector<int64_t> item;
if (!ParseInt64List(start, end, delim, &item)) {
return false;
}
result->push_back(item);
return true;
};
return ParseList(start, end, delim, parse_and_add_item);
}
bool HloParserImpl::ParseList(const TokKind start, const TokKind end,
const TokKind delim,
absl::FunctionRef<bool()> parse_and_add_item) {
if (!ParseToken(start, StrCat("expects a list starting with ",
TokKindToString(start)))) {
return false;
}
if (lexer_.GetKind() == end) {
} else {
do {
if (!parse_and_add_item()) {
return false;
}
} while (EatIfPresent(delim));
}
return ParseToken(
end, StrCat("expects a list to end with ", TokKindToString(end)));
}
bool HloParserImpl::ParseParamListToShape(Shape* shape, LocTy* shape_loc) {
if (!ParseParamList() || !ParseToken(TokKind::kArrow, "expects '->'")) {
return false;
}
*shape_loc = lexer_.GetLoc();
return ParseShape(shape);
}
bool HloParserImpl::CanBeParamListToShape() {
return lexer_.GetKind() == TokKind::kLparen;
}
bool HloParserImpl::ParseParamList() {
if (!ParseToken(TokKind::kLparen,
"expects '(' at the beginning of param list")) {
return false;
}
if (lexer_.GetKind() == TokKind::kRparen) {
} else {
do {
Shape shape;
std::string name;
if (!ParseName(&name) || !ParseShape(&shape)) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRparen, "expects ')' at the end of param list");
}
bool HloParserImpl::ParseDimensionSizes(std::vector<int64_t>* dimension_sizes,
std::vector<bool>* dynamic_dimensions) {
auto parse_and_add_item = [&]() {
int64_t i;
bool is_dynamic = false;
if (lexer_.GetKind() == TokKind::kQuestionMark) {
i = Shape::kUnboundedSize;
is_dynamic = true;
lexer_.Lex();
} else {
if (lexer_.GetKind() == TokKind::kLeq) {
is_dynamic = true;
lexer_.Lex();
}
if (!ParseInt64(&i)) {
return false;
}
}
dimension_sizes->push_back(i);
dynamic_dimensions->push_back(is_dynamic);
return true;
};
return ParseList(TokKind::kLsquare, TokKind::kRsquare, TokKind::kComma,
parse_and_add_item);
}
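// Parses sparse dim level types: D (dense), C (compressed), S (singleton),
// or H (loose compressed), each optionally followed by '+' (non-unique)
// and/or '~' (unordered), e.g. "(D, C+~)" (illustrative).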
bool HloParserImpl::ParseDimLevelTypes(
absl::InlinedVector<DimLevelType, InlineRank()>* dim_level_types,
absl::InlinedVector<bool, InlineRank()>* dim_unique,
absl::InlinedVector<bool, InlineRank()>* dim_ordered) {
auto parse_and_add_item = [&]() {
if (lexer_.GetKind() == TokKind::kIdent) {
bool dim_level_type_valid = false;
DimLevelType dim_level_type;
if (lexer_.GetStrVal() == "D") {
lexer_.Lex();
dim_level_type = DIM_DENSE;
dim_level_type_valid = true;
} else if (lexer_.GetStrVal() == "C") {
lexer_.Lex();
dim_level_type = DIM_COMPRESSED;
dim_level_type_valid = true;
} else if (lexer_.GetStrVal() == "S") {
lexer_.Lex();
dim_level_type = DIM_SINGLETON;
dim_level_type_valid = true;
} else if (lexer_.GetStrVal() == "H") {
lexer_.Lex();
dim_level_type = DIM_LOOSE_COMPRESSED;
dim_level_type_valid = true;
}
if (dim_level_type_valid) {
bool new_dim_unique = true;
if (lexer_.GetKind() == TokKind::kPlus) {
new_dim_unique = false;
lexer_.Lex();
}
bool new_dim_ordered = true;
if (lexer_.GetKind() == TokKind::kTilde) {
new_dim_ordered = false;
lexer_.Lex();
}
if (!LayoutUtil::ValidateDimLevel(dim_level_type, new_dim_unique,
new_dim_ordered)) {
return Error(
lexer_.GetLoc(),
"invalid DimLevelType/unique/ordered combination in shape");
}
dim_level_types->push_back(dim_level_type);
dim_unique->push_back(new_dim_unique);
dim_ordered->push_back(new_dim_ordered);
return true;
}
}
return Error(lexer_.GetLoc(),
"expected a DimLevelType abbreviation (D, C, or S)");
};
return ParseList(TokKind::kLparen, TokKind::kRparen, TokKind::kComma,
parse_and_add_item);
}
bool HloParserImpl::ParseTiles(std::vector<Tile>* tiles) {
auto parse_and_add_tile_dimension = [&]() {
int64_t i;
if (ParseInt64(&i)) {
tiles->back().add_dimensions(i);
return true;
}
if (lexer_.GetKind() == TokKind::kAsterisk) {
tiles->back().add_dimensions(Tile::kCombineDimension);
lexer_.Lex();
return true;
}
return false;
};
do {
tiles->push_back(Tile());
if (!ParseList(TokKind::kLparen, TokKind::kRparen, TokKind::kComma,
parse_and_add_tile_dimension)) {
return false;
}
} while (lexer_.GetKind() == TokKind::kLparen);
return true;
}
bool HloParserImpl::ParsePhysicalShape(Shape* physical_shape) {
if (!ParseToken(TokKind::kLparen,
StrCat("expects physical shape to start with ",
TokKindToString(TokKind::kLparen)))) {
return false;
}
if (!ParseShape(physical_shape)) {
return false;
}
if (!ParseToken(TokKind::kRparen,
StrCat("expects physical shape to end with ",
TokKindToString(TokKind::kRparen)))) {
return false;
}
return true;
}
bool HloParserImpl::ParsePrimitiveType(PrimitiveType* result) {
if (lexer_.GetKind() != TokKind::kPrimitiveType) {
return TokenError(absl::StrCat("expected primitive type, saw ",
TokKindToString(lexer_.GetKind())));
}
*result = lexer_.GetPrimitiveTypeVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseUnsignedIntegerType(PrimitiveType* primitive_type) {
if (!ParsePrimitiveType(primitive_type)) {
return false;
}
if (!primitive_util::IsUnsignedIntegralType(*primitive_type)) {
return TokenError("expecting an unsigned integer type");
}
return true;
}
bool HloParserImpl::ParseLayoutIntAttribute(
int64_t* attr_value, absl::string_view attr_description) {
if (!ParseToken(TokKind::kLparen,
StrCat("expects ", attr_description, " to start with ",
TokKindToString(TokKind::kLparen)))) {
return false;
}
if (!ParseInt64(attr_value)) {
return false;
}
if (!ParseToken(TokKind::kRparen,
StrCat("expects ", attr_description, " to end with ",
TokKindToString(TokKind::kRparen)))) {
return false;
}
return true;
}
bool HloParserImpl::ParseSplitConfigs(std::vector<SplitConfig>& split_configs) {
auto parse_and_add_split_index = [&]() {
int64_t i;
if (ParseInt64(&i)) {
split_configs.back().add_split_indices(i);
return true;
}
return false;
};
do {
if (!ParseToken(TokKind::kLparen,
StrCat("expects split configs to start with ",
TokKindToString(TokKind::kLparen)))) {
return false;
}
int64_t dimension;
if (!ParseInt64(&dimension)) {
return false;
}
split_configs.push_back(SplitConfig(dimension, {}));
if (!ParseList(TokKind::kColon, TokKind::kRparen, TokKind::kComma,
parse_and_add_split_index)) {
return false;
}
} while (lexer_.GetKind() == TokKind::kLparen);
return true;
}
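// Parses a layout, e.g. "{1,0}" or, with attributes after a colon,
// something like "{1,0:T(8,128)E(32)S(1)}" (illustrative combination),
// where T introduces tiles, E the element size in bits, and S the memory
// space; the remaining attribute markers are handled case by case below.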
bool HloParserImpl::ParseLayout(Layout* layout) {
absl::InlinedVector<int64_t, InlineRank()> minor_to_major;
DimLevelTypeVector dim_level_types;
absl::InlinedVector<bool, InlineRank()> dim_unique;
absl::InlinedVector<bool, InlineRank()> dim_ordered;
std::vector<Tile> tiles;
PrimitiveType index_primitive_type = PRIMITIVE_TYPE_INVALID;
PrimitiveType pointer_primitive_type = PRIMITIVE_TYPE_INVALID;
int64_t element_size_in_bits = 0;
int64_t memory_space = 0;
std::vector<SplitConfig> split_configs;
std::optional<Shape> physical_shape;
int64_t dynamic_shape_metadata_prefix_bytes = 0;
int64_t tail_padding_alignment_in_elements = 1;
auto parse_and_add_item = [&]() {
int64_t i;
if (!ParseInt64(&i)) {
return false;
}
minor_to_major.push_back(i);
return true;
};
if (!ParseToken(TokKind::kLbrace,
StrCat("expects layout to start with ",
TokKindToString(TokKind::kLbrace)))) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
if (lexer_.GetKind() == TokKind::kInt) {
do {
if (!parse_and_add_item()) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
if (lexer_.GetKind() == TokKind::kColon) {
lexer_.Lex();
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "D") {
lexer_.Lex();
ParseDimLevelTypes(&dim_level_types, &dim_unique, &dim_ordered);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "T") {
lexer_.Lex();
ParseTiles(&tiles);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "L") {
lexer_.Lex();
ParseLayoutIntAttribute(&tail_padding_alignment_in_elements,
"multiple padded to in elements");
}
if (lexer_.GetKind() == TokKind::kOctothorp) {
lexer_.Lex();
ParseToken(
TokKind::kLparen,
StrCat("expects ", TokKindToString(TokKind::kOctothorp),
" to be followed by ", TokKindToString(TokKind::kLparen)));
ParseUnsignedIntegerType(&index_primitive_type);
ParseToken(TokKind::kRparen,
StrCat("expects index primitive type to be followed by ",
TokKindToString(TokKind::kRparen)));
}
if (lexer_.GetKind() == TokKind::kAsterisk) {
lexer_.Lex();
ParseToken(
TokKind::kLparen,
StrCat("expects ", TokKindToString(TokKind::kAsterisk),
" to be followed by ", TokKindToString(TokKind::kLparen)));
ParseUnsignedIntegerType(&pointer_primitive_type);
ParseToken(TokKind::kRparen,
StrCat("expects pointer primitive type to be followed by ",
TokKindToString(TokKind::kRparen)));
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "E") {
lexer_.Lex();
ParseLayoutIntAttribute(&element_size_in_bits, "element size in bits");
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "S") {
lexer_.Lex();
ParseLayoutIntAttribute(&memory_space, "memory space");
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "SC") {
lexer_.Lex();
ParseSplitConfigs(split_configs);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "P") {
lexer_.Lex();
physical_shape.emplace();
ParsePhysicalShape(&*physical_shape);
}
if (lexer_.GetKind() == TokKind::kIdent && lexer_.GetStrVal() == "M") {
lexer_.Lex();
ParseLayoutIntAttribute(&dynamic_shape_metadata_prefix_bytes,
"dynamic shape metadata prefix bytes");
}
}
}
if (!ParseToken(TokKind::kRbrace,
StrCat("expects layout to end with ",
TokKindToString(TokKind::kRbrace)))) {
return false;
}
std::vector<Tile> vec_tiles(tiles.size());
for (int i = 0; i < tiles.size(); i++) {
vec_tiles[i] = Tile(tiles[i]);
}
*layout = LayoutUtil::MakeLayout(
minor_to_major, dim_level_types, dim_unique, dim_ordered, vec_tiles,
tail_padding_alignment_in_elements, index_primitive_type,
pointer_primitive_type, element_size_in_bits, memory_space, split_configs,
std::move(physical_shape), dynamic_shape_metadata_prefix_bytes);
return true;
}
bool HloParserImpl::ParseShape(Shape* result) {
if (EatIfPresent(TokKind::kLparen)) {
std::vector<Shape> shapes;
if (lexer_.GetKind() == TokKind::kRparen) {
} else {
do {
shapes.emplace_back();
if (!ParseShape(&shapes.back())) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
*result = ShapeUtil::MakeTupleShape(shapes);
return ParseToken(TokKind::kRparen, "expects ')' at the end of tuple.");
}
PrimitiveType primitive_type;
if (!ParsePrimitiveType(&primitive_type)) {
return false;
}
std::vector<int64_t> dimension_sizes;
std::vector<bool> dynamic_dimensions;
if (!ParseDimensionSizes(&dimension_sizes, &dynamic_dimensions)) {
return false;
}
result->set_element_type(primitive_type);
for (int i = 0; i < dimension_sizes.size(); ++i) {
result->add_dimensions(dimension_sizes[i]);
result->set_dynamic_dimension(i, dynamic_dimensions[i]);
}
if (options_.fill_missing_layouts() || ShapeUtil::IsScalar(*result)) {
LayoutUtil::SetToDefaultLayout(result);
}
if (lexer_.GetKind() == TokKind::kLbrace &&
(lexer_.LookAhead() == TokKind::kInt ||
lexer_.LookAhead() == TokKind::kColon)) {
Layout layout;
if (!ParseLayout(&layout)) {
return false;
}
if (layout.dim_level_types_size() != 0 &&
layout.dim_level_types_size() != result->rank()) {
return Error(
lexer_.GetLoc(),
StrFormat("Dimensions size is %ld, but dim level types size is %ld.",
result->rank(), layout.dim_level_types_size()));
}
if (layout.minor_to_major_size() != result->rank()) {
return Error(
lexer_.GetLoc(),
StrFormat("Dimensions size is %ld, but minor to major size is %ld.",
result->rank(), layout.minor_to_major_size()));
}
if (LayoutUtil::IsSparse(layout) && layout.tiles_size() > 0) {
return Error(lexer_.GetLoc(),
StrFormat("Layout has tiles, but is for a sparse array: %s",
layout.ToString()));
}
if (!LayoutUtil::IsSparse(layout) && layout.has_physical_shape()) {
return Error(
lexer_.GetLoc(),
StrFormat(
"Layout has physical shape, but is not for a sparse array: %s",
layout.ToString()));
}
*result->mutable_layout() = layout;
}
return true;
}
bool HloParserImpl::CanBeShape() {
return lexer_.GetKind() == TokKind::kPrimitiveType ||
lexer_.GetKind() == TokKind::kLparen;
}
bool HloParserImpl::ParseName(std::string* result) {
VLOG(kDebugLevel) << "ParseName";
if (lexer_.GetKind() != TokKind::kIdent &&
lexer_.GetKind() != TokKind::kName) {
return TokenError("expects name");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseAttributeName(std::string* result) {
if (lexer_.GetKind() != TokKind::kAttributeName) {
return TokenError("expects attribute name");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseString(std::string* result) {
VLOG(kDebugLevel) << "ParseString";
if (lexer_.GetKind() != TokKind::kString) {
return TokenError("expects string");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseJsonDict(std::string* result) {
VLOG(kDebugLevel) << "ParseJsonDict";
if (lexer_.LexJsonDict() != TokKind::kString) {
return TokenError("expects JSON dict");
}
*result = lexer_.GetStrVal();
lexer_.Lex();
return true;
}
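// Parses a DxD-style sub-attribute value: either a single integer or an
// 'x'-separated list of integers such as "2x3x4".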
bool HloParserImpl::ParseDxD(const std::string& name,
std::vector<int64_t>* result) {
LocTy loc = lexer_.GetLoc();
if (!result->empty()) {
return Error(loc, StrFormat("sub-attribute '%s=' already exists", name));
}
if (lexer_.GetKind() == TokKind::kInt) {
int64_t number;
if (!ParseInt64(&number)) {
return Error(loc, StrFormat("expects sub-attribute '%s=i'", name));
}
result->push_back(number);
return true;
}
if (lexer_.GetKind() == TokKind::kDxD) {
std::string str = lexer_.GetStrVal();
if (!SplitToInt64s(str, 'x', result)) {
return Error(loc, StrFormat("expects sub-attribute '%s=ixj...'", name));
}
lexer_.Lex();
return true;
}
return TokenError("expects token type kInt or kDxD");
}
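// Parses the window "pad=" sub-attribute: per-dimension "low_high" pairs
// separated by 'x', e.g. "0_0x3_3".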
bool HloParserImpl::ParseWindowPad(std::vector<std::vector<int64_t>>* pad) {
LocTy loc = lexer_.GetLoc();
if (!pad->empty()) {
return Error(loc, "sub-attribute 'pad=' already exists");
}
if (lexer_.GetKind() != TokKind::kPad) {
return TokenError("expects window pad pattern, e.g., '0_0x3_3'");
}
std::string str = lexer_.GetStrVal();
for (const auto& padding_dim_str : absl::StrSplit(str, 'x')) {
std::vector<int64_t> low_high;
if (!SplitToInt64s(padding_dim_str, '_', &low_high) ||
low_high.size() != 2) {
return Error(loc,
"expects padding_low and padding_high separated by '_'");
}
pad->push_back(low_high);
}
lexer_.Lex();
return true;
}
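// Parses a padding config: per-dimension "low_high" or "low_high_interior"
// groups separated by 'x', e.g. "0_0_0x3_3_1". Interior padding defaults to 0
// when omitted.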
bool HloParserImpl::ParsePaddingConfig(PaddingConfig* padding) {
if (lexer_.GetKind() != TokKind::kPad) {
return TokenError("expects padding config, e.g., '0_0_0x3_3_1'");
}
LocTy loc = lexer_.GetLoc();
std::string str = lexer_.GetStrVal();
for (const auto& padding_dim_str : absl::StrSplit(str, 'x')) {
std::vector<int64_t> padding_dim;
if (!SplitToInt64s(padding_dim_str, '_', &padding_dim) ||
(padding_dim.size() != 2 && padding_dim.size() != 3)) {
return Error(loc,
"expects padding config pattern like 'low_high_interior' or "
"'low_high'");
}
auto* dim = padding->add_dimensions();
dim->set_edge_padding_low(padding_dim[0]);
dim->set_edge_padding_high(padding_dim[1]);
dim->set_interior_padding(padding_dim.size() == 3 ? padding_dim[2] : 0);
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseOriginalValue(
optional<std::shared_ptr<OriginalValue>>* original_value,
const Shape& shape) {
VLOG(kDebugLevel) << "ParseOriginalValue";
if (!ParseToken(TokKind::kLbrace, "Expects '{'")) {
return false;
}
*original_value = std::make_shared<OriginalValue>(shape);
ShapeIndex leaf_shape_index;
while (lexer_.GetKind() != TokKind::kRbrace) {
if (lexer_.GetKind() == TokKind::kLparen) {
lexer_.Lex();
leaf_shape_index.push_back(0);
} else if (lexer_.GetKind() == TokKind::kRparen) {
lexer_.Lex();
leaf_shape_index.pop_back();
} else if (lexer_.GetKind() == TokKind::kComma) {
lexer_.Lex();
++leaf_shape_index.back();
} else if (lexer_.GetKind() == TokKind::kLbrace) {
lexer_.Lex();
std::string instruction_name;
ShapeIndex shape_index;
if (!ParseString(&instruction_name)) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
if (!ParseShapeIndex(&shape_index)) {
return false;
}
}
*(**original_value)->mutable_element(leaf_shape_index) = {
instruction_name, shape_index};
      if (!ParseToken(TokKind::kRbrace,
                      "Expects '}' at end of each OriginalArray")) {
return false;
}
} else {
return false;
}
}
lexer_.Lex();
return true;
}
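// Parses the metadata sub-attributes, e.g.
//   metadata={op_type="Conv2D" op_name="conv1d" source_file="test.cc"
//             source_line=68}
// All fields are optional.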
bool HloParserImpl::ParseMetadata(OpMetadata& metadata) {
absl::flat_hash_map<std::string, AttrConfig> attrs;
optional<std::string> op_type;
optional<std::string> op_name;
optional<std::string> source_file;
optional<int32_t> source_line;
optional<std::vector<int64_t>> profile_type;
optional<std::string> deduplicated_name;
optional<bool> preserve_layout;
optional<std::string> scheduling_name;
attrs["op_type"] = {false, AttrTy::kString, &op_type};
attrs["op_name"] = {false, AttrTy::kString, &op_name};
attrs["source_file"] = {false, AttrTy::kString, &source_file};
attrs["source_line"] = {false, AttrTy::kInt32, &source_line};
attrs["profile_type"] = {false, AttrTy::kBracedInt64List,
&profile_type};
attrs["deduplicated_name"] = {false, AttrTy::kString,
&deduplicated_name};
attrs["preserve_layout"] = {false, AttrTy::kBool,
&preserve_layout};
attrs["scheduling_name"] = {false, AttrTy::kString,
&scheduling_name};
if (!ParseSubAttributes(attrs)) {
return false;
}
if (op_type) {
metadata.set_op_type(*op_type);
}
if (op_name) {
metadata.set_op_name(*op_name);
}
if (source_file) {
metadata.set_source_file(*source_file);
}
if (source_line) {
metadata.set_source_line(*source_line);
}
if (profile_type) {
for (const auto& type : *profile_type) {
if (!ProfileType_IsValid(type)) {
return false;
}
metadata.add_profile_type(static_cast<ProfileType>(type));
}
}
if (deduplicated_name) {
metadata.set_deduplicated_name(*deduplicated_name);
}
if (preserve_layout) {
metadata.set_preserve_layout(*preserve_layout);
} else {
metadata.set_preserve_layout(false);
}
if (scheduling_name) {
metadata.set_scheduling_name(*scheduling_name);
}
return true;
}
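// Parses either a single metadata block "{...}" or a braced list of metadata
// blocks "{{...}, {...}}", distinguished by a one-token lookahead.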
bool HloParserImpl::ParseSingleOrListMetadata(
std::vector<OpMetadata>& metadata) {
if (lexer_.GetKind() == TokKind::kLbrace &&
lexer_.LookAhead() == TokKind::kLbrace) {
if (!ParseToken(TokKind::kLbrace, "expected '{' to start metadata list")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
if (!ParseMetadata(metadata.emplace_back())) {
return false;
}
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace, "expected '}' to end metadata list");
}
return ParseMetadata(metadata.emplace_back());
}
bool HloParserImpl::ParseOpShardingType(OpSharding::Type* type) {
switch (lexer_.GetKind()) {
case TokKind::kw_maximal:
*type = OpSharding::MAXIMAL;
lexer_.Lex();
break;
case TokKind::kw_replicated:
*type = OpSharding::REPLICATED;
lexer_.Lex();
break;
case TokKind::kw_manual:
*type = OpSharding::MANUAL;
lexer_.Lex();
break;
default:
return false;
}
return true;
}
bool HloParserImpl::ParseListShardingType(
std::vector<OpSharding::Type>* types) {
if (!ParseToken(TokKind::kLbrace,
"expected '{' to start sharding type list")) {
return false;
}
if (lexer_.GetKind() != TokKind::kRbrace) {
do {
OpSharding::Type type;
if (!ParseOpShardingType(&type)) {
return false;
}
types->emplace_back(type);
} while (EatIfPresent(TokKind::kComma));
}
return ParseToken(TokKind::kRbrace, "expected '}' to end sharding type list");
}
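// Parses an opcode identifier. Identifiers ending in "-start", "-update", or
// "-done" that are not opcodes themselves are treated as async-wrapped ops,
// e.g. "add-start" yields kAsyncStart with a wrapped kAdd.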
bool HloParserImpl::ParseOpcode(
HloOpcode* opcode, std::optional<HloOpcode>* async_wrapped_opcode) {
VLOG(kDebugLevel) << "ParseOpcode";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects opcode");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToHloOpcode(val);
if (!status_or_result.ok()) {
auto try_parsing_async_op = [&](absl::string_view suffix,
HloOpcode async_opcode) {
absl::string_view wrapped_opcode_view(val);
if (absl::ConsumeSuffix(&wrapped_opcode_view, suffix)) {
*opcode = async_opcode;
std::string wrapped_opcode(wrapped_opcode_view);
status_or_result = StringToHloOpcode(wrapped_opcode);
return true;
}
return false;
};
if (try_parsing_async_op("-start", HloOpcode::kAsyncStart) ||
try_parsing_async_op("-update", HloOpcode::kAsyncUpdate) ||
try_parsing_async_op("-done", HloOpcode::kAsyncDone)) {
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects async wrapped opcode but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*async_wrapped_opcode = status_or_result.value();
} else {
return TokenError(StrFormat("expects opcode but sees: %s, error: %s", val,
status_or_result.status().message()));
}
} else {
*opcode = status_or_result.value();
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseFftType(FftType* result) {
VLOG(kDebugLevel) << "ParseFftType";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects fft type");
}
std::string val = lexer_.GetStrVal();
if (!FftType_Parse(val, result) || !FftType_IsValid(*result)) {
return TokenError(StrFormat("expects fft type but sees: %s", val));
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParsePaddingType(PaddingType* result) {
VLOG(kDebugLevel) << "ParsePaddingType";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects padding type");
}
std::string val = lexer_.GetStrVal();
if (!PaddingType_Parse(val, result) || !PaddingType_IsValid(*result)) {
return TokenError(StrFormat("expects padding type but sees: %s", val));
}
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseComparisonDirection(ComparisonDirection* result) {
VLOG(kDebugLevel) << "ParseComparisonDirection";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects comparison direction");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToComparisonDirection(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects comparison direction but sees: %s", val));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseComparisonType(Comparison::Type* result) {
VLOG(kDebugLevel) << "ParseComparisonType";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects comparison type");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToComparisonType(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects comparison type but sees: %s", val));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseFusionKind(HloInstruction::FusionKind* result) {
VLOG(kDebugLevel) << "ParseFusionKind";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects fusion kind");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToFusionKind(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects fusion kind but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseRandomDistribution(RandomDistribution* result) {
VLOG(kDebugLevel) << "ParseRandomDistribution";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects random distribution");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToRandomDistribution(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects random distribution but sees: %s, error: %s", val,
status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseRandomAlgorithm(RandomAlgorithm* result) {
VLOG(kDebugLevel) << "ParseRandomAlgorithm";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects random algorithm");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToRandomAlgorithm(val);
if (!status_or_result.ok()) {
return TokenError(
StrFormat("expects random algorithm but sees: %s, error: %s", val,
status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParsePrecision(PrecisionConfig::Precision* result) {
VLOG(kDebugLevel) << "ParsePrecision";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects random distribution");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToPrecision(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects precision but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseAlgorithm(PrecisionConfig::Algorithm* result) {
VLOG(kDebugLevel) << "ParseAlgorithm";
if (lexer_.GetKind() != TokKind::kIdent) {
return TokenError("expects algorithm");
}
std::string val = lexer_.GetStrVal();
auto status_or_result = StringToAlgorithm(val);
if (!status_or_result.ok()) {
return TokenError(StrFormat("expects algorithm but sees: %s, error: %s",
val, status_or_result.status().message()));
}
*result = status_or_result.value();
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseInt64(int64_t* result) {
VLOG(kDebugLevel) << "ParseInt64";
if (lexer_.GetKind() != TokKind::kInt) {
return TokenError("expects integer");
}
*result = lexer_.GetInt64Val();
lexer_.Lex();
return true;
}
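// Parses a double. Accepts decimal literals, integers (converted to double),
// and the keywords "inf" / "-inf"; decimal literals that overflow a double
// are rejected.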
bool HloParserImpl::ParseDouble(double* result) {
switch (lexer_.GetKind()) {
case TokKind::kDecimal: {
double val = lexer_.GetDecimalVal();
if (std::isinf(val)) {
return TokenError(StrCat("Constant is out of range for double (+/-",
std::numeric_limits<double>::max(),
") and so is unparsable."));
}
*result = val;
break;
}
case TokKind::kInt:
*result = static_cast<double>(lexer_.GetInt64Val());
break;
case TokKind::kw_inf:
*result = std::numeric_limits<double>::infinity();
break;
case TokKind::kNegInf:
*result = -std::numeric_limits<double>::infinity();
break;
default:
return TokenError("expects decimal or integer");
}
lexer_.Lex();
return true;
}
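// Parses a complex literal of the form "(real, imag)", e.g. "(1.5, -2)".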
bool HloParserImpl::ParseComplex(std::complex<double>* result) {
if (lexer_.GetKind() != TokKind::kLparen) {
return TokenError("expects '(' before complex number");
}
lexer_.Lex();
double real;
LocTy loc = lexer_.GetLoc();
if (!ParseDouble(&real)) {
    return Error(
        loc, "expects floating-point value for real part of complex number");
  }
  if (lexer_.GetKind() != TokKind::kComma) {
    return TokenError("expects comma after real part of complex literal");
  }
  lexer_.Lex();
  double imag;
  loc = lexer_.GetLoc();
  if (!ParseDouble(&imag)) {
    return Error(
        loc,
        "expects floating-point value for imaginary part of complex number");
  }
  if (lexer_.GetKind() != TokKind::kRparen) {
    return TokenError("expects ')' after complex number");
  }
}
*result = std::complex<double>(real, imag);
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseBool(bool* result) {
if (lexer_.GetKind() != TokKind::kw_true &&
lexer_.GetKind() != TokKind::kw_false) {
return TokenError("expects true or false");
}
*result = lexer_.GetKind() == TokKind::kw_true;
lexer_.Lex();
return true;
}
bool HloParserImpl::ParseToken(TokKind kind, const std::string& msg) {
VLOG(kDebugLevel) << "ParseToken " << TokKindToString(kind) << " " << msg;
if (lexer_.GetKind() != kind) {
return TokenError(msg);
}
lexer_.Lex();
return true;
}
bool HloParserImpl::EatIfPresent(TokKind kind) {
if (lexer_.GetKind() != kind) {
return false;
}
lexer_.Lex();
return true;
}
bool HloParserImpl::AddInstruction(const std::string& name,
HloInstruction* instruction,
LocTy name_loc) {
auto result = current_name_table().insert({name, {instruction, name_loc}});
if (!result.second) {
Error(name_loc, StrCat("instruction already exists: ", name));
return Error(result.first->second.second,
"instruction previously defined here");
}
return true;
}
bool HloParserImpl::AddComputation(const std::string& name,
HloComputation* computation,
LocTy name_loc) {
auto result = computation_pool_.insert({name, {computation, name_loc}});
if (!result.second) {
Error(name_loc, StrCat("computation already exists: ", name));
return Error(result.first->second.second,
"computation previously defined here");
}
return true;
}
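// The Parse*Only methods below each parse a single standalone construct and
// require the input to be fully consumed (EOF) afterwards. They back the
// free functions at the bottom of this file.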
absl::StatusOr<Shape> HloParserImpl::ParseShapeOnly() {
lexer_.Lex();
Shape shape;
if (!ParseShape(&shape)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after shape");
}
return shape;
}
absl::StatusOr<Layout> HloParserImpl::ParseLayoutOnly() {
lexer_.Lex();
Layout layout;
if (!ParseLayout(&layout)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after layout");
}
return layout;
}
absl::StatusOr<HloSharding> HloParserImpl::ParseShardingOnly() {
lexer_.Lex();
std::optional<HloSharding> sharding;
if (!ParseSharding(sharding)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after sharding");
}
return std::move(*sharding);
}
absl::StatusOr<FrontendAttributes>
HloParserImpl::ParseFrontendAttributesOnly() {
lexer_.Lex();
FrontendAttributes attributes;
if (!ParseFrontendAttributes(&attributes)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
"Syntax error:\nExtra content after frontend attributes");
}
return attributes;
}
absl::StatusOr<StatisticsViz> HloParserImpl::ParseStatisticsVizOnly() {
lexer_.Lex();
StatisticsViz statistics_viz;
if (!ParseStatisticsViz(&statistics_viz)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after statistics");
}
return statistics_viz;
}
absl::StatusOr<std::vector<bool>>
HloParserImpl::ParseParameterReplicationOnly() {
lexer_.Lex();
ParameterReplication parameter_replication;
  if (!ParseParameterReplication(&parameter_replication)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
"Syntax error:\nExtra content after parameter replication");
}
return std::vector<bool>(
parameter_replication.replicated_at_leaf_buffers().begin(),
parameter_replication.replicated_at_leaf_buffers().end());
}
absl::StatusOr<HloParserImpl::BoolList>
HloParserImpl::ParseBooleanListOrSingleBooleanOnly() {
lexer_.Lex();
BoolList booleans;
if (!ParseBooleanListOrSingleBoolean(&booleans)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after boolean list");
}
return booleans;
}
absl::StatusOr<std::vector<ReplicaGroup>>
HloParserImpl::ParseReplicaGroupsOnly() {
lexer_.Lex();
std::vector<ReplicaGroup> replica_groups;
if (!ParseReplicaGroupsOnly(&replica_groups)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after replica groups");
}
return replica_groups;
}
absl::StatusOr<Window> HloParserImpl::ParseWindowOnly() {
lexer_.Lex();
Window window;
if (!ParseWindow(&window, false)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after window");
}
return window;
}
absl::StatusOr<ConvolutionDimensionNumbers>
HloParserImpl::ParseConvolutionDimensionNumbersOnly() {
lexer_.Lex();
ConvolutionDimensionNumbers dnums;
if (!ParseConvolutionDimensionNumbers(&dnums)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument(
"Syntax error:\nExtra content after convolution dnums");
}
return dnums;
}
absl::StatusOr<PaddingConfig> HloParserImpl::ParsePaddingConfigOnly() {
lexer_.Lex();
PaddingConfig padding_config;
if (!ParsePaddingConfig(&padding_config)) {
return InvalidArgument("Syntax error:\n%s", GetError());
}
if (lexer_.GetKind() != TokKind::kEof) {
return InvalidArgument("Syntax error:\nExtra content after PaddingConfig");
}
return padding_config;
}
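// Parses a single instruction as an entry computation. Operand names that
// refer to instructions not yet defined are synthesized as parameters, so
// e.g. "f32[] add(f32[] x, f32[] y)" yields a module with two parameters and
// an add root. The parser state must be fresh when this is called.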
bool HloParserImpl::ParseSingleInstruction(HloModule* module) {
if (create_missing_instruction_ != nullptr || !scoped_name_tables_.empty()) {
LOG(FATAL) << "Parser state is not clean. Please do not call any other "
"methods before calling ParseSingleInstruction.";
}
HloComputation::Builder builder(module->name());
int64_t parameter_count = 0;
create_missing_instruction_ =
[this, &builder, ¶meter_count](
const std::string& name,
const Shape& shape) -> std::pair<HloInstruction*, LocTy>* {
std::string new_name = name.empty() ? StrCat("_", parameter_count) : name;
HloInstruction* parameter = builder.AddInstruction(
HloInstruction::CreateParameter(parameter_count++, shape, new_name));
current_name_table()[new_name] = {parameter, lexer_.GetLoc()};
return tsl::gtl::FindOrNull(current_name_table(), new_name);
};
Scope scope(&scoped_name_tables_);
if (CanBeShape()) {
if (!ParseInstructionRhs(&builder, module->name(), lexer_.GetLoc())) {
return false;
}
} else {
std::string root_name;
if (!ParseInstruction(&builder, &root_name)) {
return false;
}
}
if (lexer_.GetKind() != TokKind::kEof) {
Error(
lexer_.GetLoc(),
"Syntax error:\nExpected eof after parsing single instruction. Did you"
" mean to write an HLO module and forget the \"HloModule\" header?");
return false;
}
module->AddEntryComputation(builder.Build());
for (auto& comp : computations_) {
module->AddEmbeddedComputation(std::move(comp));
}
TF_CHECK_OK(module->set_schedule(ScheduleFromInstructionOrder(module)));
return true;
}
}
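// Public entry points wrapping HloParserImpl. A minimal usage sketch (error
// handling elided; assumes default-constructed config and options):
//
//   absl::StatusOr<std::unique_ptr<HloModule>> module =
//       ParseAndReturnUnverifiedModule(
//           R"(HloModule m
//              ENTRY e { ROOT c = f32[] constant(42) })",
//           HloModuleConfig(), HloParserOptions());
//   absl::StatusOr<HloSharding> sharding = ParseSharding("{replicated}");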
absl::StatusOr<std::unique_ptr<HloModule>> ParseAndReturnUnverifiedModule(
absl::string_view str, const HloModuleConfig& config,
const HloParserOptions& options) {
auto module = std::make_unique<HloModule>("_", config);
HloParserImpl parser(str, options);
TF_RETURN_IF_ERROR(parser.Run(module.get()));
return std::move(module);
}
absl::StatusOr<HloSharding> ParseSharding(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseShardingOnly();
}
absl::StatusOr<FrontendAttributes> ParseFrontendAttributes(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseFrontendAttributesOnly();
}
absl::StatusOr<StatisticsViz> ParseStatisticsViz(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseStatisticsVizOnly();
}
absl::StatusOr<std::vector<bool>> ParseParameterReplication(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseParameterReplicationOnly();
}
absl::StatusOr<HloParserImpl::BoolList> ParseBooleanListOrSingleBoolean(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseBooleanListOrSingleBooleanOnly();
}
absl::StatusOr<std::vector<ReplicaGroup>> ParseReplicaGroupsOnly(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseReplicaGroupsOnly();
}
absl::StatusOr<Window> ParseWindow(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseWindowOnly();
}
absl::StatusOr<ConvolutionDimensionNumbers> ParseConvolutionDimensionNumbers(
absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseConvolutionDimensionNumbersOnly();
}
absl::StatusOr<PaddingConfig> ParsePaddingConfig(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParsePaddingConfigOnly();
}
absl::StatusOr<Shape> ParseShape(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseShapeOnly();
}
absl::StatusOr<Layout> ParseLayout(absl::string_view str) {
HloParserImpl parser(str);
return parser.ParseLayoutOnly();
}
std::unique_ptr<HloParser> HloParser::CreateHloParserForTests(
absl::string_view str) {
return std::make_unique<HloParserImpl>(str);
}
}

#include "xla/hlo/parser/hlo_parser.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/parser/hlo_lexer.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::absl::string_view;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
struct TestData {
std::string test_name;
std::string module_string;
int64_t replica_count = 1;
bool enable_verification = true;
};
std::string TestDataToString(const ::testing::TestParamInfo<TestData>& data) {
return data.param.test_name;
}
struct NonRoundtripTestData {
std::string test_name;
std::string input_module_string;
std::string output_module_string;
};
std::string NonRoundtripTestDataToString(
const ::testing::TestParamInfo<NonRoundtripTestData>& data) {
return data.param.test_name;
}
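// Round-trip test cases: each TestData names an HLO module string that is
// expected to parse and (presumably) print back unchanged, in contrast to
// the NonRoundtripTestData cases above.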
std::vector<TestData> CreateTestCases() {
return std::vector<TestData>({
{
"AxpyParam",
R"(HloModule axpy_module, entry_computation_layout={(f32[], f32[2,4]{1,0}, f32[2,4]{1,0})->f32[2,4]{1,0}}
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)"
},
{
"ParamReplication",
R"(HloModule param_replication_module, entry_computation_layout={(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))->(f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0})))}
ENTRY %param_replication (a: f32[], b: (f32[2,4], (f32[2,4]))) -> (f32[], (f32[2,4], (f32[2,4]))) {
%a = f32[] parameter(0), parameter_replication={true}
%b = (f32[2,4]{1,0}, (f32[2,4]{1,0})) parameter(1), parameter_replication={false,true}
ROOT %tuple = (f32[], (f32[2,4]{1,0}, (f32[2,4]{1,0}))) tuple(f32[] %a, (f32[2,4]{1,0}, (f32[2,4]{1,0})) %b)
}
)"
},
{
"ConstantPred",
R"(HloModule constant_pred_module, entry_computation_layout={()->pred[]}
ENTRY %constant_pred () -> pred[] {
ROOT %constant = pred[] constant(true), metadata={op_type="const" op_name="\"it\'s not a problem\n" source_file="path/to/test.cc" source_line=68}, backend_config="foo\" bar"
}
)"
},
{
"ConstantPredArray",
R"(HloModule module, entry_computation_layout={()->pred[2,3]{1,0}}
ENTRY %constant_pred_array () -> pred[2,3] {
ROOT %constant = pred[2,3]{1,0} constant({ { 0, 1, 0 }, { 1, 0, 1 } })
}
)"
},
{
"ConstantS32",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42)
}
)"
},
{
"ConstantS32WithStatistics",
R"(HloModule constant_s32_module, entry_computation_layout={()->s32[]}
ENTRY %constant_s32 () -> s32[] {
ROOT %constant = s32[] constant(-42), statistics={visualizing_index=1,stat-1=33,stat-2=44}
}
)"
},
{
"ConstantF32",
R"(HloModule ConstantF32_module, entry_computation_layout={()->f32[]}
ENTRY %ConstantF32.v4 () -> f32[] {
ROOT %constant = f32[] constant(42), backend_config="this is a configuration"
}
)"
},
{
"ConstantF32R1Empty",
R"(HloModule ConstantF32Empty_module, entry_computation_layout={()->f32[0]{0}}
ENTRY %ConstantF32Empty.v4 () -> f32[0] {
ROOT %constant = f32[0]{0} constant({})
}
)"
},
{
"ConstantF32R4Empty",
R"(HloModule ConstantF32R4Empty_module, entry_computation_layout={()->f32[2,0,4,3]{3,2,1,0}}
ENTRY %ConstantF32R4Empty.v4 () -> f32[2,0,4,3] {
ROOT %constant = f32[2,0,4,3]{3,2,1,0} constant({ { }, { } })
}
)"
},
{
"Constant4D",
R"(HloModule Small_3x2x1x1_module, entry_computation_layout={()->f32[3,2,1,1]{3,2,1,0}}
ENTRY %Small_3x2x1x1.v1 () -> f32[3,2,1,1] {
ROOT %constant = f32[3,2,1,1]{3,2,1,0} constant({ { { {-1} }, { {4.1} } }, { { {2} }, { {4.1} } }, { { {5} }, { {4.4} } } })
}
)"
},
{
"ConstantNonFinite",
R"(HloModule IsFiniteR1F32s_module, entry_computation_layout={()->pred[6]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> pred[6] {
%constant = f32[6]{0} constant({nan, 7, nan, -1, inf, -inf})
ROOT %is-finite = pred[6]{0} is-finite(f32[6]{0} %constant)
}
)"
},
{
"ConstantNonFiniteE4M3",
R"(HloModule ConstantR1F8E4M3FNs_module, entry_computation_layout={()->f8e4m3fn[3]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3fn[3] {
ROOT %constant = f8e4m3fn[3]{0} constant({nan, 7, -nan})
}
)"
},
{
"ConstantNonFiniteE4M3B11",
R"(HloModule ConstantR1F8E4M3B11_module, entry_computation_layout={()->f8e4m3b11fnuz[2]{0}}
ENTRY %IsFiniteR1F32s.v2 () -> f8e4m3b11fnuz[2] {
ROOT %constant = f8e4m3b11fnuz[2]{0} constant({-nan, 7})
}
)"
},
{
"ConstantF16",
R"(HloModule ConstantF16_module, entry_computation_layout={()->f16[]}
ENTRY %ConstantF16.v4 () -> f16[] {
ROOT %constant = f16[] constant(500)
}
)"
},
{
"BF16",
R"(HloModule BF16, entry_computation_layout={()->bf16[]}
ENTRY %BF16.v4 () -> bf16[] {
ROOT %constant = bf16[] constant(500)
}
)"
},
{
"AddConstants",
R"(HloModule add_constants_module, entry_computation_layout={()->f32[]}
ENTRY %add_constants () -> f32[] {
%constant = f32[] constant(3.14)
ROOT %add = f32[] add(f32[] %constant, f32[] %constant)
}
)"
},
{
"TupleConstant",
R"(HloModule TupleConstant_module, entry_computation_layout={()->(f32[2,1]{1,0}, f32[2]{0})}
ENTRY %TupleConstant.v1 () -> (f32[2,1], f32[2]) {
ROOT %constant = (f32[2,1]{1,0}, f32[2]{0}) constant(( { {1}, {2} }, {2, 42} ))
}
)"
},
{
"SelectR1F32",
R"(HloModule SelectR1F32WithCmpR1F32sFromParamsSmall_module, entry_computation_layout={(f32[4]{0}, f32[4]{0})->f32[4]{0}}
ENTRY %SelectR1F32WithCmpR1F32sFromParamsSmall.v4 (v1: f32[4], v2: f32[4]) -> f32[4] {
%v1 = f32[4]{0} parameter(0), sharding={maximal device=1}
%v2 = f32[4]{0} parameter(1), sharding={maximal device=1}
%greater-than = pred[4]{0} compare(f32[4]{0} %v1, f32[4]{0} %v2), direction=GT, type=TOTALORDER, sharding={replicated}
ROOT %select = f32[4]{0} select(pred[4]{0} %greater-than, f32[4]{0} %v1, f32[4]{0} %v2), sharding={replicated}
}
)"
},
{
"EmptyTupleCreate",
R"(HloModule EmptyTupleCreate_module, entry_computation_layout={()->()}
ENTRY %EmptyTupleCreate.v1 () -> () {
ROOT %tuple = () tuple()
}
)"
},
{
"TupleCreate",
R"(HloModule TupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %TupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0)
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3)
}
)"
},
{
"LargeTupleRoundTrip",
R"(HloModule LargeTupleRoundTrip_module, entry_computation_layout={(f32[])->(f32[], f32[], f32[], f32[], f32[], f32[])}
ENTRY %TupleCreate.v4 (v: f32[]) -> (f32[], f32[], f32[], f32[], f32[], f32[]) {
%v = f32[] parameter(0)
ROOT %tuple = (f32[], f32[], f32[], f32[], f32[], f32[]) tuple(f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v, f32[] %v)
}
)"
},
{
"ShardedTupleCreate",
R"(HloModule ShardedTupleCreate_module, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->(f32[], f32[3]{0}, f32[2,3]{1,0})}
ENTRY %ShardedTupleCreate.v4 (v1: f32[], v2: f32[3], v3: f32[2,3]) -> (f32[], f32[3], f32[2,3]) {
%v1 = f32[] parameter(0), sharding={manual}
%v2 = f32[3]{0} parameter(1)
%v3 = f32[2,3]{1,0} parameter(2)
ROOT %tuple = (f32[], f32[3]{0}, f32[2,3]{1,0}) tuple(f32[] %v1, f32[3]{0} %v2, f32[2,3]{1,0} %v3), sharding={{manual}, {maximal device=0}, {replicated}}
}
)"
},
{
"DomainParsing",
R"(HloModule DomainParsing_module, entry_computation_layout={(f32[])->f32[]}
ENTRY %DomainParsing (v1: f32[]) -> f32[] {
%v1 = f32[] parameter(0)
ROOT %dom = f32[] domain(f32[] %v1), domain={kind="sharding", entry={maximal device=0}, exit={maximal device=1}}
}
)"
},
{
"WhileWithScalarS32Result",
R"(HloModule WhileWithScalarS32Result_module, entry_computation_layout={()->s32[]}
%body.v3 (prev.1: s32[]) -> s32[] {
%constant = s32[] constant(1)
%prev.1 = s32[] parameter(0)
ROOT %add = s32[] add(s32[] %constant, s32[] %prev.1)
}
%condition.v3 (prev.2: s32[]) -> pred[] {
%constant.1 = s32[] constant(5)
%prev.2 = s32[] parameter(0)
ROOT %greater-than = pred[] compare(s32[] %constant.1, s32[] %prev.2), direction=GT
}
ENTRY %WhileWithScalarS32Result.v2 () -> s32[] {
%constant.2 = s32[] constant(0)
ROOT %while = s32[] while(s32[] %constant.2), condition=%condition.v3, body=%body.v3
}
)"
},
{
"CopyStartAndCopyDone",
R"(HloModule CopyStartAndCopyDone_module, entry_computation_layout={(f32[], f32[2,3]{1,0:S(1)})->(f32[], f32[2,3]{1,0:S(2)})}
ENTRY %CopyStartAndCopyDone (v1: f32[], v2: f32[2,3]) -> (f32[], f32[2,3]) {
%v1 = f32[] parameter(0)
%copy-start.1 = (f32[], f32[], u32[]) copy-start(f32[] %v1), cross_program_prefetch_index=0
%copy-done.1 = f32[] copy-done((f32[], f32[], u32[]) %copy-start.1)
%v2 = f32[2,3]{1,0:S(1)} parameter(1)
%copy-start.2 = (f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) copy-start(f32[2,3]{1,0:S(1)} %v2)
%copy-done.2 = f32[2,3]{1,0:S(2)} copy-done((f32[2,3]{1,0:S(2)}, f32[2,3]{1,0:S(1)}, u32[]) %copy-start.2)
ROOT %tuple = (f32[], f32[2,3]{1,0:S(2)}) tuple(f32[] %copy-done.1, f32[2,3]{1,0:S(2)} %copy-done.2)
}
)"
},
{
"SendRecv",
R"(HloModule TwoSendRecvBothWayRecvFist_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, sharding={{maximal device=1}, {replicated}, {replicated}}
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, sharding={{maximal device=1}, {replicated}}
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, sharding={{maximal device=1}, {replicated}, {replicated}}, control-predecessors={%recv}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, sharding={maximal device=0}
}
)"
},
{
"SendRecvWithHostTransfer",
R"(HloModule HostTransferSendRecv_module, entry_computation_layout={()->(f32[], token[])}
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> (f32[], token[]) {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15, is_host_transfer=true
ROOT %recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15, is_host_transfer=true
%constant = f32[] constant(2.1), sharding={maximal device=0}
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, is_host_transfer=true
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16, is_host_transfer=true
}
)"
},
{
"GetTupleElement",
R"(HloModule GetTupleElement_module, entry_computation_layout={()->s32[2,3]{1,0}}
ENTRY %GetTupleElement.v4 () -> s32[2,3] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
%tuple = (f32[3]{0}, s32[2,3]{1,0}) tuple(f32[3]{0} %constant, s32[2,3]{1,0} %constant.1)
ROOT %get-tuple-element = s32[2,3]{1,0} get-tuple-element((f32[3]{0}, s32[2,3]{1,0}) %tuple), index=1, sharding={maximal device=0}
}
)"
},
{
"Call",
R"(HloModule CallR0F32IdentityScalar_module, entry_computation_layout={()->f32[]}
%Identity.v1 (x: f32[]) -> f32[] {
ROOT %x = f32[] parameter(0)
}
ENTRY %CallR0F32IdentityScalar.v2 () -> f32[] {
%constant = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant), to_apply=%Identity.v1
}
)"
},
{
"CompositeCall",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1"}
}
)"
},
{
"CompositeCallWithExtraFrontendAttributes",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar",composite.version="1",foo="bar"}
}
)"
},
{
"CompositeCallOptionalAttributesAndVersion",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.name="foo.bar"}
}
)"
},
{
"CompositeCallOptionalAttributes",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.name="foo.bar",composite.version="1"}
}
)"
},
{
"CompositeCallOptionalVersion",
R"(HloModule CompositeCall, entry_computation_layout={()->f32[]}
%add (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
%constant = f32[] constant(2)
ROOT %z = f32[] add(f32[] %x, f32[] %constant)
}
ENTRY %CompositeCall.v2 () -> f32[] {
%constant.1 = f32[] constant(42)
ROOT %call = f32[] call(f32[] %constant.1), to_apply=%add, is_composite=true, frontend_attributes={composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},composite.name="foo.bar"}
}
)"
},
{
"CustomCallWithOpaque",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config="this string is opaque"
}
)"
},
{
"CustomCallWithBackendConfigInCurlyBraces",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", backend_config={key: "value"}
}
)"
},
{
"CustomCallWithLiteral",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=s32[2]{0} {1, 2}
}
)"
},
{
"CustomCallWithLiteralTuple",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=( s32[4]{0} {4, 128, 128, 3}, pred[4]{0} {1, 0, 0, 0} )
}
)"
},
{
"CustomCallWithLiteralR0",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar", literal=f32[] 0.1
}
)"
},
{
"ReduceWindow",
R"(HloModule R4UnitWindow_module, entry_computation_layout={(f32[13,12,8,15]{0,3,2,1})->f32[13,3,8,15]{0,3,2,1}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindow.v3 (operand: f32[13,12,8,15]) -> f32[13,3,8,15] {
%operand = f32[13,12,8,15]{0,3,2,1} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[13,3,8,15]{0,3,2,1} reduce-window(f32[13,12,8,15]{0,3,2,1} %operand, f32[] %constant), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowScalar",
R"(HloModule reduce_window_scalar, entry_computation_layout={()->f32[]}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %R4UnitWindowScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = f32[] reduce-window(f32[] %constant, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"ReduceWindowVariadic",
R"(HloModule reduce_window_variadic, entry_computation_layout={()->(f32[], f32[])}
%add_F32.v3 (lhs1: f32[], lhs2: f32[], rhs1: f32[], rhs2: f32[]) -> (f32[], f32[]) {
%lhs1 = f32[] parameter(0)
%rhs1 = f32[] parameter(2)
%add1 = f32[] add(f32[] %lhs1, f32[] %rhs1)
%lhs2 = f32[] parameter(1)
%rhs2 = f32[] parameter(3)
%add2 = f32[] add(f32[] %lhs2, f32[] %rhs2)
ROOT %tuple1 = (f32[], f32[]) tuple(f32[] %add1, f32[] %add2)
}
ENTRY %R4UnitWindowScalar () -> (f32[], f32[]) {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
ROOT %reduce-window = (f32[], f32[]) reduce-window(f32[] %constant, f32[] %constant, f32[] %constant.1, f32[] %constant.1), to_apply=%add_F32.v3
}
)"
},
{
"Convolution",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}
}
)"
},
{
"ConvolutionDynamic",
R"(HloModule Convolve1D1Window_0_module, entry_computation_layout={(f32[1,2,1]{2,1,0}, f32[1,1,1]{2,1,0})->f32[1,2,1]{2,0,1}}
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %custom-call.52 = f32[1,2,1]{2,0,1} custom-call(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f, operand_precision={high,default}, custom_call_target="DynamicConvolutionForward", metadata={op_type="Conv2D" op_name="conv1d"}
}
)"
},
{
"ConvolutionR2",
R"(HloModule ConvolveR2_module, entry_computation_layout={(f32[1,2]{1,0}, f32[2,2]{1,0})->f32[1,2]{0,1}}
ENTRY %ConvolveR2.v3 (input: f32[1,2], filter: f32[2,2]) -> f32[1,2] {
%input = f32[1,2]{1,0} parameter(0)
%filter = f32[2,2]{1,0} parameter(1)
ROOT %convolution = f32[1,2]{0,1} convolution(f32[1,2]{1,0} %input, f32[2,2]{1,0} %filter), dim_labels=bf_io->bf
}
)"
},
{
"ConvolutionBackward",
R"(HloModule ConvolveBackward_module, entry_computation_layout={(f32[128,7,7,512]{0,3,2,1}, f32[3,3,512,512]{3,2,1,0})->f32[128,14,14,512]{0,3,2,1}}
ENTRY %ConvolveBackward (input: f32[128,7,7,512], filter: f32[3,3,512,512]) -> f32[128,14,14,512] {
%input = f32[128,7,7,512]{0,3,2,1} parameter(0)
%filter = f32[3,3,512,512]{3,2,1,0} parameter(1)
ROOT %convolution-base-dilated = f32[128,14,14,512]{0,3,2,1} convolution(f32[128,7,7,512]{0,3,2,1} %input, f32[3,3,512,512]{3,2,1,0} %filter), window={size=3x3 pad=1_2x1_2 lhs_dilate=2x2 rhs_reversal=1x1}, dim_labels=b01f_01oi->b01f
}
)"
},
{
"Reverse4D",
R"(HloModule Reverse4DFloatArrayOnDim01_module, entry_computation_layout={()->f32[4,3,2,1]{0,1,2,3}}
ENTRY %Reverse4DFloatArrayOnDim01.v2 () -> f32[4,3,2,1] {
%constant = f32[4,3,2,1]{0,1,2,3} constant({ { { {1}, {2} }, { {3}, {4} }, { {5}, {6} } }, { { {7}, {8} }, { {9}, {10} }, { {11}, {12} } }, { { {13}, {14} }, { {15}, {16} }, { {17}, {18} } }, { { {19}, {20} }, { {21}, {22} }, { {23}, {24} } } })
ROOT %reverse = f32[4,3,2,1]{0,1,2,3} reverse(f32[4,3,2,1]{0,1,2,3} %constant), dimensions={0,1}
}
)"
},
{
"Concat",
R"(HloModule Concat2x3With2x5_module, entry_computation_layout={()->f32[2,8]{1,0}}
ENTRY %Concat2x3With2x5.v3 () -> f32[2,8] {
%constant = f32[2,3]{1,0} constant({ { 0, 1, 2 }, { 1000, 1001, 1002 } })
%constant.1 = f32[2,5]{1,0} constant({ { 64, 65, 66, 67, 68 }, { 1064, 1065, 1066, 1067, 1068 } })
ROOT %concatenate = f32[2,8]{1,0} concatenate(f32[2,3]{1,0} %constant, f32[2,5]{1,0} %constant.1), dimensions={1}
}
)"
},
{
"SelectAndScatter",
R"(HloModule R4F32OverlapSmall_module, entry_computation_layout={()->f32[4,5,1,1]{3,2,1,0}}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE, type=TOTALORDER
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %R4F32OverlapSmall.v4 () -> f32[4,5,1,1] {
%constant = f32[4,5,1,1]{3,2,1,0} constant({ { { {7} }, { {2} }, { {5} }, { {3} }, { {8} } }, { { {3} }, { {8} }, { {9} }, { {3} }, { {4} } }, { { {1} }, { {5} }, { {7} }, { {5} }, { {6} } }, { { {0} }, { {6} }, { {2} }, { {10} }, { {2} } } })
%constant.1 = f32[2,2,1,1]{3,2,1,0} constant({ { { {2} }, { {6} } }, { { {3} }, { {1} } } })
%constant.2 = f32[] constant(0)
ROOT %select-and-scatter = f32[4,5,1,1]{3,2,1,0} select-and-scatter(f32[4,5,1,1]{3,2,1,0} %constant, f32[2,2,1,1]{3,2,1,0} %constant.1, f32[] %constant.2), window={size=2x3x1x1 stride=2x2x1x1}, select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"SelectAndScatterScalar",
R"(HloModule select_and_scatter_scalar, entry_computation_layout={()->f32[]}
%ge_F32.v3 (lhs: f32[], rhs: f32[]) -> pred[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %greater-than-or-equal-to = pred[] compare(f32[] %lhs, f32[] %rhs), direction=GE
}
%add_F32.v3 (lhs.1: f32[], rhs.1: f32[]) -> f32[] {
%lhs.1 = f32[] parameter(0)
%rhs.1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs.1, f32[] %rhs.1)
}
ENTRY %SelectAndScatterScalar () -> f32[] {
%constant = f32[] constant(42)
%constant.1 = f32[] constant(1)
%constant.2 = f32[] constant(2)
ROOT %select-and-scatter = f32[] select-and-scatter(f32[] %constant, f32[] %constant.1, f32[] %constant.2), select=%ge_F32.v3, scatter=%add_F32.v3
}
)"
},
{
"Slice",
R"(HloModule slice_module, entry_computation_layout={(f32[3,3,4,4]{3,2,1,0})->f32[3,3,2,4]{3,2,1,0}}
ENTRY %slice.v2 (p0: f32[3,3,4,4]) -> f32[3,3,2,4] {
%p0 = f32[3,3,4,4]{3,2,1,0} parameter(0)
ROOT %slice = f32[3,3,2,4]{3,2,1,0} slice(f32[3,3,4,4]{3,2,1,0} %p0), slice={[0:3:1], [0:3:1], [0:4:2], [0:4:1]}
}
)"
},
{
"SliceNoStride",
R"(HloModule Slice3x3x3_To_1x3x3_F32_module, entry_computation_layout={()->f32[1,3,3]{2,1,0}}
ENTRY %Slice3x3x3_To_1x3x3_F32.v2 () -> f32[1,3,3] {
%constant = f32[3,3,3]{2,1,0} constant({ { { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 } }, { { 9, 10, 11 }, { 12, 13, 14 }, { 15, 16, 17 } }, { { 18, 19, 20 }, { 21, 22, 23 }, { 24, 25, 26 } } })
ROOT %slice = f32[1,3,3]{2,1,0} slice(f32[3,3,3]{2,1,0} %constant), slice={[0:1], [0:3], [0:3]}
}
)"
},
{
"SliceR0",
R"(HloModule SliceR0_module, entry_computation_layout={()->s32[]}
ENTRY %SliceR0.v2 () -> s32[] {
%constant = s32[] constant(1)
ROOT %slice = s32[] slice(s32[] %constant), slice={}
}
)"
},
{
"Transpose",
R"(HloModule Transpose_module, entry_computation_layout={()->s32[1,2,3]{2,1,0}}
ENTRY %Transpose.v2 () -> s32[1,2,3] {
%constant = s32[1,2,3]{2,1,0} constant({ { { 1, 2, 3 }, { 4, 5, 6 } } })
ROOT %transpose = s32[1,2,3]{2,1,0} transpose(s32[1,2,3]{2,1,0} %constant), dimensions={0,1,2}
}
)"
},
{
"TransposeC128",
R"(HloModule TransposeC128_module, entry_computation_layout={(c128[1,2,3]{2,1,0})->c128[1,2,3]{2,1,0}}
ENTRY %Transpose.v3 (input: c128[1,2,3]) -> c128[1,2,3] {
%input = c128[1,2,3]{2,1,0} parameter(0)
ROOT %transpose = c128[1,2,3]{2,1,0} transpose(c128[1,2,3]{2,1,0} %input), dimensions={0,1,2}
}
)"
},
{
"TriangularSolve",
R"(HloModule TriangularSolve_module, entry_computation_layout={(f32[4,4]{1,0}, f32[3,4]{1,0})->f32[3,4]{1,0}}
ENTRY %SimpleRightLowerNotranspose.4 (a.1: f32[4,4], b.2: f32[3,4]) -> f32[3,4] {
%a.1 = f32[4,4]{1,0} parameter(0)
%b.2 = f32[3,4]{1,0} parameter(1)
ROOT %triangular-solve.3 = f32[3,4]{1,0} triangular-solve(f32[4,4]{1,0} %a.1, f32[3,4]{1,0} %b.2), lower=true, transpose_a=NO_TRANSPOSE
}
)"
},
{
"DynamicSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[1]{0})->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[1]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[1]{0} constant({0})
%start_index = s32[1]{0} parameter(1)
%concatenate = s32[3]{0} concatenate(s32[1]{0} %constant, s32[1]{0} %constant, s32[1]{0} %start_index), dimensions={0}
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[3]{0} %concatenate), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicSliceScalarIndices",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[2,2,258]{2,1,0}, s32[])->s32[2,2,258]{2,1,0}}
ENTRY %DynamicSlice.v5 (original_parameter: s32[2,2,258], start_index: s32[]) -> s32[2,2,258] {
%original_parameter = s32[2,2,258]{2,1,0} parameter(0)
%constant = s32[] constant(0)
%start_index = s32[] parameter(1)
ROOT %dynamic-slice = s32[2,2,258]{2,1,0} dynamic-slice(s32[2,2,258]{2,1,0} %original_parameter, s32[] %constant, s32[] %constant, s32[] %start_index), dynamic_slice_sizes={2,2,258}
}
)"
},
{
"DynamicUpdateSlice",
R"(HloModule DynamicSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[4]{0})->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_indices: s32[4]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_indices = s32[4]{0} parameter(2)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[4]{0} %start_indices)
}
)"
},
{
"DynamicUpdateSliceScalarIndex",
R"(HloModule DynamicUpdateSlice_module, entry_computation_layout={(s32[1,1,25,1]{3,2,1,0}, s32[1,1,2,1]{3,2,1,0}, s32[], s32[], s32[], s32[])->s32[1,1,25,1]{3,2,1,0}}
ENTRY %DynamicUpdateSlice.v4 (input: s32[1,1,25,1], update: s32[1,1,2,1], start_index.0: s32[], start_index.1: s32[], start_index.2: s32[], start_index.3: s32[]) -> s32[1,1,25,1] {
%input = s32[1,1,25,1]{3,2,1,0} parameter(0)
%update = s32[1,1,2,1]{3,2,1,0} parameter(1)
%start_index.0 = s32[] parameter(2)
%start_index.1 = s32[] parameter(3)
%start_index.2 = s32[] parameter(4)
%start_index.3 = s32[] parameter(5)
ROOT %dynamic-update-slice = s32[1,1,25,1]{3,2,1,0} dynamic-update-slice(s32[1,1,25,1]{3,2,1,0} %input, s32[1,1,2,1]{3,2,1,0} %update, s32[] %start_index.0, s32[] %start_index.1, s32[] %start_index.2, s32[] %start_index.3)
}
)"
},
{
"BatchNormTraining",
R"(HloModule BasicTraining_module, entry_computation_layout={()->(f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BasicTraining.v4 () -> (f32[2,2,1,2], f32[2], f32[2]) {
%constant = f32[2,2,1,2]{3,2,1,0} constant({ { { { 1, 2 } }, { { 3, 4 } } }, { { { 5, 6 } }, { { 7, 8 } } } })
%constant.1 = f32[2]{0} constant({2, 3})
%constant.2 = f32[2]{0} constant({1, 2})
ROOT %batch-norm-training = (f32[2,2,1,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-training(f32[2,2,1,2]{3,2,1,0} %constant, f32[2]{0} %constant.1, f32[2]{0} %constant.2), epsilon=0.001, feature_index=3
}
)"
},
{
"BatchNormInference",
R"(HloModule BatchNormInference_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2]{0})->f32[2,2,2,2]{3,2,1,0}}
ENTRY %BatchNormInference.v6 (input: f32[2,2,2,2], offset: f32[2], scale: f32[2], mean: f32[2], variance: f32[2]) -> f32[2,2,2,2] {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%offset = f32[2]{0} parameter(1)
%scale = f32[2]{0} parameter(2)
%mean = f32[2]{0} parameter(3)
%variance = f32[2]{0} parameter(4)
ROOT %batch-norm-inference = f32[2,2,2,2]{3,2,1,0} batch-norm-inference(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %offset, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance), epsilon=0.001, feature_index=0
}
)"
},
{
"BatchNormGrad",
R"(HloModule BatchNormGrad_module, entry_computation_layout={(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}, f32[2,2,2,2]{3,2,1,0})->(f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0})}
ENTRY %BatchNormGrad.v4 (input: f32[2,2,2,2], scale: f32[2], mean: f32[2], variance: f32[2], grad_output: f32[2,2,2,2]) -> (f32[2,2,2,2], f32[2], f32[2]) {
%input = f32[2,2,2,2]{3,2,1,0} parameter(0)
%scale = f32[2]{0} parameter(1)
%mean = f32[2]{0} parameter(2)
%variance = f32[2]{0} parameter(3)
%grad_output = f32[2,2,2,2]{3,2,1,0} parameter(4)
ROOT %batch-norm-grad = (f32[2,2,2,2]{3,2,1,0}, f32[2]{0}, f32[2]{0}) batch-norm-grad(f32[2,2,2,2]{3,2,1,0} %input, f32[2]{0} %scale, f32[2]{0} %mean, f32[2]{0} %variance, f32[2,2,2,2]{3,2,1,0} %grad_output), epsilon=0.001, feature_index=0
}
)"
},
{
"Fft",
R"(HloModule Fft_module, entry_computation_layout={(c64[8,32]{1,0})->c64[8,32]{1,0}}
ENTRY %Fft (input: c64[8,32]) -> c64[8,32] {
%input = c64[8,32]{1,0} parameter(0)
ROOT %fft = c64[8,32]{1,0} fft(c64[8,32]{1,0} %input), fft_type=FFT, fft_length={32}
}
)"
},
{
"Ifft2d",
R"(HloModule Ifft2d_module, entry_computation_layout={(c64[5,8,32]{2,1,0})->c64[5,8,32]{2,1,0}}
ENTRY %Ifft2d (input: c64[5,8,32]) -> c64[5,8,32] {
%input = c64[5,8,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,8,32]{2,1,0} fft(c64[5,8,32]{2,1,0} %input), fft_type=IFFT, fft_length={8,32}
}
)"
},
{
"Rfft2d",
R"(HloModule Rfft2d_module, entry_computation_layout={(f32[5,64,32]{2,1,0})->c64[5,64,17]{2,1,0}}
ENTRY %Rfft2d (input: f32[5,64,32]) -> c64[5,64,17] {
%input = f32[5,64,32]{2,1,0} parameter(0)
ROOT %fft = c64[5,64,17]{2,1,0} fft(f32[5,64,32]{2,1,0} %input), fft_type=RFFT, fft_length={64,32}
}
)"
},
{
"Irfft3d",
R"(HloModule Irfft3d_module, entry_computation_layout={(c64[5,64,128,33]{3,2,1,0})->f32[5,64,128,64]{3,2,1,0}}
ENTRY %Irfft3d (input: c64[5,64,128,33]) -> f32[5,64,128,64] {
%input = c64[5,64,128,33]{3,2,1,0} parameter(0)
ROOT %fft = f32[5,64,128,64]{3,2,1,0} fft(c64[5,64,128,33]{3,2,1,0} %input), fft_type=IRFFT, fft_length={64,128,64}
}
)"
},
{
"Pad",
R"(HloModule Pad1DS3Array_module, entry_computation_layout={()->f32[7]{0}}
ENTRY %Pad1DS3Array.v3 () -> f32[7] {
%constant = f32[3]{0} constant({1, 2, 3})
%constant.1 = f32[] constant(0.1)
ROOT %pad = f32[7]{0} pad(f32[3]{0} %constant, f32[] %constant.1), padding=3_1
}
)"
},
{
"PadHasInterior",
R"(HloModule PadHasInterior_module, entry_computation_layout={(f32[1,25,7,7]{3,2,1,0})->f32[1,25,17,11]{3,2,1,0}}
ENTRY %PadHasInterior.v3 (input: f32[1,25,7,7]) -> f32[1,25,17,11] {
%input = f32[1,25,7,7]{3,2,1,0} parameter(0)
%constant = f32[] constant(-5.123)
ROOT %pad = f32[1,25,17,11]{3,2,1,0} pad(f32[1,25,7,7]{3,2,1,0} %input, f32[] %constant), padding=0_0_0x0_0_0x2_2_1x2_2_0
}
)"
},
{
"RoundNearestEven",
R"(HloModule RoundNearestEven_module, entry_computation_layout={(f32[2,2]{1,0})->f32[2,2]{1,0}}
ENTRY %RoundNearestEven (input: f32[2,2]) -> f32[2,2] {
%input = f32[2,2]{1,0} parameter(0)
ROOT %round-nearest-even = f32[2,2]{1,0} round-nearest-even(f32[2,2]{1,0} %input)
}
)"
},
{
"PadHasNegativePadding",
R"(HloModule PadHasNegativePadding_module, entry_computation_layout={(f32[1,25,7,7,10]{4,3,2,1,0})->f32[1,15,6,3,35]{4,3,2,1,0}}
ENTRY %PadHasNegativePadding (input: f32[1,25,7,7,10]) -> f32[1,15,6,3,35] {
%input = f32[1,25,7,7,10]{4,3,2,1,0} parameter(0)
%constant = f32[] constant(-5.123)
ROOT %pad = f32[1,15,6,3,35]{4,3,2,1,0} pad(f32[1,25,7,7,10]{4,3,2,1,0} %input, f32[] %constant), padding=0_0_0x0_-10_0x0_-1_0x-2_-2_0x-1_-1_3
}
)"
},
{
"Fusion",
R"(HloModule fusion_module, entry_computation_layout={()->f32[3,2,1,1]{3,2,1,0}}
%fused_computation (constant.param_0: f32[3,2,1,1], constant.1.param_1: f32[2]) -> f32[3,2,1,1] {
%constant.param_0 = f32[3,2,1,1]{3,2,1,0} parameter(0)
%constant.1.param_1 = f32[2]{0} parameter(1)
%broadcast = f32[3,2,1,1]{3,2,1,0} broadcast(f32[2]{0} %constant.1.param_1), dimensions={1}
ROOT %subtract = f32[3,2,1,1]{3,2,1,0} subtract(f32[3,2,1,1]{3,2,1,0} %constant.param_0, f32[3,2,1,1]{3,2,1,0} %broadcast)
}
ENTRY %fusion.v3 () -> f32[3,2,1,1] {
%constant = f32[3,2,1,1]{3,2,1,0} constant({ { { {-1} }, { {4.1} } }, { { {2} }, { {4.1} } }, { { {5} }, { {4.4} } } })
%constant.1 = f32[2]{0} constant({3.14, 4.25})
ROOT %fusion = f32[3,2,1,1]{3,2,1,0} fusion(f32[3,2,1,1]{3,2,1,0} %constant, f32[2]{0} %constant.1), kind=kLoop, calls=%fused_computation
}
)"
},
{
"FusionWithAliasing",
R"(HloModule FusionWithAliasing, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2})}
%FusedComp (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[123,4], f32[2,2], f32[1,2,3]) {
%p1 = f32[123,4]{0,1} parameter(1)
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%elem1 = f32[2,2]{0,1} get-tuple-element((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0), index=0
%constant0 = f32[] constant(1)
%broadcast0 = f32[1,2,3]{0,1,2} broadcast(f32[] %constant0), dimensions={}
ROOT %tuple = (f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2}) tuple(f32[123,4]{0,1} %p1, f32[2,2]{0,1} %elem1, f32[1,2,3]{0,1,2} %broadcast0)
}
ENTRY %FusionWithAliasing (p0.1: (f32[2,2], f32[42,2,3]), p1.1: f32[123,4]) -> (f32[123,4], f32[2,2], f32[1,2,3]) {
%p0.1 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1.1 = f32[123,4]{0,1} parameter(1)
ROOT %fusion = (f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2}) fusion((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0.1, f32[123,4]{0,1} %p1.1), kind=kLoop, output_to_operand_aliasing={{0}: (1, {}), {1}: (0, {0})}, calls=%FusedComp
}
)"
},
{
"Gather",
R"(HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46], start_indices: s64[10,9,8,7,5]) -> f32[10,9,8,7,30,29,28,27,26] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, index_vector_dim=4, slice_sizes={30,29,28,27,26}
}
)"
},
{
"SortedGather",
R"(HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46], start_indices: s64[10,9,8,7,5]) -> f32[10,9,8,7,30,29,28,27,26] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, index_vector_dim=4, slice_sizes={30,29,28,27,26}, indices_are_sorted=true
}
)"
},
{
"BatchGather",
R"(HloModule StringifyGather, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0}}
ENTRY %Gather (input_tensor: f32[50,49,48,47,46,512], start_indices: s64[10,9,8,7,5,512]) -> f32[10,9,8,7,30,29,28,27,26,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%start_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
ROOT %gather = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} gather(f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,512]{5,4,3,2,1,0} %start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, operand_batching_dims={5}, start_indices_batching_dims={5}, index_vector_dim=4, slice_sizes={30,29,28,27,26,1}
}
)"
},
{
"Scatter",
R"(HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46]{4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates: f32[10,9,8,7,30,29,28,27,26]) -> f32[50,49,48,47,46] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=%add_F32.v3
}
)"
},
{
"BatchScatter",
R"(HloModule StringifyScatter, entry_computation_layout={(f32[50,49,48,47,46,512]{5,4,3,2,1,0}, s64[10,9,8,7,5,512]{5,4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46,512]{5,4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46,512], scatter_indices: s64[10,9,8,7,5,512], updates: f32[10,9,8,7,30,29,28,27,26,512]) -> f32[50,49,48,47,46,512] {
%input_tensor = f32[50,49,48,47,46,512]{5,4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5,512]{5,4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46,512]{5,4,3,2,1,0} scatter(f32[50,49,48,47,46,512]{5,4,3,2,1,0} %input_tensor, s64[10,9,8,7,5,512]{5,4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26,512]{9,8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, input_batching_dims={5}, scatter_indices_batching_dims={5}, index_vector_dim=4, to_apply=%add_F32.v3
}
)"
},
{
"TupleScatter",
R"(HloModule TupleScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, bf16[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}, bf16[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->(f32[50,49,48,47,46]{4,3,2,1,0}, bf16[50,49,48,47,46]{4,3,2,1,0})}
%add_F32_mul_BF16 (lhs_0: f32[], lhs_1: bf16[], rhs_0: f32[], rhs_1: bf16[]) -> (f32[], bf16[]) {
%lhs_0 = f32[] parameter(0)
%rhs_0 = f32[] parameter(2)
%add = f32[] add(f32[] %lhs_0, f32[] %rhs_0)
%lhs_1 = bf16[] parameter(1)
%rhs_1 = bf16[] parameter(3)
%mul = bf16[] multiply(bf16[] %lhs_1, bf16[] %rhs_1)
ROOT %tuple = (f32[], bf16[]) tuple(f32[] %add, bf16[] %mul)
}
ENTRY %Scatter (input_0: f32[50,49,48,47,46], input_1: bf16[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates_0: f32[10,9,8,7,30,29,28,27,26], updates_1: bf16[10,9,8,7,30,29,28,27,26]) -> (f32[50,49,48,47,46], bf16[50,49,48,47,46]) {
%input_0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%input_1 = bf16[50,49,48,47,46]{4,3,2,1,0} parameter(1)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(2)
%updates_0 = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(3)
%updates_1 = bf16[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(4)
ROOT %scatter = (f32[50,49,48,47,46]{4,3,2,1,0}, bf16[50,49,48,47,46]{4,3,2,1,0}) scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_0, bf16[50,49,48,47,46]{4,3,2,1,0} %input_1, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates_0, bf16[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates_1), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=%add_F32_mul_BF16
}
)"
},
{
"SortedScatter",
R"(HloModule StringifySortedScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46]{4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates: f32[10,9,8,7,30,29,28,27,26]) -> f32[50,49,48,47,46] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, indices_are_sorted=true, to_apply=%add_F32.v3
}
)"
},
{
"UniqueIndicesScatter",
R"(HloModule StringifyUniqueIndicesScatter, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0}, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0})->f32[50,49,48,47,46]{4,3,2,1,0}}
%add_F32.v3 (lhs: f32[], rhs: f32[]) -> f32[] {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %lhs, f32[] %rhs)
}
ENTRY %Scatter (input_tensor: f32[50,49,48,47,46], scatter_indices: s64[10,9,8,7,5], updates: f32[10,9,8,7,30,29,28,27,26]) -> f32[50,49,48,47,46] {
%input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
%scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
%updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, s64[10,9,8,7,5]{4,3,2,1,0} %scatter_indices, f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, unique_indices=true, to_apply=%add_F32.v3
}
)"
},
{
"ConstantUnsignedNoUnderflow",
R"(HloModule ConstantUnsignedNoUnderflow_module, entry_computation_layout={()->u64[]}
ENTRY %ConstantUnsignedNoUnderflow () -> u64[] {
ROOT %constant = u64[] constant(1)
}
)"
},
{
"ConstantUnsignedNoOverflow",
R"(HloModule ConstantUnsignedNoOverflow_module, entry_computation_layout={()->u64[]}
ENTRY %ConstantUnsignedNoOverflow () -> u64[] {
ROOT %constant = u64[] constant(9223372036854775807)
}
)"
},
{
"CustomCallWithLayoutConstraints",
R"(HloModule CustomCallWithLayoutConstraints, entry_computation_layout={(f32[42,2,3]{0,1,2}, f32[123,4]{0,1})->f32[1,2,3]{0,2,1}}
ENTRY %CustomCallWithLayoutConstraints (p0: f32[42,2,3], p1: f32[123,4]) -> f32[1,2,3] {
%p0 = f32[42,2,3]{0,1,2} parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(f32[42,2,3]{0,1,2} %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={f32[42,2,3]{0,1,2}, f32[123,4]{1,0}}
}
)"
},
{
"CustomCallWithLayoutConstraintsNoOperands",
R"(HloModule CustomCallWithLayoutConstraintsNoOperands, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCallWithLayoutConstraints () -> f32[1,2,3] {
ROOT %custom-call = f32[1,2,3]{0,2,1} custom-call(), custom_call_target="baz", operand_layout_constraints={}
}
)"
},
{
"CustomCallWithLayoutConstraintsTupleShapes",
R"(HloModule CustomCallWithLayoutConstraintsTupleShapes, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0})}
ENTRY %CustomCallWithLayoutConstraints (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[1,2,3], f32[1,2,3]) {
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = (f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0}) custom-call((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={(f32[2,2]{1,0}, f32[42,2,3]{2,0,1}), f32[123,4]{1,0}}
}
)"
},
{
"CustomCallWithHasSideEffect",
R"(HloModule CustomCallWithHasSideEffect, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0})}
ENTRY %CustomCallWithHasSideEffect (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[1,2,3], f32[1,2,3]) {
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = (f32[1,2,3]{0,2,1}, f32[1,2,3]{1,2,0}) custom-call((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", custom_call_has_side_effect=true
}
)"
},
{
"CustomCallWithAliasing",
R"(HloModule CustomCallWithAliasing, entry_computation_layout={((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}), f32[123,4]{0,1})->(f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2})}
ENTRY %CustomCallWithAliasing (p0: (f32[2,2], f32[42,2,3]), p1: f32[123,4]) -> (f32[123,4], f32[2,2], f32[1,2,3]) {
%p0 = (f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = (f32[123,4]{0,1}, f32[2,2]{0,1}, f32[1,2,3]{0,1,2}) custom-call((f32[2,2]{0,1}, f32[42,2,3]{0,1,2}) %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", output_to_operand_aliasing={{0}: (1, {}), {1}: (0, {0})}
}
)"
},
{
"CustomCallWithSchedule",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
%custom-call.0 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", schedule=SCHEDULE_EARLIEST
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1,2,3]{0,2,1} %custom-call.0), custom_call_target="bar", schedule=SCHEDULE_LATEST
}
)"
},
{
"CustomCallWithStatusReturningVersion",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", api_version=API_VERSION_STATUS_RETURNING
}
)"
},
{
"ParseC64Literal",
R"(HloModule ParseC64Literal, entry_computation_layout={()->c64[2]{0}}
ENTRY %ParseC64Literal () -> c64[2] {
ROOT %c = c64[2]{0} constant({(1, 2), (-inf, nan)})
}
)"
},
{
"ParseC128Literal",
R"(HloModule ParseC128Literal, entry_computation_layout={()->c128[2]{0}}
ENTRY %ParseC128Literal () -> c128[2] {
ROOT %c = c128[2]{0} constant({(1, 2), (-inf, nan)})
}
)"
},
{
"IndexedConditional",
R"(HloModule indexed_conditional, entry_computation_layout={()->f32[]}
%Negate (x: f32[]) -> f32[] {
%x = f32[] parameter(0)
ROOT %negate = f32[] negate(f32[] %x)
}
%Identity (y: f32[]) -> f32[] {
%y = f32[] parameter(0)
ROOT %copy = f32[] copy(f32[] %y)
}
%Floor (z: f32[]) -> f32[] {
%z = f32[] parameter(0)
ROOT %floor = f32[] floor(f32[] %z)
}
ENTRY %Parameters1.v4 () -> f32[] {
%constant = s32[] constant(1)
%constant.1 = f32[] constant(56)
%constant.2 = f32[] constant(12)
%constant.3 = f32[] constant(13)
ROOT %conditional = f32[] conditional(s32[] %constant, f32[] %constant.1, f32[] %constant.2, f32[] %constant.3), branch_computations={%Negate, %Identity, %Floor}
}
)"
},
{
"RngGetAndUpdateState",
R"(HloModule rng_get_and_update_state, entry_computation_layout={()->u64[2]{0}}
ENTRY %RngGetAndUpdateState () -> u64[2] {
ROOT %rng-get-and-update-state = u64[2]{0} rng-get-and-update-state(), delta=4096
}
)"
},
{
"RngBitGenerator",
R"(HloModule gng_bit_generator, entry_computation_layout={(u64[2]{0})->(u64[2]{0}, u32[11,17]{1,0})}
ENTRY %RngBitGenerator (p0: u64[2]) -> (u64[2], u32[11,17]) {
%p0 = u64[2]{0} parameter(0)
ROOT %rand = (u64[2]{0}, u32[11,17]{1,0}) rng-bit-generator(u64[2]{0} %p0), algorithm=rng_three_fry
}
)"
},
{
"AsyncOpsWithSyntaxSugar",
R"(HloModule AsyncOpsWithSyntaxSugar, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)"
},
{
"AsyncOpsWithSyntaxSugarAndThreadName",
R"(HloModule AsyncOpsWithSyntaxSugarAndThreadName, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)"
},
{
"HloComputationWithParallelThreadName",
R"(HloModule HloComputationWithParallelThreadName, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}, execution_thread="main_thread"
)"
},
{
"MetadataFields",
R"(HloModule test, entry_computation_layout={(f32[100]{0})->u32[100]{0}}
ENTRY %test (p: f32[100]) -> u32[100] {
%p = f32[100]{0} parameter(0)
ROOT %root = u32[100]{0} bitcast-convert(f32[100]{0} %p), metadata={op_type="a" op_name="b" source_file="c" source_line=1 profile_type={1} deduplicated_name="d" scheduling_name="foo"}
}
)"
},
{
"MetadataPreserveLayout",
R"(HloModule test, entry_computation_layout={(f32[100]{0})->u32[100]{0}}
ENTRY %test (p: f32[100]) -> u32[100] {
%p = f32[100]{0} parameter(0)
ROOT %root = u32[100]{0} bitcast-convert(f32[100]{0} %p), metadata={op_type="a" op_name="b" source_file="c" source_line=1 profile_type={1} deduplicated_name="d" preserve_layout=true}
}
)"
},
{
"OriginalValue",
R"(HloModule test, entry_computation_layout={(f32[], f32[3]{0}, f32[2,3]{1,0})->((f32[], f32[3]{0}), f32[2,3]{1,0})}
ENTRY %test (v1: f32[], v2: f32[3], v3: f32[2,3]) -> ((f32[], f32[3]), f32[2,3]) {
%v1 = f32[] parameter(0), origin={{"v1"}}
%v2 = f32[3]{0} parameter(1), origin={{"v2"}}
%tuple = (f32[], f32[3]{0}) tuple(f32[] %v1, f32[3]{0} %v2), origin={({"v1"}, {"v2"})}
%v3 = f32[2,3]{1,0} parameter(2), origin={{"v3"}}
ROOT %nested_tuple = ((f32[], f32[3]{0}), f32[2,3]{1,0}) tuple((f32[], f32[3]{0}) %tuple, f32[2,3]{1,0} %v3), origin={(({"v1"}, {"v2"}), {"v3"})}
}
)"
},
});
}
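// The cases below use the short (non-canonical) printing form: each module
// string is parsed and then re-printed with HloPrintOptions::ShortParsable(),
// and the result is expected to match the input exactly.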
std::vector<TestData> CreateShortTestCases() {
return std::vector<TestData>({
{
"Map",
R"(HloModule MapBinaryAdder_module, entry_computation_layout={(f32[4]{0}, f32[4]{0})->f32[4]{0}}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY MapBinaryAdder.v3 {
param0 = f32[4]{0} parameter(0)
param1 = f32[4]{0} parameter(1)
ROOT map = f32[4]{0} map(param0, param1), dimensions={0}, to_apply=add_F32.v3
}
)"
},
{
"Reduce",
R"(HloModule ReduceR3ToR2_module, entry_computation_layout={(f32[8,16,256]{2,1,0})->f32[8,16]{1,0}}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY ReduceR3ToR2.v3 {
input = f32[8,16,256]{2,1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
}
)"
},
{
"TupleReduce",
R"(HloModule TupleReduce, entry_computation_layout={(f32[1024]{0}, s32[1024]{0})->(f32[], s32[])}
max_argmax {
value = f32[] parameter(2)
prev_max = f32[] parameter(0)
is_next_larger = pred[] compare(value, prev_max), direction=GE
max = f32[] select(is_next_larger, value, prev_max)
index = s32[] parameter(3)
prev_argmax = s32[] parameter(1)
argmax = s32[] select(is_next_larger, index, prev_argmax)
ROOT pair = (f32[], s32[]) tuple(max, argmax)
}
ENTRY reduce_entry {
values = f32[1024]{0} parameter(0)
indices = s32[1024]{0} parameter(1)
init_value = f32[] constant(-inf)
init_index = s32[] constant(-1)
ROOT result = (f32[], s32[]) reduce(values, indices, init_value, init_index), dimensions={0}, to_apply=max_argmax
}
)"
},
{
"InfeedOutfeed",
R"(HloModule outfeed_module, entry_computation_layout={()->((u32[3]{0}, pred[]), token[])}
ENTRY InfeedToOutfeed {
token0 = token[] after-all()
infeed = ((u32[3]{0}, pred[]), token[]) infeed(token0)
infeed.data = (u32[3]{0}, pred[]) get-tuple-element(infeed), index=0
outfeed = token[] outfeed(infeed.data, token0), outfeed_shape=(u32[3]{0}, pred[])
ROOT infeed.1 = ((u32[3]{0}, pred[]), token[]) infeed(token0)
infeed.1.data = (u32[3]{0}, pred[]) get-tuple-element(infeed.1), index=0
infeed.1.token = token[] get-tuple-element(infeed.1), index=1
outfeed.1 = token[] outfeed(infeed.1.data, infeed.1.token), outfeed_shape=(u32[3]{0}, pred[])
}
)"
},
{
"Rng",
R"(HloModule rng_module, entry_computation_layout={()->f32[8]{0}}
ENTRY Rng {
constant = f32[] constant(0)
constant.1 = f32[] constant(1)
ROOT rng = f32[8]{0} rng(constant, constant.1), distribution=rng_uniform
}
)"
},
{
"ReducePrecision",
R"(HloModule reduce_precision, entry_computation_layout={()->f32[1]{0}}
ENTRY ReducePrecision {
constant = f32[1]{0} constant({3.14159})
ROOT reduce-precision = f32[1]{0} reduce-precision(constant), exponent_bits=8, mantissa_bits=10
}
)"
},
{
"SortKey",
R"(HloModule sort, entry_computation_layout={(f32[1024]{0})->f32[1024]{0}}
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
x = f32[1024]{0} parameter(0)
ROOT sorted = f32[1024]{0} sort(x), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyValue",
R"(HloModule sort, entry_computation_layout={(f32[1024]{0}, s32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024]{0} parameter(0)
values = s32[1024]{0} parameter(1)
ROOT sorted = (f32[1024]{0}, s32[1024]{0}) sort(keys, values), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyR2",
R"(HloModule sort, entry_computation_layout={(f32[1024,16]{0,1})->f32[1024,16]{0,1}}
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
x = f32[1024,16]{0,1} parameter(0)
ROOT sorted = f32[1024,16]{0,1} sort(x), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyValueR2",
R"(HloModule sort, entry_computation_layout={(f32[1024,16]{0,1}, s32[1024,16]{0,1})->(f32[1024,16]{0,1}, s32[1024,16]{0,1})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024,16]{0,1} parameter(0)
values = s32[1024,16]{0,1} parameter(1)
ROOT sorted = (f32[1024,16]{0,1}, s32[1024,16]{0,1}) sort(keys, values), dimensions={0}, to_apply=compare
}
)"
},
{
"SortManyValues",
R"(HloModule sort, entry_computation_layout={(f32[1024,16]{0,1}, s32[1024,16]{0,1}, u32[1024,16]{0,1}, f32[1024,16]{0,1})->(f32[1024,16]{0,1}, s32[1024,16]{0,1}, u32[1024,16]{0,1}, f32[1024,16]{0,1})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.2.lhs = u32[] parameter(4)
p.2.rhs = u32[] parameter(5)
p.3.lhs = f32[] parameter(6)
p.3.rhs = f32[] parameter(7)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024,16]{0,1} parameter(0)
values.0 = s32[1024,16]{0,1} parameter(1)
values.1 = u32[1024,16]{0,1} parameter(2)
values.2 = f32[1024,16]{0,1} parameter(3)
ROOT sorted = (f32[1024,16]{0,1}, s32[1024,16]{0,1}, u32[1024,16]{0,1}, f32[1024,16]{0,1}) sort(keys, values.0, values.1, values.2), dimensions={0}, to_apply=compare
}
)"
},
{
"SortKeyStable",
R"(HloModule sort, entry_computation_layout={(f32[1024]{0})->f32[1024]{0}}
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
x = f32[1024]{0} parameter(0)
ROOT sorted = f32[1024]{0} sort(x), dimensions={0}, is_stable=true, to_apply=compare
}
)"
},
{
"TopK",
R"(HloModule topk, entry_computation_layout={(f32[10,10]{0,1})->(f32[10,2]{0,1}, s32[10,2]{0,1})}
ENTRY TopK {
x = f32[10,10]{0,1} parameter(0)
ROOT topk = (f32[10,2]{0,1}, s32[10,2]{0,1}) topk(x), k=2, largest=true
}
)"
},
{
"IndexedConditional",
R"(HloModule indexed_conditional, entry_computation_layout={()->f32[]}
Negate {
x = f32[] parameter(0)
ROOT negate = f32[] negate(x)
}
Identity {
y = f32[] parameter(0)
ROOT copy = f32[] copy(y)
}
Floor {
z = f32[] parameter(0)
ROOT floor = f32[] floor(z)
}
ENTRY Parameters1.v4 {
constant = s32[] constant(1)
constant.1 = f32[] constant(56)
constant.2 = f32[] constant(12)
constant.3 = f32[] constant(13)
ROOT conditional = f32[] conditional(constant, constant.1, constant.2, constant.3), branch_computations={Negate, Identity, Floor}
}
)"
},
{
"PredicatedConditional",
R"(HloModule pred_conditional, entry_computation_layout={()->f32[]}
Negate {
x = f32[] parameter(0)
ROOT negate = f32[] negate(x)
}
Identity {
y = f32[] parameter(0)
ROOT copy = f32[] copy(y)
}
ENTRY Parameters1.v4 {
constant = pred[] constant(true)
constant.1 = f32[] constant(56)
constant.2 = f32[] constant(12)
ROOT conditional = f32[] conditional(constant, constant.1, constant.2), true_computation=Negate, false_computation=Identity
}
)"
},
{
"CustomCall",
R"(HloModule custom_call, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
ENTRY CustomCall {
constant = f32[1]{0} constant({12345})
ROOT custom-call = f32[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo\"bar"
}
)"
},
{
"CustumCallSingleComp",
R"(HloModule custom_call_with_comp, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
max_F32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY CustomCall {
constant = f32[1]{0} constant({12345})
ROOT custom-call = f32[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo\"bar", called_computations={max_F32}
}
)"
},
{
"CustumCallMultipleComps",
R"(HloModule custom_call_with_comps, entry_computation_layout={()->f32[1,2,3]{0,2,1}}
max_F32 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT maximum = f32[] maximum(lhs, rhs)
}
ENTRY CustomCall {
constant = f32[1]{0} constant({12345})
ROOT custom-call = f32[1,2,3]{0,2,1} custom-call(constant), custom_call_target="foo\"bar", called_computations={max_F32, max_F32}
}
)"
},
{
"NonDefaultNames",
R"(HloModule add_constants_module, entry_computation_layout={()->f32[]}
ENTRY add_constants {
foo = f32[] constant(3.14)
ROOT bar = f32[] add(foo, foo)
}
)"
},
{
"Dot",
R"(HloModule dot, entry_computation_layout={(f32[2,10]{1,0}, f32[10,2]{1,0})->f32[2]{0}}
ENTRY dot {
a = f32[2,10]{1,0} parameter(0)
b = f32[10,2]{1,0} parameter(1)
ROOT dot = f32[2]{0} dot(a, b), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}
}
)"
},
{
"DotSparseOperand",
R"(HloModule dot, entry_computation_layout={(f16[32,32]{1,0}, f16[64,32]{1,0}, u16[32,4]{1,0})->f16[32,32]{1,0}}
ENTRY dot {
a = f16[32,32]{1,0} parameter(0)
b = f16[64,32]{1,0} parameter(1)
meta = u16[32,4]{1,0} parameter(2)
ROOT dot = f16[32,32]{1,0} dot(a, b, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
)"
},
{
"DotSparseOperands",
R"(HloModule dot, entry_computation_layout={(f16[32,32]{1,0}, f16[32,32]{1,0}, u16[32,4]{1,0}, u16[4,32]{1,0})->f16[32,32]{1,0}}
ENTRY dot {
a = f16[32,32]{1,0} parameter(0)
b = f16[32,32]{1,0} parameter(1)
a_meta = u16[32,4]{1,0} parameter(2)
b_meta = u16[4,32]{1,0} parameter(3)
ROOT dot = f16[32,32]{1,0} dot(a, b, a_meta, b_meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4_R.0@2:4
}
)"
},
{
"DotWithAlgorithm",
R"(HloModule dot, entry_computation_layout={(f32[2,10]{1,0}, f32[10,2]{1,0})->f32[2]{0}}
ENTRY dot {
a = f32[2,10]{1,0} parameter(0)
b = f32[10,2]{1,0} parameter(1)
ROOT dot = f32[2]{0} dot(a, b), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}, algorithm=dot_tf32_tf32_f32
}
)"
},
{
"gather",
R"(HloModule gather, entry_computation_layout={(f32[50,49,48,47,46]{4,3,2,1,0}, s64[10,9,8,7,5]{4,3,2,1,0})->f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}}
ENTRY Gather {
input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
start_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} gather(input_tensor, start_indices), offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, start_index_map={0,1,2,3,4}, index_vector_dim=4, slice_sizes={30,29,28,27,26}
}
)"
},
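// Collective-op cases: these exercise replica_groups syntax (explicit lists
// and iota-list forms such as [2,10]<=[20]), channel_id, constrain_layout,
// and the start/done async variants. Entries with a trailing integer set a
// non-default replica count in the module config.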
{
"AllReduce",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={}, to_apply=add
}
)"
},
{
"AllReduceWithSubgroups",
R"(HloModule CRS_Subgroups, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY AllReduceWithSubgroups {
input = f32[128,32]{0,1} parameter(0)
ROOT all-reduce = f32[128,32]{0,1} all-reduce(input), replica_groups={{0,1},{2,3}}, to_apply=add
}
)",
4,
},
{
"AllReduceWithSubgroupsIotaList",
R"(HloModule CRS_Subgroups, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=20
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY AllReduceWithSubgroupsIotaList {
input = f32[128,32]{0,1} parameter(0)
ROOT all-reduce = f32[128,32]{0,1} all-reduce(input), replica_groups=[2,10]<=[20], to_apply=add
}
)",
20,
},
{
"AllReduceWithLayout",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT crs = f32[8]{0} all-reduce(input), replica_groups={}, constrain_layout=true, to_apply=add
}
)"
},
{
"AllReduceAllReduce",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
crs.1 = f32[8]{0} all-reduce(input), channel_id=1, replica_groups={{0}}, to_apply=add
ROOT crs.0 = f32[8]{0} all-reduce(input), channel_id=1, replica_groups={{0}}, to_apply=add
}
)"
},
{
"AllReduceStartAndDone",
R"(HloModule CRS, entry_computation_layout={(f32[8]{0})->f32[8]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
crs = f32[8]{0} all-reduce-start(input), replica_groups={}, to_apply=add
ROOT done = f32[8]{0} all-reduce-done(crs)
}
)"
},
{
"ReduceScatter",
R"(HloModule RS, entry_computation_layout={(f32[8]{0})->f32[4]{0}}
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[8]{0} parameter(0)
ROOT ars = f32[4]{0} reduce-scatter(input), replica_groups={{0,1}}, dimensions={0}, to_apply=add
}
)"
},
{
"AllGather",
R"(HloModule AllGather, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}
ENTRY AllGather {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,128]{0,1} all-gather(input), replica_groups={}, dimensions={1}
}
)"
},
{
"AllGatherWithLayout",
R"(HloModule AllGather, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}
ENTRY AllGather {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,128]{0,1} all-gather(input), replica_groups={}, constrain_layout=true, dimensions={1}
}
)"
},
{
"AllGatherWithSubgroups",
R"(HloModule AllGatherWithSubgroups, entry_computation_layout={(f32[128,32]{0,1})->f32[128,64]{0,1}}, replica_count=4
ENTRY AllGatherWithSubgroups {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,64]{0,1} all-gather(input), replica_groups={{0,1},{2,3}}, dimensions={1}
}
)",
4,
},
{
"AllGatherWithSubgroupsIotaList",
R"(HloModule AllGatherWithSubgroupsIotaList, entry_computation_layout={(f32[128,32]{0,1})->f32[128,320]{0,1}}, replica_count=30
ENTRY AllGatherWithSubgroupsIotaList {
input = f32[128,32]{0,1} parameter(0)
ROOT ag = f32[128,320]{0,1} all-gather(input), replica_groups=[3,10]<=[6,5]T(1,0), dimensions={1}
}
)",
30,
},
{
"AllToAll",
R"(HloModule AllToAll, entry_computation_layout={(f32[128,32]{0,1})->(f32[128,32]{0,1})}
ENTRY AllToAll {
input = f32[128,32]{0,1} parameter(0)
ROOT a2a = (f32[128,32]{0,1}) all-to-all(input), replica_groups={}
}
)"
},
{
"AllToAllWithSubgroups",
R"(HloModule AllToAllWithSubgroups, entry_computation_layout={(f32[128,32]{0,1}, f32[128,32]{0,1})->(f32[128,32]{0,1}, f32[128,32]{0,1})}, replica_count=4
ENTRY AllToAllWithSubgroups {
p0 = f32[128,32]{0,1} parameter(0)
p1 = f32[128,32]{0,1} parameter(1)
ROOT a2a = (f32[128,32]{0,1}, f32[128,32]{0,1}) all-to-all(p0, p1), replica_groups={{1,2},{3,0}}
}
)",
4,
},
{
"AllToAllWithSubgroupsIotaList",
R"(HloModule AllToAllWithSubgroupsIotaList, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=32
ENTRY AllToAllWithSubgroupsIotaList {
p0 = f32[128,32]{0,1} parameter(0)
ROOT a2a = f32[128,32]{0,1} all-to-all(p0), replica_groups=[4,8]<=[4,8]T(1,0), dimensions={0}
}
)",
40
},
{
"CollectiveBroadcast",
R"(HloModule CollectiveBroadcast, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
ENTRY CollectiveBroadcast {
input = f32[128,32]{0,1} parameter(0)
ROOT cb = f32[128,32]{0,1} collective-broadcast(input), replica_groups={{1,0},{2,3}}
}
)",
4,
},
{
"CollectivePermute",
R"(HloModule CollectivePermute, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
ENTRY CollectivePermute {
input = f32[128,32]{0,1} parameter(0)
ROOT root = f32[128,32]{0,1} collective-permute(input), source_target_pairs={{0,1},{1,2},{2,3}}
}
)",
4
},
{
"CollectivePermuteInPlaceUpdate",
R"(HloModule CollectivePermuteInPlaceUpdate, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}, replica_count=4
ENTRY CollectivePermuteInPlaceUpdate {
input = f32[128,32]{0,1} parameter(0)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.1, constant.2)
ROOT root = f32[128,128]{0,1} collective-permute(input, output, tuple.1, tuple.2), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{128,32}}
}
)",
4
},
{
"CollectivePermuteInPlaceUpdateMultipleReadWrite",
R"(HloModule CollectivePermuteInPlaceUpdateMultipleReadWrite, entry_computation_layout={(f32[8,8,128]{2,1,0})->f32[8,8,128]{2,1,0}}, replica_count=4
ENTRY CollectivePermuteInPlaceUpdate {
constant.3 = s32[] constant(2)
constant.1 = s32[] constant(0)
output_offset.3 = (s32[], s32[], s32[]) tuple(constant.3, constant.1, constant.1)
constant.4 = s32[] constant(3)
output_offset.4 = (s32[], s32[], s32[]) tuple(constant.4, constant.1, constant.1)
input = f32[8,8,128]{2,1,0} parameter(0)
constant = f32[] constant(1)
output = f32[8,8,128]{2,1,0} broadcast(constant), dimensions={}
input_offset.1 = (s32[], s32[], s32[]) tuple(constant.1, constant.1, constant.1)
constant.2 = s32[] constant(1)
input_offset.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.1, constant.1)
input_offset = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(input_offset.1, input_offset.2)
output_offset = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(input_offset.1, input_offset.2)
ROOT root = f32[8,8,128]{2,1,0} collective-permute(input, output, input_offset, output_offset), source_target_pairs={{0,1},{1,2},{2,3},{0,3},{2,1},{3,2}}, slice_sizes={{1,8,128},{1,8,128}}
}
)",
4
},
{
"CollectivePermuteInPlaceUpdateTupleMultipleReadWrite",
R"(HloModule hlo_runner_test_0.1, entry_computation_layout={()->(u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)})}, replica_count=4
ENTRY hlo_runner_test_0.1 {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(replica_id), dimensions={}
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(broadcast.0, broadcast.0)
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(constant.1), dimensions={}
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(broadcast.1, broadcast.2)
constant.2 = s32[] constant(0)
tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.2, constant.2)
constant.3 = s32[] constant(1)
tuple.3 = (s32[], s32[], s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(tuple.2, tuple.3)
tuple.7 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(tuple.2, tuple.2)
tuple.8 = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(tuple.4, tuple.7)
constant.4 = s32[] constant(2)
tuple.5 = (s32[], s32[], s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[], s32[], s32[]), (s32[], s32[], s32[])) tuple(tuple.2, tuple.5)
tuple.9 = (((s32[], s32[], s32[]), (s32[], s32[], s32[])), ((s32[], s32[], s32[]), (s32[], s32[], s32[]))) tuple(tuple.4, tuple.6)
ROOT collective-permute.53 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute(tuple.input, tuple.output, tuple.8, tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
}
)",
4
},
{
"CollectivePermuteTupleInPlaceUpdate",
R"(HloModule CollectivePermuteTupleInPlaceUpdate, entry_computation_layout={(f32[128,32]{0,1})->(f32[128,128]{0,1}, f32[128,128]{0,1})}, replica_count=4
ENTRY CollectivePermuteInPlaceUpdate {
input = f32[128,32]{0,1} parameter(0)
tuple.input = (f32[128,32]{0,1}, f32[128,32]{0,1}) tuple(input, input)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
tuple.output = (f32[128,128]{0,1}, f32[128,128]{0,1}) tuple(output, output)
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.2, constant.1)
tuple.3 = ((s32[], s32[]), (s32[], s32[])) tuple(tuple.1, tuple.2)
tuple.4 = (s32[], s32[]) tuple(constant.1, constant.1)
tuple.5 = (s32[], s32[]) tuple(constant.2, constant.2)
tuple.6 = ((s32[], s32[]), (s32[], s32[])) tuple(tuple.4, tuple.5)
ROOT root = (f32[128,128]{0,1}, f32[128,128]{0,1}) collective-permute(tuple.input, tuple.output, tuple.3, tuple.6), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{64,32},{64,32}}
}
)",
4
},
{
"CollectivePermuteStartAndDone",
R"(HloModule CollectivePermuteStartAndDone, entry_computation_layout={(f32[128,32]{0,1})->f32[128,32]{0,1}}, replica_count=4
ENTRY CollectivePermuteStartAndDone {
input = f32[128,32]{0,1} parameter(0)
collective-permute-start.1 = (f32[128,32]{0,1}, f32[128,32]{0,1}, u32[], u32[]) collective-permute-start(input), source_target_pairs={{0,1},{1,2},{2,3}}
ROOT collective-permute-done.1 = f32[128,32]{0,1} collective-permute-done(collective-permute-start.1)
}
)",
4
},
{
"CollectivePermuteStartAndDoneInplaceUpdate",
R"(HloModule CollectivePermuteStartAndDoneInplaceUpdate, entry_computation_layout={(f32[128,32]{0,1})->f32[128,128]{0,1}}, replica_count=4
ENTRY CollectivePermuteStartAndDoneInplaceUpdate {
input = f32[128,32]{0,1} parameter(0)
constant = f32[] constant(1)
output = f32[128,128]{0,1} broadcast(constant), dimensions={}
constant.1 = s32[] constant(0)
tuple.1 = (s32[], s32[]) tuple(constant.1, constant.1)
constant.2 = s32[] constant(64)
tuple.2 = (s32[], s32[]) tuple(constant.1, constant.2)
collective-permute-start.1 = (f32[128,32]{0,1}, f32[128,128]{0,1}, u32[], u32[]) collective-permute-start(input, output, tuple.1, tuple.2), source_target_pairs={{0,1},{1,2},{2,3}}, slice_sizes={{64,32}}
ROOT collective-permute-done.1 = f32[128,128]{0,1} collective-permute-done(collective-permute-start.1)
}
)",
4
},
{
"ReplicaId",
R"(HloModule replica-id, entry_computation_layout={()->u32[]}
ENTRY Replica-id {
ROOT replica-id = u32[] replica-id()
}
)"
},
{
"PartitionId",
R"(HloModule partition-id, entry_computation_layout={()->u32[]}
ENTRY PartitionId {
ROOT id = u32[] partition-id()
}
)"
},
{
"Iota",
R"(HloModule iota, entry_computation_layout={()->f32[100]{0}}
ENTRY Iota {
ROOT iota = f32[100]{0} iota(), iota_dimension=0
}
)"
},
{
"CustomCallWithWindowAndDimLabelsAndFeatureGroupCount",
R"(HloModule CustomCallWithWindowAndDimLabelsAndFeatureGroupCount, entry_computation_layout={()->f32[100]{0}}
ENTRY Computation {
ROOT r = f32[100]{0} custom-call(), window={size=2x2}, dim_labels=b01f_01io->b01f, feature_group_count=2, custom_call_target="target"
}
)"
},
{
"CustomCallWithUnknownDimLabels",
R"(HloModule CustomCallWithUnknownDimLabels, entry_computation_layout={()->f32[100]{0}}
ENTRY Computation {
ROOT r = f32[100]{0} custom-call(), window={size=2x2}, dim_labels=?b01f_0?1io->b01?f, custom_call_target="target"
}
)"
},
{
"ScheduledModule",
R"(HloModule scheduled_module, is_scheduled=true, entry_computation_layout={(f32[1024]{0}, s32[1024]{0})->(f32[1024]{0}, s32[1024]{0})}
compare {
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
ROOT lhs = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY Sort {
keys = f32[1024]{0} parameter(0)
values = s32[1024]{0} parameter(1)
ROOT sorted = (f32[1024]{0}, s32[1024]{0}) sort(keys, values), dimensions={0}, to_apply=compare
}
)"
},
{
"AfterAllWithMultipleOperands",
R"(HloModule AfterAllWithMultipleOperands, entry_computation_layout={(f32[])->token[]}
ENTRY AfterAllWithMultipleOperands {
p0 = f32[] parameter(0)
token0 = token[] after-all()
token1 = token[] after-all()
ROOT after-all = token[] after-all(p0, token0, token1)
}
)"
},
{
"AddDependency",
R"(HloModule AddDependency, entry_computation_layout={(f32[])->f32[]}
ENTRY AddDependency {
p = f32[] parameter(0)
neg = f32[] negate(p)
token0 = token[] after-all(neg)
p_after_token = f32[] add-dependency(p, token0)
exp = f32[] exponential(p_after_token)
ROOT sum = f32[] add(neg, exp)
}
)"
},
{
"MinMaxValues",
R"(HloModule MinMaxValues, entry_computation_layout={()->c128[2]{0}}
ENTRY MinMaxValues {
x.s4 = s4[2]{0} constant({-8, 7})
x.s8 = s8[2]{0} constant({-128, 127})
x.s16 = s16[2]{0} constant({-32768, 32767})
x.s32 = s32[2]{0} constant({-2147483648, 2147483647})
x.u4 = u4[2]{0} constant({0, 15})
x.u8 = u8[2]{0} constant({0, 255})
x.u16 = u16[2]{0} constant({0, 65535})
x.u32 = u32[2]{0} constant({0, 4294967295})
x.f16 = f16[2]{0} constant({-65504, 65504})
x.bf16 = bf16[2]{0} constant({-3.39e+38, 3.39e+38})
x.f32 = f32[2]{0} constant({-3.40282e+38, 3.40282e+38})
x.f64 = f64[2]{0} constant({-1.79769e+308, 1.79769e+308})
x.c64 = c64[2]{0} constant({(-3.40282e+38, 3.40282e+38), (3.40282e+38, -3.40282e+38)})
ROOT c.c128 = c128[2]{0} constant({(-1.79769e+308, 1.79769e+308), (1.79769e+308, -1.79769e+308)})
}
)"
},
{
"BitcastConvert",
R"(HloModule BitcastConvert, entry_computation_layout={(f32[100]{0})->u32[100]{0}}
ENTRY BitcastConvertUsage {
p = f32[100]{0} parameter(0)
ROOT out = u32[100]{0} bitcast-convert(p)
}
)"
},
});
}
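// Non-roundtrip cases: the input string parses successfully, but the
// canonical short-form output differs from it (e.g. anonymous instructions
// receive generated ".anon" names, and inferred shapes and layouts are made
// explicit).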
std::vector<NonRoundtripTestData> CreateNonRoundtripTestCases() {
return std::vector<NonRoundtripTestData>({
{
"SimpleNesting",
R"(HloModule test
ENTRY test {
ROOT root = add(f32[10] parameter(0), multiply(f32[10] parameter(1), f32[10] parameter(2)))
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f32[10]{0}, f32[10]{0})->f32[10]{0}}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f32[10]{0} parameter(1)
parameter.anon.2 = f32[10]{0} parameter(2)
multiply.anon = f32[10]{0} multiply(parameter.anon.1, parameter.anon.2)
ROOT root = f32[10]{0} add(parameter.anon, multiply.anon)
})"
},
{
"AmbiguousNames",
R"(HloModule test
ENTRY test {
add = add(f32[10] parameter(0), f32[10] parameter(1))
ROOT add2 = add(add, add(add, add))
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f32[10]{0})->f32[10]{0}}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(parameter.anon, parameter.anon.1)
add.anon = f32[10]{0} add(add, add)
ROOT add2 = f32[10]{0} add(add, add.anon)
})"
},
{
"TupleShapeInsideAnonymousInstr",
R"(HloModule test
ENTRY test {
ROOT root = get-tuple-element(
(f32[10], f16[10]) tuple(f32[10] parameter(0), f16[10] parameter(1))
), index=0
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f16[10]{0})->f32[10]{0}}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f16[10]{0} parameter(1)
tuple.anon = (f32[10]{0}, f16[10]{0}) tuple(parameter.anon, parameter.anon.1)
ROOT root = f32[10]{0} get-tuple-element(tuple.anon), index=0
})"
},
{
"MixAnonAndNonAnonOperands",
R"(HloModule test
ENTRY test {
add = add(f32[10] parameter(0), f32[10] parameter(1))
ROOT root = tuple(add, add(add, add), add)
})",
R"(HloModule test, entry_computation_layout={(f32[10]{0}, f32[10]{0})->(f32[10]{0}, f32[10]{0}, f32[10]{0})}
ENTRY test {
parameter.anon = f32[10]{0} parameter(0)
parameter.anon.1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(parameter.anon, parameter.anon.1)
add.anon = f32[10]{0} add(add, add)
ROOT root = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(add, add.anon, add)
})"
},
{
"BroadcastOfScalarDoesntNeedDimensionsAttr",
R"(HloModule test
ENTRY test {
ROOT root = sqrt(f32[10,10] broadcast(f32[] parameter(0)))
})",
R"(HloModule test, entry_computation_layout={(f32[])->f32[10,10]{1,0}}
ENTRY test {
parameter.anon = f32[] parameter(0)
broadcast.anon = f32[10,10]{1,0} broadcast(parameter.anon), dimensions={}
ROOT root = f32[10,10]{1,0} sqrt(broadcast.anon)
})"
},
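// Sparse-layout cases: dim-level-type syntax such as {1,0:D(D,C)}, optional
// index (#) and pointer (*) primitive types, a memory space S(...), and a
// physical shape P(...).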
{
"SparseShape",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)})->f32[10,10]{1,0:D(D,C)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)} parameter(0)
})",
},
{
"SparseShapeWithIndexPrimitiveType",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u32)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)#(u32)})->f32[10,10]{1,0:D(D,C)#(u32)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u32)} parameter(0)
})",
},
{
"SparseShapeWithPointerPrimitiveType",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)*(u32)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)*(u32)})->f32[10,10]{1,0:D(D,C)*(u32)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)*(u32)} parameter(0)
})",
},
{
"SparseShapeWithPhysicalShape",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))})->f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
},
{
"SparseShapeFull",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))})->f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)#(u64)*(u32)S(42)P((s32[10]{0:T(100)}, s32[10]{0:T(100)}, f32[10]{0:T(100)}))} parameter(0)
})",
},
{
"SparseCOO",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+,S)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(C+,S)})->f32[10,10]{1,0:D(C+,S)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+,S)} parameter(0)
})",
},
{
"SparseCOOUnordered",
R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+~,S~)} parameter(0)
})",
R"(HloModule test, entry_computation_layout={(f32[10,10]{1,0:D(C+~,S~)})->f32[10,10]{1,0:D(C+~,S~)}}
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C+~,S~)} parameter(0)
})",
},
});
}
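// Parameterized round-trip test: `short_form` selects printing with
// HloPrintOptions::ShortParsable() instead of the long canonical form, and
// `proto_round_trip` additionally converts the parsed module to an HloProto
// and back before printing.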
template <bool short_form, bool proto_round_trip>
class HloParameterizedParserTest
: public ::testing::Test,
public ::testing::WithParamInterface<TestData> {
protected:
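// Parses the test case's module string (verifying it when requested),
// performs the configured round trips, and expects the re-printed module to
// match the original string exactly.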
void ExpectEqual() {
std::unique_ptr<HloModule> module;
const std::string& original = GetParam().module_string;
HloModuleConfig config;
config.set_replica_count(GetParam().replica_count);
if (GetParam().enable_verification) {
auto verified_module = std::make_unique<VerifiedHloModule>(
GetParam().test_name, config,
false,
true,
ShapeUtil::ByteSizeOfElements);
TF_ASSERT_OK(verified_module->ParseHloStringAndVerifyModule(original));
module = std::move(verified_module);
} else {
TF_ASSERT_OK_AND_ASSIGN(module,
ParseAndReturnUnverifiedModule(original, config));
}
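// Optionally round-trip the module through its proto representation.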
if (proto_round_trip) {
TF_ASSERT_OK_AND_ASSIGN(module, HloModule::CreateFromProto(
module->ToProto(), module->config()));
}
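// Re-print the module and compare against the original input string.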
if (short_form) {
EXPECT_EQ(original, module->ToString(HloPrintOptions::ShortParsable()));
} else {
EXPECT_EQ(
original,
module->ToString(HloPrintOptions().set_print_large_constants(true)));
}
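// Sanity-check that while bodies were wired back to their while
// instructions during parsing.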
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_body()->WhileCallInstruction(), instr);
EXPECT_TRUE(instr->while_body()->IsWhileBodyComputation());
}
}
}
}
};
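// The four combinations of {long, short} printing x {direct, proto} round trip.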
using HloParserTestLong = HloParameterizedParserTest<false, false>;
using HloParserTestLongProto = HloParameterizedParserTest<false, true>;
using HloParserTestShort = HloParameterizedParserTest<true, false>;
using HloParserTestShortProto = HloParameterizedParserTest<true, true>;
TEST_P(HloParserTestLong, Run) { ExpectEqual(); }
TEST_P(HloParserTestLongProto, Run) { ExpectEqual(); }
TEST_P(HloParserTestShort, Run) { ExpectEqual(); }
TEST_P(HloParserTestShortProto, Run) { ExpectEqual(); }
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation, HloParserTestLong,
::testing::ValuesIn(CreateTestCases()),
TestDataToString);
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation,
HloParserTestLongProto,
::testing::ValuesIn(CreateTestCases()),
TestDataToString);
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation, HloParserTestShort,
::testing::ValuesIn(CreateShortTestCases()),
TestDataToString);
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation,
HloParserTestShortProto,
::testing::ValuesIn(CreateShortTestCases()),
TestDataToString);
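// Non-roundtrip tests compare the re-printed module against a separate
// expected output string rather than against the input itself.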
class HloNonRoundtripParserTest
: public ::testing::TestWithParam<NonRoundtripTestData> {};
TEST_P(HloNonRoundtripParserTest, Run) {
auto module = std::make_unique<VerifiedHloModule>(
GetParam().test_name, HloModuleConfig{},
false,
true,
ShapeUtil::ByteSizeOfElements);
TF_ASSERT_OK(
module->ParseHloStringAndVerifyModule(GetParam().input_module_string));
EXPECT_EQ(absl::StripAsciiWhitespace(GetParam().output_module_string),
absl::StripAsciiWhitespace(
module->ToString(HloPrintOptions::ShortParsable())));
}
INSTANTIATE_TEST_SUITE_P(HloParserTestSuccessInstantiation,
HloNonRoundtripParserTest,
::testing::ValuesIn(CreateNonRoundtripTestCases()),
NonRoundtripTestDataToString);
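// Base fixture for ad-hoc parser tests: provides a substring matcher for
// error messages and a helper that parses and verifies a module.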
class HloParserTest : public ::testing::Test {
protected:
static void ExpectHasSubstr(string_view s, string_view expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
absl::StatusOr<std::unique_ptr<VerifiedHloModule>>
ParseAndReturnVerifiedModule(absl::string_view hlo_text) {
auto module = std::make_unique<VerifiedHloModule>(
::testing::UnitTest::GetInstance()->current_test_info()->name(),
HloModuleConfig(),
false,
true,
ShapeUtil::ByteSizeOfElements);
TF_RETURN_IF_ERROR(module->ParseHloStringAndVerifyModule(hlo_text));
return std::move(module);
}
};
TEST_F(HloParserTest, Empty) {
const std::string original = "";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, Garbage) {
const std::string original =
"HloModule thi$ str1ng makes# N0 sen$e @all!*&^%$";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, WrongOpcode) {
const std::string original = R"(HloModule wrong_opcode:
ENTRY %blabla (x: f32[], y: f32[]) -> f32[] {
%x = f32[]{} parameter(0)
%y = f32[]{} parameter(1)
%le = pred[]{} le(f32[]{} %x, f32[]{} %y)
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, MetadataWithCholesky) {
const std::string original = R"(HloModule metadata_with_cholesky
ENTRY %blabla (a: f32[1,291,291]) -> f32[1,291,291] {
%a = f32[1,291,291] parameter(0)
%out = f32[1,291,291] cholesky(f32[1,291,291] %a), lower=true, metadata={op_type="Cholesky" op_name="Cholesky" profile_type={1}}
}
)";
auto result = ParseAndReturnVerifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
EXPECT_EQ("Cholesky", result.value()
->entry_computation()
->root_instruction()
->metadata()
.op_name());
EXPECT_EQ("Cholesky", result.value()
->entry_computation()
->root_instruction()
->metadata()
.op_type());
EXPECT_EQ(WINDOW, *result.value()
->entry_computation()
->root_instruction()
->metadata()
.profile_type()
.begin());
}
TEST_F(HloParserTest, WrongShape) {
const std::string original = R"(HloModule wrong_opcode:
ENTRY %blabla (x: g32[]) -> g32[] {
%x = g32[]{} parameter(0)
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, WrongOperandsSize) {
const std::string original = R"(HloModule wrong_opcode:
ENTRY %blabla (x: f32[]) -> pred[] {
%x = f32[]{} parameter(0)
%eq = pred[]{} compare(f32[]{} %x), direction=EQ
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, OperandNotFound) {
const std::string original = R"(HloModule operand_not_found:
ENTRY %blabla (x: f32[]) -> pred[] {
%x = f32[]{} parameter(0)
%eq = pred[]{} compare(f32[]{} %x, f32[]{} %y), direction=EQ
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, MoreConstants) {
const std::string original = R"(HloModule SelectScalarS32True_module
ENTRY %SelectScalarS32True.v4 () -> s32[] {
%constant.2 = pred[] constant(true)
%constant.1 = s32[] constant(-42), sharding={replicated}
%constant = s32[] constant(42)
%select = s32[] select(pred[] %constant.2, s32[] %constant.1, s32[] %constant)
}
)";
auto result = ParseAndReturnVerifiedModule(original);
TF_EXPECT_OK(result.status());
}
TEST_F(HloParserTest, ConfigurationField) {
const std::string original = R"(HloModule AModule
ENTRY %configuration_test() -> s32[] {
%constant = s32[] constant(42), backend_config="foo bar"
})";
auto result = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(result.status());
EXPECT_EQ("foo bar", result.value()
->entry_computation()
->root_instruction()
->raw_backend_config_string());
}
TEST_F(HloParserTest, LiteralDimensionsError) {
const std::string original = R"(HloModule some_2x3_module
ENTRY %some_2x3 () -> f32[2,3] {
ROOT %constant = f32[2,3]{1,0} constant(}{1, 2, 3}, {4, 5, 6}})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(), "unexpected '}' token");
}
TEST_F(HloParserTest, LiteralDimensionsMismatch_1) {
const std::string original = R"(HloModule some_2_module
ENTRY %some_2 () -> f32[2] {
ROOT %constant = f32[2]{0} constant({1,{2}})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"expects nested array in rank 1, but sees larger");
}
TEST_F(HloParserTest, LiteralDimensionsMismatch_2) {
const std::string original = R"(HloModule some_2x3_module
ENTRY %some_2x3 () -> f32[2,3] {
ROOT %constant = f32[2,3]{1,0} constant({1, 2, 3, 4, 5, 6})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"expects nested array in rank 2, but sees 1");
}
TEST_F(HloParserTest, LiteralDimensionsMismatch_3) {
const std::string original = R"(HloModule some_2x3x2_module
ENTRY %some_2x3x2 () -> f32[2,3,2] {
ROOT %constant = f32[2,3,2]{2,1,0} constant({{{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}, {11, 12}}})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"expects 3 elements in the [0]th element");
}
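// The tests below check range validation of constant literals against their
// primitive type; note that some unsigned cases, e.g. u64 constant(-1) and
// u64 constant(9223372036854775808), are accepted.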
TEST_F(HloParserTest, ConstantF16Overflow) {
const std::string original =
R"(HloModule ConstantF16Overflow_module
ENTRY %ConstantF16Overflow.v4 () -> f16[] {
ROOT %constant = f16[] constant(-65520)
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type F16");
}
TEST_F(HloParserTest, ConstantBf16NoOverflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = bf16[] constant(-65505)
})";
EXPECT_EQ(absl::OkStatus(), ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, ConstantBf16Overflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = bf16[] constant(1e100)
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"out of range");
}
TEST_F(HloParserTest, ConstantU4Underflow) {
const std::string original = R"(
HloModule ConstantU4Underflow_module
ENTRY %ConstantU4Underflow () -> u4[] {
ROOT %constant = u4[] constant(-1)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type U4");
}
TEST_F(HloParserTest, ConstantU4Overflow) {
const std::string original = R"(
HloModule ConstantU4Overflow_module
ENTRY %ConstantU4Overflow () -> u4[] {
ROOT %constant = u4[] constant(16)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type U4");
}
TEST_F(HloParserTest, ConstantS4Underflow) {
const std::string original = R"(
HloModule ConstantS4Underflow_module
ENTRY %ConstantS4Underflow () -> s4[] {
ROOT %constant = s4[] constant(-9)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type S4");
}
TEST_F(HloParserTest, ConstantS4Overflow) {
const std::string original = R"(
HloModule ConstantS4Overflow_module
ENTRY %ConstantS4Overflow () -> s4[] {
ROOT %constant = s4[] constant(8)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type S4");
}
TEST_F(HloParserTest, ConstantUnsignedUnderflow) {
const std::string original = R"(
HloModule ConstantUnsignedUnderflow_module
ENTRY %ConstantUnsignedUnderflow () -> u64[] {
ROOT %constant = u64[] constant(-1)
})";
auto result = ParseAndReturnUnverifiedModule(original);
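  // A negative literal for an unsigned type is currently accepted rather than
  // rejected.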
EXPECT_EQ(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantUnsignedOverflow) {
const std::string original = R"(
HloModule ConstantUnsignedOverflow_module
ENTRY %ConstantUnsignedOverflow () -> u32[] {
ROOT %constant = u32[] constant(4294967296)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
ExpectHasSubstr(result.status().message(),
"is out of range for literal's primitive type U32");
}
TEST_F(HloParserTest, ConstantUnsignedInt64Overflow) {
const std::string original = R"(
HloModule ConstantUnsignedOverflow_module
ENTRY %ConstantUnsignedOverflow () -> u64[] {
ROOT %constant = u64[] constant(9223372036854775808)
})";
auto result = ParseAndReturnUnverifiedModule(original);
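  // 9223372036854775808 == 2^63 overflows s64 but fits in u64, so parsing
  // succeeds.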
EXPECT_EQ(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantC64Overflow) {
const std::string original = R"(
HloModule test_module
ENTRY test () -> c64[] {
ROOT c = c64[] constant((1e100, 0))
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantC64Underflow) {
const std::string original = R"(
HloModule test_module
ENTRY test () -> c64[] {
ROOT c = c64[] constant((0, -1e100))
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantF64Overflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = f64[] constant(1.8e308)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantF64Underflow) {
const std::string original = R"(
HloModule test_module
ENTRY test {
ROOT c = f64[] constant(-1.8e308)
})";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_NE(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ConstantWithExp) {
const std::string original = R"(HloModule ConstantWithExp_module
ENTRY %ConstantWithExp.v4 () -> f32[] {
%constant.1 = f32[] constant(3e+2)
}
)";
auto result = ParseAndReturnVerifiedModule(original);
TF_EXPECT_OK(result.status());
}
TEST_F(HloParserTest, ShortConstant) {
const std::string original =
R"(HloModule ShortConstant_module, entry_computation_layout={()->f32[67,89]{1,0}}
ENTRY %ShortConstant.v4 () -> f32[67,89] {
ROOT %constant.1 = f32[67,89]{1,0} constant({...})
}
)";
auto result = ParseAndReturnVerifiedModule(original);
TF_EXPECT_OK(result.status());
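  // Round-trip printing must preserve the elided "{...}" constant form.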
EXPECT_EQ(result.value()->ToString(HloPrintOptions()), original);
}
TEST_F(HloParserTest, NegativeNan) {
const std::string original =
R"(HloModule NegativeNan_module, entry_computation_layout={()->bf16[2]{0}}
ENTRY %NegativeNan () -> bf16[2] {
ROOT %constant = bf16[2]{0} constant({-nan, -nan})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
EXPECT_EQ(result.value()->ToString(HloPrintOptions()), original);
}
TEST_F(HloParserTest, NanPayload) {
const std::string original =
R"(HloModule NanPayload_module, entry_computation_layout={()->bf16[2]{0}}
ENTRY %NanPayload () -> bf16[2] {
ROOT %constant = bf16[2]{0} constant({-nan(0x7f), -nan(0x3f)})
}
)";
auto result = ParseAndReturnUnverifiedModule(original);
EXPECT_EQ(absl::OkStatus(), result.status());
EXPECT_EQ(result.value()->ToString(HloPrintOptions()), original);
}
TEST_F(HloParserTest, InvalidNanPayloadBf16) {
const std::string original =
R"(HloModule InvalidNanPayloadBf16_module, entry_computation_layout={()->bf16[1]{0}}
ENTRY %NanPayload () -> bf16[1] {
ROOT %constant = bf16[1]{0} constant({nan(0x3ff)})
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"tries to set NaN payload 0x3ff");
}
TEST_F(HloParserTest, InvalidNanPayloadF8e4m3fn) {
const std::string original =
R"(HloModule InvalidNanPayloadF8e4m3fn_module, entry_computation_layout={()->f8e4m3fn[1]{0}}
ENTRY %NanPayload () -> f8e4m3fn[1] {
ROOT %constant = f8e4m3fn[1]{0} constant({nan(0x1)})
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"tries to set NaN payload 0x1");
}
TEST_F(HloParserTest, InvalidNanPayloadF8e4m3b11fnuz) {
const std::string original =
R"(HloModule InvalidNanPayloadF8e4m3b11fnuz_module, entry_computation_layout={()->f8e4m3b11fnuz[1]{0}}
ENTRY %NanPayload () -> f8e4m3b11fnuz[1] {
ROOT %constant = f8e4m3b11fnuz[1]{0} constant({nan(0x1)})
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"tries to set NaN payload 0x1");
}
TEST_F(HloParserTest, AttributesAnyOrder) {
const std::string original = R"(HloModule any_order_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,4,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,4,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), feature_group_count=1, sharding={maximal device=1}, backend_config="foo", dim_labels=b0f_0io->b0f, window={pad=1_1 size=1}
}
)";
TF_EXPECT_OK(ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, InvalidDimLabels) {
std::string prefix = R"(HloModule invalid_dim_labels_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1} )";
std::string suffix = R"(
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(
absl::StrCat(prefix, ",dim_labels=00_01->10", suffix))
.status()
.message(),
"expects unique");
ExpectHasSubstr(ParseAndReturnUnverifiedModule(
absl::StrCat(prefix, ",dim_labels=012_0123->210", suffix))
.status()
.message(),
"must have same number of spatial dimensions");
ExpectHasSubstr(ParseAndReturnUnverifiedModule(
absl::StrCat(prefix, ",dim_labels=013_0123->210", suffix))
.status()
.message(),
"expects [0-2bf?]");
}
TEST_F(HloParserTest, UnexpectedAttribute) {
const std::string original = R"(HloModule unexpected_attr_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, calls=%recv
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"unexpected attribute \"calls\"");
}
TEST_F(HloParserTest, MissingAttribute) {
const std::string original = R"(HloModule missing_attr_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(-2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0)
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"attribute channel_id is expected but not seen");
}
TEST_F(HloParserTest, PredecessorUndefined) {
const std::string original = R"(HloModule pre_not_found_module
ENTRY %TwoSendRecvBothWayRecvFist.v3 () -> f32[] {
%token0 = token[] after-all()
%recv = (f32[], u32[], token[]) recv(token[] %token0), channel_id=15
%recv-done = (f32[], token[]) recv-done((f32[], u32[], token[]) %recv), channel_id=15
ROOT %constant = f32[] constant(2.1)
%send = (f32[], u32[], token[]) send(f32[] %constant, token[] %token0), channel_id=16, control-predecessors={%done}
%send-done = token[] send-done((f32[], u32[], token[]) %send), channel_id=16
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"'done' is not defined");
}
TEST_F(HloParserTest, SliceAllowOmitStride1) {
const std::string original = R"(HloModule slice_module
ENTRY %slice.v2 (p0: f32[3,3,4,4]) -> f32[3,3,2,4] {
%p0 = f32[3,3,4,4]{3,2,1,0} parameter(0)
ROOT %slice = f32[3,3,2,4]{3,2,1,0} slice(f32[3,3,4,4]{3,2,1,0} %p0), slice={[0:3], [0:3], [0:4:2], [0:4]}
}
)";
TF_EXPECT_OK(ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, PaddingConfigIsNotWindowPad) {
const std::string original = R"(HloModule window_pad_module
ENTRY %Convolve1D1Window_0.v3 (input: f32[1,2,1], filter: f32[1,1,1]) -> f32[1,2,1] {
%input = f32[1,2,1]{2,1,0} parameter(0)
%copy = f32[1,2,1]{2,0,1} copy(f32[1,2,1]{2,1,0} %input)
%filter = f32[1,1,1]{2,1,0} parameter(1)
ROOT %convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), dim_labels=b0f_0io->b0f, window={pad=1_1_0 size=1}
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects padding_low and padding_high separated by '_'");
}
TEST_F(HloParserTest, CommaBetweenSubAttributes) {
const std::string original = R"(HloModule test_comma_module
ENTRY %test_comma.v4 () -> f32[] {
ROOT %constant = f32[] constant(-4.2), metadata={source_line=5, op_type="::const"}
}
)";
TF_EXPECT_OK(ParseAndReturnVerifiedModule(original).status());
}
TEST_F(HloParserTest, ComputationShapeDoesNotMatchRootShape) {
const std::string original = R"(HloModule custom_call:
ENTRY %CustomCall () -> f32[1] {
%constant = f32[1]{0} constant({12345})
ROOT %foo = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo\"bar"
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Shape of computation CustomCall, f32[1], is not compatible "
"with that of its root instruction foo, f32[1,2,3]");
}
TEST_F(HloParserTest, EntryComputationLayoutNotDefined) {
const std::string original = R"(
HloModule layout_not_defined
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
auto program_layout = module.value()->entry_computation_layout();
ASSERT_EQ(program_layout.parameter_count(), 1);
auto param_layout = program_layout.parameter_layout(0).layout();
auto result_layout = program_layout.result_layout().layout();
EXPECT_TRUE(
LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1, 2}), param_layout))
<< "actual layout of parameter(0) is "
<< LayoutUtil::HumanString(param_layout);
EXPECT_TRUE(LayoutUtil::Equal(LayoutUtil::MakeLayout({0, 1}), result_layout))
<< "actual layout of result is "
<< LayoutUtil::HumanString(result_layout);
}
TEST_F(HloParserTest, EntryComputationLayoutDefined) {
const std::string original = R"(
HloModule layout_defined, entry_computation_layout={(f32[8,16,256]) -> f32[8,16]}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(false));
TF_ASSERT_OK(module.status());
EXPECT_FALSE(module.value()->entry_computation_layout().AnyLayoutSet());
}
TEST_F(HloParserTest, DoNotSetEntryComputationLayoutIfSet) {
const std::string original = R"(
HloModule layout_defined, entry_computation_layout={(f32[8,16,256]{1,2,0}) -> f32[8,16]}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation_layout()
.parameter_layout(0)
.layout()
.minor_to_major(),
ElementsAre(1, 2, 0));
}
TEST_F(HloParserTest, SetEntryComputationLayoutIfNotSet) {
const std::string original = R"(
HloModule layout_defined, entry_computation_layout={(f32[8,16,256]) -> f32[8,16]}
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %Reduce (input: f32[8,16,256]) -> f32[8,16] {
input = f32[8,16,256]{0,1,2} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{0,1} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation_layout()
.parameter_layout(0)
.layout()
.minor_to_major(),
ElementsAre(2, 1, 0));
}
TEST_F(HloParserTest, DoNotFallBackToDefaultLayoutIfDisabled) {
const std::string original = R"(
HloModule t
ENTRY main {
p0 = f16[16,32,48,64]{3,2,1,0} parameter(0)
p1 = f16[80,64,48,32]{3,2,1,0} parameter(1)
ROOT dot = f16[64,32,16,80] dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={3,1}, rhs_batch_dims={1,3}
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(false));
TF_ASSERT_OK(module.status());
EXPECT_FALSE(module.value()
->entry_computation()
->root_instruction()
->shape()
.has_layout());
}
TEST_F(HloParserTest, FallBackToDefaultLayoutIfEnabled) {
const std::string original = R"(
HloModule t
ENTRY main {
p0 = f16[16,32,48,64]{3,2,1,0} parameter(0)
p1 = f16[80,64,48,32]{3,2,1,0} parameter(1)
ROOT dot = f16[64,32,16,80] dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={3,1}, rhs_batch_dims={1,3}
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation()
->root_instruction()
->shape()
.layout()
.minor_to_major(),
ElementsAre(3, 2, 1, 0));
}
TEST_F(HloParserTest, FallBackToDefaultLayoutIfAlreadySet) {
const std::string original = R"(
HloModule t
ENTRY main {
p0 = f16[16,32,48,64]{3,2,1,0} parameter(0)
p1 = f16[80,64,48,32]{3,2,1,0} parameter(1)
ROOT dot = f16[64,32,16,80]{1,2,0,3} dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={3,1}, rhs_batch_dims={1,3}
})";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(
original, {}, HloParserOptions().set_fill_missing_layouts(true));
TF_ASSERT_OK(module.status());
EXPECT_THAT(module.value()
->entry_computation()
->root_instruction()
->shape()
.layout()
.minor_to_major(),
ElementsAre(1, 2, 0, 3));
}
TEST_F(HloParserTest, NoEntry) {
const std::string original = R"(HloModule no_entry:
c1 {
const1 = f32[1]{0} constant({12345})
}
c2 {
const2 = f32[1]{0} constant({67890})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
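  // Without an ENTRY marker, the last computation in the file becomes the
  // entry.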
EXPECT_EQ(module.value()->entry_computation()->name(), "c2");
}
TEST_F(HloParserTest, NoRoot) {
const std::string original = R"(HloModule no_root:
ENTRY consts {
first = f32[1]{0} constant({12345})
last = f32[1]{0} constant({67890})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
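  // Without an explicit ROOT, the last instruction of the computation becomes
  // the root.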
EXPECT_EQ(module.value()->entry_computation()->root_instruction()->name(),
"last");
}
TEST_F(HloParserTest, Comments) {
const std::string original = R"(
HloModule comments:
ENTRY c1 {
ROOT const1 = f32[1]{0} constant({12345 })
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, MultilineComments) {
const std::string original = R"(HloModule multiline_comment:
ENTRY c1 {
ROOT const1 = f32[1]{0} constant({12345})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, UnterminatedComment) {
const std::string original = R"(HloModule unterminated_comment:
ENTRY c1 {
/* unterminated
ROOT const1 = f32[1]{0} constant({12345})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"/* unterminated\n^");
}
TEST_F(HloParserTest, SlashSlashComments) {
const std::string original = R"(HloModule slash_slash_comment:
ENTRY c1 {
ROOT const1 = f32[1]{0} constant({12345})
})";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, SlashSlashCommentMsDosEolFormat) {
const std::string original =
"HloModule slash_slash_comment:\r\n
"bar\r\nROOT const1 = f32[1]{0} constant({12345})
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, SlashSlashCommentMacEolFormat) {
const std::string original =
"HloModule slash_slash_comment:\r
"bar\rROOT const1 = f32[1]{0} constant({12345})
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
}
TEST_F(HloParserTest, MultipleEntries) {
const std::string original = R"(HloModule multiple_entries:
ENTRY c1 {
const1 = f32[1]{0} constant({12345})
}
ENTRY c2 {
const2 = f32[1]{0} constant({67890})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects only one ENTRY");
}
TEST_F(HloParserTest, SimpleAliasing) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0}: (0, {0}, must-alias), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
std::unique_ptr<HloModule> parsed_module = std::move(module).value();
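  // Parameter index {0} was annotated must-alias; {1} carries no annotation
  // and therefore defaults to may-alias.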
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {0}),
ShapeIndex{0});
EXPECT_TRUE(
parsed_module->input_output_alias_config().ParameterMustAlias(0, {0}));
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {1}),
ShapeIndex{1});
EXPECT_FALSE(
parsed_module->input_output_alias_config().ParameterMustAlias(0, {1}));
}
TEST_F(HloParserTest, NestedAliasing) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, 0}: (0, {0}), {1, 1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
%t0 = (f32[], f32[]) tuple(%p0, %p1)
%t1 = (f32[], f32[]) tuple(%p0, %p1)
ROOT %out = ((f32[], f32[]), (f32[], f32[])) tuple(%t0, %t1)
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
std::unique_ptr<HloModule> parsed_module = std::move(module).value();
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {0}),
ShapeIndex({0, 0}));
EXPECT_EQ(parsed_module->input_output_alias_config().GetAliasedOutput(0, {1}),
ShapeIndex({1, 1}));
}
TEST_F(HloParserTest, AliasingWrongIndex) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0 : (0, {0}), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expects '}' at the end of ShapeIndex");
}
TEST_F(HloParserTest, AliasingShapeIndexNotNumerical) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, a}: (0, {0}), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, AliasingWrongFormatNoColon) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, 0}: (0, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expects '{' at the start of ShapeIndex");
}
TEST_F(HloParserTest, AliasingWrongFormatTwoColons) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0}: (0, {0}): {0, 1}, {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expects '}' at the end of aliasing description");
}
TEST_F(HloParserTest, AliasingWrongFormatAlphaParam) {
const std::string original = R"(
HloModule Module, input_output_alias={ {0, a}: (zero, {0}), {1}: (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, SimpleBufferDonor) {
const std::string original = R"(
HloModule Module, buffer_donor={ (0, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
auto module = ParseAndReturnVerifiedModule(original);
TF_ASSERT_OK(module.status());
std::unique_ptr<HloModule> parsed_module = std::move(module).value();
EXPECT_TRUE(
parsed_module->buffer_donor_config().ParameterIsBufferDonor(0, {0}));
EXPECT_TRUE(
parsed_module->buffer_donor_config().ParameterIsBufferDonor(0, {1}));
EXPECT_FALSE(
parsed_module->buffer_donor_config().ParameterIsBufferDonor(0, {}));
}
TEST_F(HloParserTest, BufferDonorShapeIndexNotNumerical) {
const std::string original = R"(
HloModule Module, buffer_donor={ (0, {0, a}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, BufferDonorWrongFormatAlphaParam) {
const std::string original = R"(
HloModule Module, buffer_donor={ (zero, {0}), (0, {1}) }
ENTRY entry {
%p = (f32[], f32[]) parameter(0)
%p0 = f32[] get-tuple-element((f32[], f32[]) %p), index=0
%p1 = f32[] get-tuple-element((f32[], f32[]) %p), index=1
ROOT %out = (f32[], f32[]) tuple(%p0, %p1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"expects integer");
}
TEST_F(HloParserTest, MultipleRoots) {
const std::string original = R"(HloModule multiple_roots:
ENTRY consts {
ROOT const1 = f32[1]{0} constant({12345})
ROOT const2 = f32[1]{0} constant({12345})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"one computation should have only one ROOT");
}
TEST_F(HloParserTest, ComputationExists) {
const std::string original = R"(HloModule comp_exists
comp {
const1 = f32[1]{0} constant({12345})
}
comp {
const2 = f32[1]{0} constant({67890})
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
R"(was parsing 2:1: error: computation previously defined here
comp {
^)");
}
TEST_F(HloParserTest, CrossComputationLookup) {
const std::string original = R"(HloModule cross_computation_lookup:
tcalla (a: (s32[], s32[])) -> (s32[], s32[]) {
ROOT aparam = (s32[], s32[]) parameter(0)
}
tcallb (b: (s32[], s32[])) -> s32[] {
rparam = (s32[], s32[]) parameter(0)
ROOT gte0 = s32[] get-tuple-element(aparam), index=0
}
ENTRY entry {
param = (s32[], s32[]) parameter(0)
call0 = (s32[], s32[]) call(param), to_apply=tcalla
ROOT call1 = s32[] call(param), to_apply=tcallb
})";
ExpectHasSubstr(
ParseAndReturnUnverifiedModule(original).status().message(),
"was parsing 8:39: error: instruction does not exist: aparam");
}
TEST_F(HloParserTest, SameNameDiffComputations) {
const std::string original = R"(HloModule same_names:
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT result = f32[] add(p0, p1)
}
ENTRY ReduceR3ToR2 {
p0 = f32[8,16,256]{2,1,0} parameter(0)
p1 = f32[] constant(0)
ROOT result = f32[8,16]{1,0} reduce(p0, p1), dimensions={2}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(original));
ASSERT_NE(module->entry_computation(), nullptr);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Reduce()));
}
TEST_F(HloParserTest, ParseSharding) {
const std::string original = "{maximal device=42}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
}
TEST_F(HloParserTest, ParseShardingPartialReplication) {
const std::string original = "{devices=[2,2]0,1,2,3 last_tile_dim_replicate}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
Array<int64_t> tiling_last_dim_replicated({{0, 1}, {2, 3}});
EXPECT_EQ(HloSharding::PartialTile(tiling_last_dim_replicated).ToString(),
original);
}
TEST_F(HloParserTest, ParseShardingSubGroup) {
const std::string original =
"{devices=[2,2,2,2]0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 "
"last_tile_dims={manual, replicated}}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
Array<int64_t> tile_assignment({2, 2, 2, 2});
tile_assignment.FillIota(0);
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types).ToString(),
original);
}
TEST_F(HloParserTest, ParseTrivialIotaShardingPartialReplication) {
const std::string original = "{devices=[2,2]<=[4] last_tile_dim_replicate}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tiling_last_dim_replicated((absl::Span<const int64_t>){2, 2});
EXPECT_EQ(HloSharding::PartialTile(tiling_last_dim_replicated).ToString(),
original);
}
TEST_F(HloParserTest, ParseTrivialIotaShardingSubGroup) {
const std::string original =
"{devices=[2,2,2,2]<=[16] last_tile_dims={manual, replicated}}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tile_assignment({2, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types).ToString(),
original);
}
TEST_F(HloParserTest, ParseTransposedIotaShardingPartialReplication) {
const std::string original =
"{devices=[2,2]<=[2,2]T(1,0) last_tile_dim_replicate}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tiling_last_dim_replicated({2, 2}, {2, 2}, {1, 0});
EXPECT_EQ(HloSharding::PartialTile(tiling_last_dim_replicated).ToString(),
original);
}
TEST_F(HloParserTest, ParseTransposedIotaShardingSubGroup) {
const std::string original =
"{devices=[2,2,2,2]<=[2,2,4]T(2,1,0) last_tile_dims={manual, "
"replicated}}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tile_assignment({2, 2, 2, 2}, {2, 2, 4}, {2, 1, 0});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types).ToString(),
original);
}
TEST_F(HloParserTest, ParseShardAs) {
const std::string original = "{manual shard_as 1}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
EXPECT_EQ(
HloSharding::Manual().SetShardGroup(HloSharding::ShardAs(1)).ToString(),
original);
}
TEST_F(HloParserTest, ParseShardLike) {
const std::string original =
"{devices=[2,2,2,2]<=[16] last_tile_dims={manual, replicated} shard_like "
"1}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
TileAssignment tile_assignment({2, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
EXPECT_EQ(HloSharding::Subgroup(tile_assignment, subgroup_types)
.SetShardGroup(HloSharding::ShardLike(1))
.ToString(),
original);
}
TEST_F(HloParserTest, ParseUnknownSharding) {
const std::string original = "{unknown}";
TF_ASSERT_OK_AND_ASSIGN(HloSharding sharding, ParseSharding(original));
EXPECT_EQ(sharding.ToString(), original);
EXPECT_EQ(HloSharding::Unknown().ToString(), original);
}
TEST_F(HloParserTest, ParseFrontendAttributes) {
const std::string original =
R"({attr_a="test_a",attr_b="b",attr_c="s64",attr_d="a/b"})";
TF_ASSERT_OK_AND_ASSIGN(FrontendAttributes frontend_attributes,
ParseFrontendAttributes(original));
EXPECT_EQ(FrontendAttributesToString(frontend_attributes), original);
}
TEST_F(HloParserTest, ParseWindow) {
Window original = window_util::MakeWindow({1, 2, 3});
TF_ASSERT_OK_AND_ASSIGN(Window parsed,
                          ParseWindow(window_util::ToString(original)));
EXPECT_EQ(window_util::ToString(original), window_util::ToString(parsed));
}
TEST_F(HloParserTest, ParseConvolutionDimensionNumbers) {
const std::string original = "b0f_0io->b0f";
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers dnums,
ParseConvolutionDimensionNumbers(original));
EXPECT_EQ(original, ConvolutionDimensionNumbersToString(dnums));
}
TEST_F(HloParserTest, ParseConvolutionDimensionNumbersWithUnknownDims) {
const std::string original = "b0?f_?0?io->?b?0?f";
TF_ASSERT_OK_AND_ASSIGN(ConvolutionDimensionNumbers dnums,
ParseConvolutionDimensionNumbers(original));
EXPECT_EQ(original, ConvolutionDimensionNumbersToString(dnums));
}
TEST_F(HloParserTest, ParseReplicaGroups) {
const std::string original = "{{0,1},{2,3}}";
TF_ASSERT_OK_AND_ASSIGN(std::vector<ReplicaGroup> replica_groups,
ParseReplicaGroupsOnly(original));
EXPECT_EQ(original, ReplicaGroupsToString(replica_groups));
}
TEST_F(HloParserTest, ParsePaddingConfigNoInteriorPadding) {
const std::string original = "0_1x2_3";
TF_ASSERT_OK_AND_ASSIGN(PaddingConfig dnums, ParsePaddingConfig(original));
EXPECT_EQ(original, PaddingConfigToString(dnums));
}
TEST_F(HloParserTest, ParsePaddingConfigInteriorPadding) {
const std::string original = "0_1_0x2_3_4";
TF_ASSERT_OK_AND_ASSIGN(PaddingConfig dnums, ParsePaddingConfig(original));
EXPECT_EQ(original, PaddingConfigToString(dnums));
}
TEST_F(HloParserTest, ParsePaddingConfigInteriorPaddingImplicitZeroDim) {
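  // "0_1" omits interior padding; it defaults to 0 and prints as "0_1_0".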
TF_ASSERT_OK_AND_ASSIGN(PaddingConfig dnums, ParsePaddingConfig("0_1x2_3_4"));
EXPECT_EQ("0_1_0x2_3_4", PaddingConfigToString(dnums));
}
TEST_F(HloParserTest, NontupleInfeed) {
const std::string original = R"(HloModule nontuple_infeed:
ENTRY nontuple_infeed {
token0 = token[] after-all()
ROOT infeed = pred[] infeed(token0)
})";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"infeed must have a non-empty tuple shape");
}
TEST(HloParserSingleOpTest, SingleOp) {
const std::string text =
"%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, "
"f32[2,4]{1,0} %x)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
}
TEST(HloParserSingleOpTest, SingleOpNoShapeProducesError) {
const std::string text =
"multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(text);
ASSERT_TRUE(!module.status().ok());
LOG(INFO) << "Status: " << module.status();
EXPECT_THAT(module.status().ToString(),
HasSubstr("expects '=' in instruction"));
}
TEST(HloParserSingleOpTest, SingleOpNoOperandShapesProducesError) {
const std::string text = "%multiply = f32[2,4]{1,0} multiply(%broadcast, %x)";
absl::StatusOr<std::unique_ptr<HloModule>> module =
ParseAndReturnUnverifiedModule(text);
ASSERT_TRUE(!module.status().ok());
LOG(INFO) << "Status: " << module.status();
EXPECT_THAT(module.status().ToString(),
HasSubstr("Operand had no shape in HLO text"));
}
TEST(HloParserSingleOpTest, SingleOpNoNames) {
const std::string text =
"%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0}, f32[2,4]{1,0})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
}
TEST(HloParserSingleOpTest, CanonicalOp) {
const std::string text =
"f32[2,4]{1,0} multiply(f32[2,4]{1,0}, f32[2,4]{1,0})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Multiply(m::Parameter(0), m::Parameter(1))));
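  // Canonical printing of the parsed op must reproduce the input exactly.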
EXPECT_EQ(
computation->root_instruction()->ToString(HloPrintOptions::Canonical()),
text);
}
TEST(HloParserSingleOpTest, CanonicalOpWithNested) {
const std::string text =
R"(f32[5,20]{1,0} while(f32[5,10]{1,0}), condition=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
}, body=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_EQ(
computation->root_instruction()->ToString(HloPrintOptions::Canonical()),
text);
}
TEST(HloParserSingleOpTest, CanonicalOpIndexedConditionalInlinedBranches) {
const std::string text =
R"(f32[5,10]{1,0} conditional(s32[], f32[5,10]{1,0}, f32[5,10]{1,0}, f32[5,10]{1,0}), branch_computations={
{
tmp_0 = f32[5,10]{1,0} parameter(0)
ROOT tmp_1 = f32[5,10]{1,0} ceil(f32[5,10]{1,0} tmp_0)
},
{
tmp_0 = f32[5,10]{1,0} parameter(0)
ROOT tmp_1 = f32[5,10]{1,0} floor(f32[5,10]{1,0} tmp_0)
},
{
tmp_0 = f32[5,10]{1,0} parameter(0)
ROOT tmp_1 = f32[5,10]{1,0} copy(f32[5,10]{1,0} tmp_0)
}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_EQ(
computation->root_instruction()->ToString(HloPrintOptions::Canonical()),
text);
}
TEST(HloParserSingleOpTest, SingleOpWithNested) {
const std::string text =
R"(%fusion = f32[3,2,1,1]{3,2,1,0} fusion(f32[3,2,1,1]{3,2,1,0} %p0, f32[2]{0} %p1), kind=kLoop, calls=
{
%param_0 = f32[3,2,1,1]{3,2,1,0} parameter(0)
%param_1 = f32[2]{0} parameter(1)
%broadcast = f32[3,2,1,1]{3,2,1,0} broadcast(f32[2]{0} %param_1), dimensions={1}
ROOT %subtract = f32[3,2,1,1]{3,2,1,0} subtract(f32[3,2,1,1]{3,2,1,0} %param_0, f32[3,2,1,1]{3,2,1,0} %broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Op()
.WithOpcode(HloOpcode::kFusion)
.WithNumOperands(2)
.WithOperand(0, m::Parameter(0))
.WithOperand(1, m::Parameter(1))));
}
TEST(HloParserSingleOpTest, SingleOpWithNested_DoesNotExist) {
const std::string text =
R"(reduce = f32[] reduce(f32[10], f32[]), dimensions={1}, to_apply=
{
result = f32[] add(f32[] x, f32[] y)
})";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("does not exist: x"));
}
TEST(HloParserSingleOpTest, SingleOpWithNested_NoLhs) {
const std::string text =
R"(reduce = f32[] reduce(f32[10], f32[]), dimensions={1}, to_apply=
{
f32[] add(f32[] x, f32[] y)
})";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("expects name"));
}
TEST(HloParserSingleOpTest, SingleOpWithNested_NoOperandName) {
const std::string text =
R"(reduce = f32[] reduce(f32[10], f32[]), dimensions={1}, to_apply=
{
result = f32[] add(f32[], f32[])
})";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("expects name"));
}
TEST(HloParserSingleOpTest, ConvolutionTrivialFeatureGroupCount) {
const std::string text =
R"(%convolution = f32[1,2,1]{2,0,1} convolution(f32[1,2,1]{2,0,1} %copy, f32[1,1,1]{2,1,0} %filter), window={size=1}, dim_labels=b0f_0io->b0f)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(text));
const HloComputation* computation = module->entry_computation();
ASSERT_NE(computation, nullptr);
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Convolution(m::Parameter(0), m::Parameter(1))));
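  // With no feature_group_count attribute, the parser defaults it to 1.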
auto* convolution =
Cast<HloConvolutionInstruction>(computation->root_instruction());
EXPECT_EQ(convolution->feature_group_count(), 1);
}
TEST(HloParserSingleOpTest, MultipleOpsProducesError) {
const std::string text = R"(
param = f32[2,5,1,3] parameter(0)
transpose = f32[1,5,2,3] transpose(param), dimensions={2,1,0,3}
)";
auto status = ParseAndReturnUnverifiedModule(text).status();
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(), HasSubstr("Expected eof"));
}
TEST_F(HloParserTest, IsScheduledIsFalse) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=false
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_FALSE(module->has_schedule());
}
TEST_F(HloParserTest, IsScheduledNotPresent) {
const std::string text = R"(
HloModule axpy_module
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_FALSE(module->has_schedule());
}
TEST_F(HloParserTest, IsScheduledIsTrue) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=true
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%x = f32[2,4]{1,0} parameter(1)
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
%y = f32[2,4]{1,0} parameter(2)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
EXPECT_EQ(module->schedule().sequences().size(), 1);
ASSERT_TRUE(
module->schedule().is_computation_scheduled(module->entry_computation()));
EXPECT_THAT(
module->schedule().sequence(module->entry_computation()).instructions(),
ElementsAre(GmockMatch(m::Parameter()), GmockMatch(m::Broadcast()),
GmockMatch(m::Parameter()), GmockMatch(m::Multiply()),
GmockMatch(m::Parameter()), GmockMatch(m::Add())));
}
TEST_F(HloParserTest, IsScheduledIsTrueDifferentOrder) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=true
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%x = f32[2,4]{1,0} parameter(1)
%y = f32[2,4]{1,0} parameter(2)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
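  // The schedule must follow the textual instruction order (all parameters
  // first here), not a recomputed def-before-use order.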
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK(module->schedule().Verify());
EXPECT_EQ(module->schedule().sequences().size(), 1);
ASSERT_TRUE(
module->schedule().is_computation_scheduled(module->entry_computation()));
EXPECT_THAT(
module->schedule().sequence(module->entry_computation()).instructions(),
ElementsAre(GmockMatch(m::Parameter()), GmockMatch(m::Parameter()),
GmockMatch(m::Parameter()), GmockMatch(m::Broadcast()),
GmockMatch(m::Multiply()), GmockMatch(m::Add())));
}
TEST_F(HloParserTest, CustomCallWrongNumberofOperandConstraints) {
const std::string original =
R"(HloModule CustomCallWrongNumberofOperandConstraints
ENTRY %CustomCallWrongNumberofOperandConstraints (p0: f32[42,2,3], p1: f32[123,4]) -> f32[1,2,3] {
%p0 = f32[42,2,3]{0,1,2} parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = f32[1,2,3]{0,1,2} custom-call(f32[42,2,3]{0,1,2} %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={f32[42,2,3]{0,1,2}}
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Expected 2 operand layout constraints, 1 given");
}
TEST_F(HloParserTest, CustomCallIncompatibleOperandConstraints) {
const std::string original =
R"(HloModule CustomCallIncompatibleOperandConstraints
ENTRY %CustomCallIncompatibleOperandConstraints (p0: f32[42,2,3], p1: f32[123,4]) -> f32[1,2,3] {
%p0 = f32[42,2,3]{0,1,2} parameter(0)
%p1 = f32[123,4]{0,1} parameter(1)
ROOT %custom-call = f32[1,2,3]{0,1,2} custom-call(f32[42,2,3]{0,1,2} %p0, f32[123,4]{0,1} %p1), custom_call_target="baz", operand_layout_constraints={f32[42,2,3]{0,1,2}, f32[555,5]{1,0}}
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"operand 1 is not compatible with operand shape");
}
TEST_F(HloParserTest, CustomCallWithNonexistentVersion) {
const std::string original = R"(HloModule custom_call
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", api_version=API_VERSION_THAT_DOESNT_EXIST
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Unknown API version");
}
TEST_F(HloParserTest, CustomCallWithUnspecifiedVersion) {
const std::string original = R"(HloModule custom_call
ENTRY %CustomCall () -> f32[1,2,3] {
%constant = f32[1]{0} constant({12345})
ROOT %custom-call.1 = f32[1,2,3]{0,2,1} custom-call(f32[1]{0} %constant), custom_call_target="foo", api_version=API_VERSION_UNSPECIFIED
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(original).status().message(),
"Invalid API version");
}
TEST_F(HloParserTest, AllowShapeWhitespace) {
const std::string text = R"(
HloModule module
ENTRY entry {
ROOT root = f32[ 1, 2,3, 4, 5]{0, 1, 2,3, 4 } parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
}
TEST_F(HloParserTest, ShapeMismatchInOperand) {
const std::string text = R"(
HloModule foobar
ENTRY %entrycomp (p: f32[2,2]) -> f32[2,2] {
%p = f32[2,2] parameter(0)
%constant.1 = f32[2,2] constant({{1, 2}, {3, 4}})
ROOT %add.1 = f32[2,2] add(f32[2,2] %p, f32[2,5] %constant.1)
}
)";
ExpectHasSubstr(ParseAndReturnUnverifiedModule(text).status().message(),
"The declared operand shape f32[2,5]{1,0} is not compatible"
" with the shape of the operand instruction f32[2,2]{1,0}.");
}
TEST_F(HloParserTest, ParseShapeStringR2F32) {
std::string shape_string = "f32[123,456]";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShape(F32, {123, 456});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringUnbounded) {
std::string shape_string = "f32[?,784]";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected =
ShapeUtil::MakeShape(F32, {Shape::kUnboundedSize, 784}, {true, false});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringTupleOfArrays) {
std::string shape_string = "(f32[1572864],s8[5120,1024])";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {1572864}),
ShapeUtil::MakeShape(S8, {5120, 1024})});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringNestedTuple) {
std::string shape_string = "(f32[1],(f32[2], token[]), opaque[], f32[3])";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {1}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {2}), ShapeUtil::MakeTokenShape()}),
ShapeUtil::MakeOpaqueShape(),
ShapeUtil::MakeShape(F32, {3}),
});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithLayout) {
std::string shape_string = "f32[123,456]{0,1}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(F32, {123, 456}, {0, 1});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithTilingLayout) {
std::string shape_string = "f32[123,456]{0,1:T(2,128)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(F32, {123, 456}, {0, 1},
{Tile({2, 128})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "f32[123,456,789]{0,1,2:T(2, * , 128)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
F32, {123, 456, 789}, {0, 1, 2},
{Tile({2, Tile::kCombineDimension, 128})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "bf16[123,456,789]{2,1,0:T(2,*,128)(2,1)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
BF16, {123, 456, 789}, {2, 1, 0},
{Tile({2, Tile::kCombineDimension, 128}), Tile({2, 1})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "f32[123,456,789]{1:T(2, * , 128)}";
auto result = ParseShape(shape_string);
ExpectHasSubstr(result.status().message(),
"Dimensions size is 3, but minor to major size is 1.");
}
TEST_F(HloParserTest, ParseShapeStringWithElementSizeInBits) {
std::string shape_string = "s4[123,456]{1,0:T(2,128)E(4)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(S4, {123, 456}, {1, 0},
{Tile({2, 128})}, 1, 4);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithMemorySpaceLayout) {
std::string shape_string = "pred[123,456]{1,0:T(2,128)S(3)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {Tile({2, 128})}, 1, 0, 3);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:S(3)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(PRED, {123, 456}, {1, 0}, {},
1, 0, 3);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:S(3)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(PRED, {123, 456}, {1, 0}, {},
1, 0, 3);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithDynamicShapeMetadataPrefix) {
std::string shape_string = "f32[123,456]{1,0:T(16,128)M(1024)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(F32, {123, 456}, {1, 0},
{Tile({16, 128})});
expected.mutable_layout()->set_dynamic_shape_metadata_prefix_bytes(1024);
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseShapeStringWithSplitConfigLayout) {
std::string shape_string = "pred[123,456]{1,0:T(2,128)S(3)SC(1:200)}";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {Tile({2, 128})}, 1, 0, 3,
{SplitConfig(1, {200})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:S(3)SC(0:10)(1:4,5)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {}, 1, 0, 3,
{SplitConfig(0, {10}), SplitConfig(1, {4, 5})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
shape_string = "pred[123,456]{1,0:SC(1:50,200)}";
TF_ASSERT_OK_AND_ASSIGN(actual, ParseShape(shape_string));
expected = ShapeUtil::MakeShapeWithDenseLayout(
PRED, {123, 456}, {1, 0}, {}, 1, 0, 0, {SplitConfig(1, {50, 200})});
EXPECT_EQ(expected, actual)
<< "expected: " << ShapeUtil::HumanStringWithLayout(expected)
<< "actual: " << ShapeUtil::HumanStringWithLayout(actual);
}
TEST_F(HloParserTest, ParseOpaqueType) {
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape("opaque[]"));
Shape expected = ShapeUtil::MakeOpaqueShape();
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseTokenType) {
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape("token[]"));
Shape expected = ShapeUtil::MakeTokenShape();
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseInvalidShapeString) {
std::string shape_strings[] = {"f32[123,456]foobar{0,1}", "f32[123,456]{foo}",
"f32[123,456]dense{foo}"};
for (const std::string& shape_string : shape_strings) {
absl::StatusOr<Shape> result = ParseShape(shape_string);
ASSERT_FALSE(result.ok()) << "shape: " << shape_string;
}
}
TEST_F(HloParserTest, ParseDynamicArray) {
std::string shape_string = "f32[123,<=456]";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeShape(F32, {123, 456}, {false, true});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseDynamicTuple) {
std::string shape_string = "(f32[42], u32[<=123,<=456])";
TF_ASSERT_OK_AND_ASSIGN(Shape actual, ParseShape(shape_string));
Shape expected = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeShape(U32, {123, 456}, {true, true})});
ASSERT_TRUE(ShapeUtil::Equal(expected, actual))
<< "expected: " << ShapeUtil::HumanString(expected)
<< "actual: " << ShapeUtil::HumanString(actual);
}
TEST_F(HloParserTest, ParseInvalidDimLevel) {
constexpr std::string_view shape_string = "f32[123]{0:D(D+~)}";
absl::StatusOr<Shape> result = ParseShape(shape_string);
ASSERT_THAT(
result.status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
testing::HasSubstr(
"invalid DimLevelType/unique/ordered combination in shape")));
}
TEST_F(HloParserTest, NegativeParameterNumber) {
const std::string hlo_string = "par0 = f32[3,5] parameter(-1)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
ASSERT_FALSE(result.status().ok());
EXPECT_THAT(result.status().message(),
HasSubstr("parameter number must be >= 0"));
}
TEST_F(HloParserTest, DuplicateParameterNumberIsDetected) {
const std::string kHloString = R"(
ENTRY e {
a = s8[] parameter(0)
b = s8[] parameter(0)
ROOT a = s8[] add(a, b)
}
)";
auto result = ParseAndReturnUnverifiedModule(kHloString);
ASSERT_FALSE(result.status().ok());
EXPECT_THAT(result.status().message(),
HasSubstr("Duplicate parameter number 0"));
}
TEST_F(HloParserTest, WrongNumberOfParameterLeafBuffersInReplication) {
const std::string hlo_string =
"par0 = (f32[3,5], f32[]) parameter(0), "
"parameter_replication={true,false,true}";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
ASSERT_FALSE(result.status().ok());
EXPECT_THAT(result.status().message(),
HasSubstr("parameter has 2 leaf buffers, but "
"parameter_replication has 3 elements"));
}
TEST_F(HloParserTest, CheckIndexedConditionalDimension) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = s32[2] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("The first operand must be a scalar"));
}
TEST_F(HloParserTest, CheckIndexedConditionalElementType) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = f32[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("The first operand must be a scalar of PRED or S32"));
}
TEST_F(HloParserTest,
CheckPredicatedConditionalRequiresTrueAndFalseComputation) {
const char* const hlo_string = R"(
HloModule Module
branch0 {
tparam = f32[4] parameter(0)
ROOT tgte1 = f32[4] ceil(tparam)
}
branch1 {
fparam = f32[4] parameter(0)
ROOT fgte1 = f32[4] floor(fparam)
}
ENTRY entry {
p0 = f32[4] parameter(0)
b0 = pred[] parameter(1)
ROOT conditional = f32[4] conditional(b0, p0, p0),
branch_computations={branch0, branch1}
}
)";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(),
HasSubstr("unexpected attribute \"branch_computations\""));
}
TEST_F(HloParserTest, InferUnaryShape) {
constexpr char text[] = R"(HloModule InferUnaryShapeTest
ENTRY InferUnaryShape {
a = f32[2,10]{1,0} parameter(0)
ROOT v = abs(a)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
}
TEST_F(HloParserTest, InferBinaryShape) {
constexpr char text[] = R"(HloModule InferBinaryShapeTest
ENTRY InferBinaryShape {
a = f32[2,10]{1,0} parameter(0)
b = f32[2,10]{1,0} parameter(1)
ROOT sum = add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 10}, {1, 0})));
}
TEST_F(HloParserTest, InferTernaryShape) {
constexpr char text[] = R"(HloModule InferTernaryShapeTest
ENTRY InferTernaryShape {
p = pred[] constant(true)
f = s32[] constant(-42)
t = s32[] constant(42)
ROOT select = select(p, f, t)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeScalarShape(S32)));
}
TEST_F(HloParserTest, TupleTypo) {
constexpr char text[] = R"(HloModule TupleTypoTest
ENTRY TupleTypo {
pow = s32[] constant(42)
ROOT v = (s32[]) tuple(power)
}
)";
auto result = ParseAndReturnVerifiedModule(text);
EXPECT_THAT(result.status(),
tsl::testing::StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("instruction does not exist")));
}
TEST_F(HloParserTest, InferDotShape) {
constexpr char text[] = R"(HloModule InferDotShapeTest
ENTRY InferDotShape {
a = f32[2,10]{1,0} parameter(0)
b = f32[10,2]{1,0} parameter(1)
ROOT dot = dot(a, b), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShape(F32, {2}, {0})));
}
TEST_F(HloParserTest, InferSparseDotShape) {
constexpr char text[] = R"(HloModule InferSparseDotShapeTest
ENTRY InferSparseDotShape {
a = f32[2,16]{1,0} parameter(0)
b = f32[32,2]{1,0} parameter(1)
meta = u16[2,2]{1,0} parameter(2)
ROOT dot = dot(a, b, meta), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShape(F32, {2}, {0})));
}
TEST_F(HloParserTest, InferTupleShape) {
constexpr char text[] = R"(HloModule InferTupleShapeTest
ENTRY InferTupleShape () -> s32[2,3] {
c0 = f32[3]{0} constant({1, 2, 3})
c1 = s32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
tuple = tuple(c0, c1)
ROOT get = get-tuple-element(tuple), index=1, sharding={maximal device=0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {2, 3}, {1, 0})));
}
TEST_F(HloParserTest, InferShapeMixedExplicitShape) {
constexpr char text[] = R"(HloModule InferUnaryShapeTest
Negate {
x = f32[] parameter(0)
ROOT negate = negate(x)
}
Identity {
y = f32[] parameter(0)
ROOT copy = copy(y)
}
ENTRY InferUnaryShape {
a = f32[] parameter(0)
b = f32[] parameter(1)
p = pred[] parameter(2)
c = f32[] add(a, b)
ROOT conditional = conditional(p, a, c), true_computation=Negate, false_computation=Identity
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
EXPECT_TRUE(ShapeUtil::Equal(
module->entry_computation()->ComputeProgramShape().result(),
ShapeUtil::MakeScalarShape(F32)));
}
TEST_F(HloParserTest, CheckAliasPassthroughParams) {
const char* const hlo_string = R"(
HloModule TestModule, alias_passthrough_params=true
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_TRUE(result.value()->config().alias_passthrough_params());
}
TEST_F(HloParserTest, CheckReplicaCount) {
const char* const hlo_string = R"(
HloModule TestModule, replica_count=5
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->config().replica_count(), 5);
}
TEST_F(HloParserTest, CheckNumPartitions) {
const char* const hlo_string = R"(
HloModule TestModule, num_partitions=3
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->config().num_partitions(), 3);
EXPECT_TRUE(result.value()->config().use_spmd_partitioning());
}
TEST_F(HloParserTest, CheckFrontendAttributes) {
const char* const hlo_string = R"(
HloModule TestModule, frontend_attributes={attr_name="attr_value"}
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value()->frontend_attributes().map().size(), 1);
EXPECT_EQ(result.value()->frontend_attributes().map().begin()->first,
"attr_name");
EXPECT_EQ(result.value()->frontend_attributes().map().begin()->second,
"attr_value");
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToParameters) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_parameters=true
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ((*result)
->config()
.allow_spmd_sharding_propagation_to_parameters()
.size(),
1);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_parameters()[0]);
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToParametersVec) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_parameters={true,false}
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ((*result)
->config()
.allow_spmd_sharding_propagation_to_parameters()
.size(),
2);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_parameters()[0]);
EXPECT_FALSE(
(*result)->config().allow_spmd_sharding_propagation_to_parameters()[1]);
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToOutput) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_output=true
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(
(*result)->config().allow_spmd_sharding_propagation_to_output().size(),
1);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_output()[0]);
}
TEST_F(HloParserTest, CheckAllowSpmdShardingPropagationToOutputVec) {
const char* const hlo_string = R"(
HloModule TestModule, allow_spmd_sharding_propagation_to_output={true,false}
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p1)
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
TF_EXPECT_OK(result.status());
EXPECT_EQ(
(*result)->config().allow_spmd_sharding_propagation_to_output().size(),
2);
EXPECT_TRUE(
(*result)->config().allow_spmd_sharding_propagation_to_output()[0]);
EXPECT_FALSE(
(*result)->config().allow_spmd_sharding_propagation_to_output()[1]);
}
TEST_F(HloParserTest, NestedBroadcastWithoutDimensionsAttribute) {
const char* const hlo_string = R"(
HloModule test
ENTRY test {
ROOT root = sqrt(f32[10,10] broadcast(f32[10] parameter(0)))
}
)";
auto result = ParseAndReturnVerifiedModule(hlo_string);
EXPECT_NE(absl::OkStatus(), result.status());
EXPECT_THAT(result.status().message(), HasSubstr("dimensions"));
}
TEST_F(HloParserTest, InvalidDimLevelType) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(X,C)} parameter(0)
})";
EXPECT_THAT(ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("expected a DimLevelType abbreviation")));
}
TEST_F(HloParserTest, InvalidDimLevelTypeCount) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(C)} parameter(0)
})";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Dimensions size is 2, but dim level types size is 1")));
}
TEST_F(HloParserTest, RejectSparseTiles) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:D(D,C)T(128,8)} parameter(0)
})";
EXPECT_THAT(ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Layout has tiles, but is for a sparse array")));
}
TEST_F(HloParserTest, RejectDensePhysicalShape) {
const std::string original = R"(HloModule test
ENTRY test {
ROOT root = f32[10,10]{1,0:T(128,8)P(f32[10,10])} parameter(0)
})";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(original).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr(
"Layout has physical shape, but is not for a sparse array")));
}
TEST_F(HloParserTest, ParseSingleComputation) {
const std::string original = R"(
test {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, ParseComputationNameClosingBrace) {
const std::string original = R"(
test {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
}
)";
EXPECT_TRUE(ParseAndReturnUnverifiedModule(original).ok());
}
TEST_F(HloParserTest, ParseSingleEntryComputation) {
const std::string original = R"(
ENTRY test {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, ParseMultiComputations) {
const std::string original = R"(
comp1 {
ROOT root = f32[1,64,10,128]{3,2,1,0} parameter(0)
}
comp2 {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
TEST_F(HloParserTest, ParseMultiComputationsWithEntry) {
const std::string original = R"(
ENTRY comp1 {
ROOT root = f32[1,64,10,128]{1,0,2,3} parameter(0)
}
comp2 {
ROOT root = f32[1,64,10,128]{3,2,1,0} parameter(0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
EXPECT_TRUE(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.has_layout());
EXPECT_TRUE(
module->entry_computation()->ComputeProgramShape().result().has_layout());
EXPECT_EQ(module->entry_computation()
->ComputeProgramShape()
.parameters()[0]
.layout(),
Layout({1, 0, 2, 3}));
EXPECT_EQ(
module->entry_computation()->ComputeProgramShape().result().layout(),
Layout({1, 0, 2, 3}));
}
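// Round-trips a module with a non-trivial wrapped async computation through
// the async-op syntax sugar printer and checks that fingerprints match.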
TEST_F(HloParserTest, NontrivialAsyncOpRoundTrip) {
const std::string original = R"(
HloModule module
%async_wrapped {
%async_param.1 = s32[1024]{0} parameter(0)
%copy = s32[1024]{0} copy(s32[1024]{0} %async_param.1)
%async_param.2 = s32[256]{0} parameter(1)
%async_param.3 = s32[] parameter(2)
ROOT %dus = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %copy, s32[256]{0} %async_param.2, s32[] %async_param.3)
}
ENTRY %main {
%input.5 = s32[] parameter(1)
%broadcast = s32[1024]{0} broadcast(s32[] %input.5), dimensions={}
%input.0 = s32[256]{0} parameter(0)
%async-start = ((s32[1024]{0}, s32[256]{0}, s32[]), s32[1024]{0}, u32[]) async-start(%broadcast, %input.0, %input.5), calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[256]{0}, s32[]), s32[1024]{0}, u32[]) %async-start), calls=%async_wrapped
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(original));
TF_ASSERT_OK_AND_ASSIGN(
auto roundtrip_module,
ParseAndReturnUnverifiedModule(module->ToString(
HloPrintOptions().set_syntax_sugar_async_ops(true))));
auto fp_options = HloPrintOptions::Fingerprint();
EXPECT_EQ(roundtrip_module->ToString(fp_options),
module->ToString(fp_options));
}
TEST_F(HloParserTest, LexesAsJsonDict) {
EXPECT_TRUE(LexesAsJsonDict("{}"));
EXPECT_TRUE(LexesAsJsonDict("{abc: 123}"));
EXPECT_TRUE(LexesAsJsonDict("{{abc: 123}, {{{d}}}}"));
EXPECT_TRUE(LexesAsJsonDict(R"({"}"})"));
EXPECT_TRUE(LexesAsJsonDict(R"({"\"}"})"));
EXPECT_TRUE(LexesAsJsonDict(R"({"\"{"})"));
EXPECT_FALSE(LexesAsJsonDict(""));
EXPECT_FALSE(LexesAsJsonDict("{"));
EXPECT_FALSE(LexesAsJsonDict("}"));
EXPECT_FALSE(LexesAsJsonDict("{{}"));
EXPECT_FALSE(LexesAsJsonDict("{}}"));
EXPECT_FALSE(LexesAsJsonDict("{}a"));
EXPECT_FALSE(LexesAsJsonDict("a{}"));
EXPECT_FALSE(LexesAsJsonDict("{{{{}}}"));
}
TEST_F(HloParserTest, AsyncStartMissingOperandWrapper) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartMissingOperandWrapper {
p0 = f32[2,3] parameter(0)
async-start = (f32[2,3], f32[3,2], s32[]) async-start(p0), calls=async_computation
async-update = ((f32[2,3]), f32[3,2], s32[]) async-update(async-start), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-update), calls=async_computation
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncStart and AsyncUpdate expect the op shape to be "
"in the form of "
"((async-operands), async-outputs, state).")));
}
TEST_F(HloParserTest, AsyncUpdateMissingOperandWrapper) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncUpdateMissingOperandWrapper {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3]), f32[3,2], s32[]) async-start(p0), calls=async_computation
async-update = (f32[2,3], f32[3,2], s32[]) async-update(async-start), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-update), calls=async_computation
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncStart and AsyncUpdate expect the op shape to be "
"in the form of "
"((async-operands), async-outputs, state).")));
}
TEST_F(HloParserTest, AsyncOpTupleWrongType) {
const char* const hlo_string = R"(
HloModule Module
async_computation {
p = f32[2,3] parameter(0)
ROOT custom-call = f32[3,2] custom-call(p), custom_call_target="foo"
}
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
async-start = ((f32[2,3])) async-start(p0), calls=async_computation
ROOT async-done = f32[3,2] async-done(async-start), calls=async_computation
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncStart and AsyncUpdate expect the op shape to be "
"in the form of "
"((async-operands), async-outputs, state).")));
}
TEST_F(HloParserTest, AsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
ROOT async-done = f32[2,3] custom-call-done(tuple)
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncUpdate and AsyncDone expect their operand to be "
"the previous async op.")));
}
TEST_F(HloParserTest, AsyncUpdateAndAsyncDoneNoAsyncStart) {
const char* const hlo_string = R"(
HloModule Module
ENTRY AsyncStartAndAsyncDone {
p0 = f32[2,3] parameter(0)
p1 = u32[] parameter(1)
tuple = ((f32[2,3]), f32[2,3], u32[]) tuple(p0, p0, p1)
async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(tuple)
ROOT async-done = f32[2,3] custom-call-done(tuple)
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("AsyncUpdate and AsyncDone expect their operand to be "
"the previous async op.")));
}
TEST_F(HloParserTest, AsyncUpdateWithSyntaxSugarWrongOp) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWithSyntaxSugarWrongOp
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) add-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async wrapped opcode to be custom-call, "
"but got add")));
}
TEST_F(HloParserTest, AsyncDoneWithSyntaxSugarWrongOp) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWithSyntaxSugarWrongOp
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} add-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async wrapped opcode to be custom-call, "
"but got add")));
}
TEST_F(HloParserTest, AsyncOpSharedComputation) {
const char* const hlo_string = R"(
HloModule AsyncOpSharedComputation
%async_wrapped (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start.0 = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped
%async-done.0 = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-start.0)
%async-start.1 = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped
ROOT %async-done.1 = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-start.1)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Computation async_wrapped is already referenced "
"by another async op")));
}
TEST_F(HloParserTest, AsyncUpdateWrongComputation) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWrongComputation
%async_wrapped.0 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
%async_wrapped.1 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped.0
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) async-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start), calls=%async_wrapped.1
ROOT %async-done = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_wrapped_computation to be async_wrapped.0, "
"but got async_wrapped.1")));
}
TEST_F(HloParserTest, AsyncDoneWrongComputation) {
const char* const hlo_string = R"(
HloModule AsyncDoneWrongComputation
%async_wrapped.0 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
%async_wrapped.1 (async_param: f32[10]) -> f32[20] {
%async_param = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %async_param), custom_call_target="foo"
}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), calls=%async_wrapped.0
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) async-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update), calls=%async_wrapped.1
}
)";
EXPECT_THAT(
ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_wrapped_computation to be async_wrapped.0, "
"but got async_wrapped.1")));
}
TEST_F(HloParserTest, AsyncUpdateWrongDefaultThread) {
const char* const hlo_string = R"(
HloModule AsyncUpdateWrongDefaultThread
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start), async_execution_thread="foo_thread"
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update)
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_execution_thread to be main, "
"but got foo_thread")));
}
TEST_F(HloParserTest, AsyncDoneWrongDefaultThread) {
const char* const hlo_string = R"(
HloModule AsyncDoneWrongDefaultThread
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%async-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), custom_call_target="foo"
%async-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %async-start)
ROOT %async-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %async-update), async_execution_thread="foo_thread"
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Expect async_execution_thread to be main, "
"but got foo_thread")));
}
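// Verifies that send/recv pairs pipelined through a while loop parse cleanly.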
TEST_F(HloParserTest, PipelinedSendRecv) {
const std::string hlo_string = R"(
HloModule test
cond {
param = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(1)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) parameter(0)
count = get-tuple-element(%param), index=0
recv.0 = (u32[2], u32[], token[]) get-tuple-element(param), index=1
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
send.0 = (u32[2], u32[], token[]) get-tuple-element(param), index=2
send-done.0 = (u32[2], token[]) recv-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.0.n = token[] after-all()
recv.0.n = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
after-all.1.n = token[] after-all()
send.0.n = (u32[2], u32[], token[]) send(recv-data.0, after-all.1.n),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) tuple(new_count, recv.0.n, send.0.n)
}
ENTRY test_computation {
c0 = u32[] constant(0)
init = u32[2] broadcast(c0), dimensions={}
after-all.0.p = token[] after-all()
recv.0.p = (u32[2], u32[], token[]) recv(after-all.0.p), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
after-all.1.p = token[] after-all()
send.0.p = (u32[2], u32[], token[]) send(init, after-all.1.p),
channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="0"
}
while_init = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) tuple(c0, recv.0.p, send.0.p)
while_result = (u32[], (u32[2], u32[], token[]), (u32[2], u32[], token[])) while(while_init), body=body, condition=cond
recv.0.q = (u32[2], u32[], token[]) get-tuple-element(while_result), index=1
recv-done.0.q = (u32[2], token[]) recv-done(recv.0.q), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
send.0.q = (u32[2], u32[], token[]) get-tuple-element(while_result), index=2
send-done.0.q = token[] send-done(send.0.q), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT recv-data.0.q = u32[2] get-tuple-element(recv-done.0.q), index=0
})";
auto result = ParseAndReturnUnverifiedModule(hlo_string);
EXPECT_EQ(absl::OkStatus(), result.status());
}
TEST_F(HloParserTest, ReplicaIdWithLayout) {
const char* const hlo_string = R"(
HloModule ReplicaId
ENTRY ReplicaId {
ROOT replica-id.18600 = u32[]{:T(128)} replica-id()
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
EXPECT_TRUE(
module->entry_computation()->root_instruction()->shape().has_layout());
EXPECT_FALSE(module->entry_computation()
->root_instruction()
->shape()
.layout()
.tiles()
.empty());
}
TEST_F(HloParserTest, OriginalValueWithoutShape) {
const std::string hlo_string = R"(HloModule test
ENTRY %test {
%a = f32[2,10]{1,0} parameter(0), origin={{"a"}}
ROOT %v = abs(%a), origin={{"v"}}
}
)";
EXPECT_THAT(ParseAndReturnUnverifiedModule(hlo_string).status(),
tsl::testing::StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("expects instruction shape")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/parser/hlo_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/parser/hlo_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b24371fc-a9b3-45b6-b90e-9778a16abea8 | cpp | tensorflow/tensorflow | hlo_constant_splitter | third_party/xla/xla/hlo/transforms/hlo_constant_splitter.cc | third_party/xla/xla/hlo/transforms/hlo_constant_splitter_test.cc | #include "xla/hlo/transforms/hlo_constant_splitter.h"
#include <iterator>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
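// Returns true for instructions that can seed constant splitting: kConstant
// always qualifies, and kIota qualifies when expression splitting is enabled.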
bool IsSupportedConstant(const HloInstruction* instruction,
bool split_expressions) {
return instruction->opcode() == HloOpcode::kConstant ||
(split_expressions && instruction->opcode() == HloOpcode::kIota);
}
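// An instruction may extend a constant expression if it is side-effect free
// and is either elementwise or a broadcast/slice.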
bool IsSupportedConstantExpression(const HloInstruction* instruction) {
if (instruction->HasSideEffect()) {
return false;
}
if (instruction->IsElementwise()) {
return true;
}
switch (instruction->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kSlice:
return true;
default:
return false;
}
}
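// Clones the constant expression rooted at `to_clone` (including all of its
// transitive operands) and rewires `user` to consume the clone, giving that
// user a private copy of the expression.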
absl::StatusOr<bool> DuplicateConstantExpressionPerUser(
HloComputation* computation, HloInstruction* to_clone,
HloInstruction* user) {
absl::InlinedVector<std::pair<const HloInstruction*, int>, 8> worklist(
1, std::make_pair(to_clone, 0));
absl::InlinedVector<const HloInstruction*, 8> to_clone_vec;
absl::flat_hash_set<const HloInstruction*> visited;
bool changed = false;
VLOG(10) << "Duplicating: " << to_clone->ToString() << " for user "
<< user->ToString();
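  // Iterative post-order traversal: each worklist entry pairs an instruction
  // with the index of the next operand to visit, and an instruction is
  // appended to to_clone_vec only after all of its operands have been pushed.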
while (!worklist.empty()) {
auto& [to_clone_i, index] = worklist.back();
if (index >= to_clone_i->operand_count()) {
to_clone_vec.push_back(to_clone_i);
worklist.pop_back();
continue;
}
int64_t prev_idx = index++;
if (visited.insert(to_clone_i->operands()[prev_idx]).second) {
VLOG(10) << "Adding operand to worklist: "
<< to_clone_i->operands()[prev_idx]->ToString();
worklist.push_back(std::make_pair(to_clone_i->operands()[prev_idx], 0));
}
}
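  // Clone in post order, mapping each original instruction to its clone so
  // that cloned operands can be looked up when cloning their users.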
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
cloned_instructions_map;
for (auto* i : to_clone_vec) {
absl::InlinedVector<HloInstruction*, 4> new_operand_vector;
for (auto* op : i->operands()) {
auto it = cloned_instructions_map.find(op);
CHECK(it != cloned_instructions_map.end())
<< "Expected already cloned instruction for operand: "
<< op->ToString() << " Instruction to clone: " << i->ToString();
new_operand_vector.push_back(it->second);
}
HloInstruction* cloned_instr = computation->AddInstruction(
i->CloneWithNewOperands(i->shape(), new_operand_vector));
cloned_instructions_map[i] = cloned_instr;
if (i == to_clone) {
TF_RETURN_IF_ERROR(to_clone->ReplaceUseWith(user, cloned_instr));
changed = true;
}
}
return changed;
}
}
absl::StatusOr<bool> HloConstantSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
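    // Phase 1: collect the seed constants that satisfy the extra constraints.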
absl::flat_hash_set<HloInstruction*> constants_set;
std::vector<HloInstruction*> constants_list;
std::vector<HloInstruction*> worklist;
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
VLOG(10) << "Considering: " << instruction->ToString();
if (IsSupportedConstant(instruction, split_expressions_) &&
extra_constraints_(instruction)) {
VLOG(10) << "Adding to constant list: " << instruction->ToString();
constants_set.insert(instruction);
constants_list.push_back(instruction);
}
}
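    // Phase 2: grow the constant set to a fixed point. An instruction joins
    // the set once all of its operands are in it; when expression splitting
    // is enabled, supported users of set members are enqueued as candidates.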
int64_t previous_total_constants = 0;
while (constants_list.size() != previous_total_constants) {
VLOG(10) << "Previous total: " << previous_total_constants
<< " current constants: " << constants_list.size();
previous_total_constants = constants_list.size();
worklist.clear();
worklist.insert(worklist.end(), constants_list.begin(),
constants_list.end());
while (!worklist.empty()) {
auto* i = worklist.back();
worklist.pop_back();
bool is_constant = true;
for (auto* ops : i->operands()) {
if (!constants_set.contains(ops)) {
is_constant = false;
break;
}
}
if (is_constant) {
if (constants_set.insert(i).second) {
constants_list.push_back(i);
}
if (split_expressions_) {
for (auto* u : i->users()) {
if (IsSupportedConstantExpression(u) &&
!constants_set.contains(u)) {
worklist.push_back(u);
}
}
}
}
}
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "For computation: " << computation->ToString();
for (HloInstruction* instruction : constants_list) {
VLOG(5) << "Is a constant: " << instruction->ToString();
}
}
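    // Phase 3: duplicate each shared constant expression once per user so
    // that no constant (or constant-expression root) keeps multiple users.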
for (HloInstruction* instruction : constants_list) {
if (IsSupportedConstant(instruction, split_expressions_) &&
instruction->user_count() <= 1) {
continue;
}
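      // Bare constants are duplicated for every user; intermediate expression
      // nodes are duplicated only for users outside the constant set, since
      // in-set users are cloned as part of their own expressions.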
absl::InlinedVector<HloInstruction*, 8> users;
users.reserve(instruction->user_count());
for (HloInstruction* user : instruction->users()) {
if (instruction->opcode() == HloOpcode::kConstant ||
!constants_set.contains(user)) {
users.push_back(user);
}
}
for (auto* u : users) {
TF_ASSIGN_OR_RETURN(bool duplicated, DuplicateConstantExpressionPerUser(
computation, instruction, u));
changed |= duplicated;
}
}
}
return changed;
}
} | #include "xla/hlo/transforms/hlo_constant_splitter.h"
#include <cstdint>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using HloConstantSplitterTest = HloTestBase;
TEST_F(HloConstantSplitterTest, SplitConstants) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant = f32[] constant(94.1934)
add1 = f32[] add(constant, gte0)
add2 = f32[] add(constant, gte1)
ROOT root = (f32[], f32[], f32[]) tuple(constant, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(HloConstantSplitter().Run(module.get()).status());
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConstant) {
EXPECT_LE(instruction->user_count(), 1);
}
}
}
}
TEST_F(HloConstantSplitterTest, OnlySplitConstantsAllowedBySeedConstraints) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant1 = f32[] constant(1)
add0 = f32[] add(constant1, gte0)
add1 = f32[] add(constant1, add0)
constant2 = f32[] constant(2)
add2 = f32[] multiply(constant2, gte1)
ROOT root = (f32[], f32[], f32[]) tuple(constant2, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
  TF_ASSERT_OK(HloConstantSplitter(/*split_expressions=*/false,
[](const HloInstruction* instruction) {
return instruction->name() != "constant1";
})
.Run(module.get())
.status());
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConstant &&
instruction->name() != "constant1") {
EXPECT_LE(instruction->user_count(), 1);
}
}
}
const HloInstruction* constant1 = FindInstruction(module.get(), "constant1");
ASSERT_NE(constant1, nullptr);
EXPECT_EQ(constant1->user_count(), 2);
}
TEST_F(HloConstantSplitterTest, PreservingConstantsWithZeroUsers) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant1 = f32[] constant(94.1934)
constant2 = f32[] constant(9.1934)
ROOT root = (f32[], f32[]) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter();
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_FALSE(status_or.value());
}
TEST_F(HloConstantSplitterTest, SplittingExpressionsWithBroadcast) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
gte0 = f32[1024] parameter(0)
gte1 = f32[1024] parameter(1)
constant1 = f32[1024] iota(), iota_dimension=0
constant2 = f32[] constant(9.1934)
constant3 = f32[] constant(0.0)
constant4 = f32[] constant(1.0)
b = f32[1024] broadcast(constant2), dimensions={}
b2 = f32[1024] broadcast(constant3), dimensions={}
b3 = f32[1024] broadcast(constant4), dimensions={}
cmp = pred[1024] compare(constant1, b), direction=LT
s = f32[1024] select(cmp, b2, b3)
a1 = f32[1024] add(s, gte0)
a2 = f32[1024] add(s, gte1)
ROOT root = (f32[1024], f32[1024]) tuple(a1, a2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_TRUE(status_or.value());
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
XLA_VLOG_LINES(1, module->entry_computation()->ToString());
EXPECT_EQ(module->entry_computation()->instruction_count(), 23);
}
TEST_F(HloConstantSplitterTest, SplittingExpressionsWithSlice) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
iota.0 = u32[64] iota(), iota_dimension=0
slice.0 = u32[32] slice(iota.0), slice={[0:32]}
broadcast.0 = u32[16,32] broadcast(slice.0), dimensions={1}
broadcast.1 = u32[32,32] broadcast(slice.0), dimensions={1}
p.0 = u32[16,32] parameter(0)
p.1 = u32[32,32] parameter(1)
add.0 = u32[16,32] add(p.0, broadcast.0)
add.1 = u32[32,32] add(p.1, broadcast.1)
ROOT root = (u32[16,32], u32[32,32]) tuple(add.0, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_TRUE(status_or.value());
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
XLA_VLOG_LINES(1, module->entry_computation()->ToString());
EXPECT_EQ(module->entry_computation()->instruction_count(), 11);
}
TEST_F(HloConstantSplitterTest, NoSplittingSideEffectExpressions) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
gte0 = f32[1024] parameter(0)
gte1 = f32[1024] parameter(1)
constant1 = f32[1024] iota(), iota_dimension=0
constant2 = f32[] constant(9.1934)
constant3 = f32[] constant(0.0)
constant4 = f32[] constant(0.0)
constant5 = f32[] constant(1.0)
b = f32[1024] broadcast(constant2), dimensions={}
b2 = f32[1024] broadcast(constant3), dimensions={}
rng = f32[] rng(constant4, constant5), distribution=rng_uniform
b3 = f32[1024] broadcast(rng), dimensions={}
cmp = pred[1024] compare(constant1, b), direction=LT
s = f32[1024] select(cmp, b2, b3)
a1 = f32[1024] add(s, gte0)
a2 = f32[1024] add(s, gte1)
ROOT root = (f32[1024], f32[1024]) tuple(a1, a2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const int64_t count_before = module->entry_computation()->instruction_count();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pass, module.get()));
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
const int64_t count_after_dce =
module->entry_computation()->instruction_count();
EXPECT_TRUE(changed);
EXPECT_EQ(count_before, count_after_dce);
int64_t rng_count = 0;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kRng) {
rng_count++;
}
}
EXPECT_EQ(rng_count, 1);
}
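// b1 is shared by two constant expressions (add.1 and mul), so splitting
// clones it once per expression: 4 broadcasts before DCE (the dead original,
// two clones, and b3), and 3 after DCE removes the original.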
TEST_F(HloConstantSplitterTest, InstructionsWithOneUser) {
const char* module_str = R"(
HloModule test_module, entry_computation_layout={(f32[1024]{0:T(512)})->f32[1024]{0:T(512)}}
reduce.add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry_computation {
constant1 = f32[] constant(1.1)
b1 = f32[1024]{0} broadcast(constant1), dimensions={}
iota.1 = f32[1024]{0} iota(), iota_dimension=0
add.1 = f32[1024]{0} add(b1, iota.1)
p0 = f32[1024]{0} parameter(0), sharding={devices=[4]0,1,2,3}
custom-call.0 = f32[256]{0} custom-call(p0), custom_call_target="SPMDFullToShardShape", sharding={manual}
constant0 = f32[] constant(0)
reduce.1 = f32[] reduce(custom-call.0, constant0), dimensions={0}, to_apply=reduce.add
b3 = f32[1024]{0} broadcast(reduce.1), dimensions={}
add.2 = f32[1024]{0} add(add.1, b3)
custom-call.1 = f32[4096]{0} custom-call(add.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[4]0,1,2,3}
reshape = f32[4,1024]{1,0} reshape(custom-call.1)
reduce.2 = f32[1024]{0} reduce(reshape, constant0), dimensions={0}, to_apply=reduce.add
iota.2 = f32[1024]{0} iota(), iota_dimension=0
mul = f32[1024]{0} multiply(b1, iota.2)
ROOT sub = f32[1024]{0} subtract(reduce.2, mul), sharding={devices=[4]0,1,2,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
int64_t broadcast_count_before_dce = 0, broadcast_count_after_dce = 0;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
broadcast_count_before_dce++;
}
}
EXPECT_EQ(broadcast_count_before_dce, 4);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
broadcast_count_after_dce++;
}
}
EXPECT_EQ(broadcast_count_after_dce, 3);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/transforms/hlo_constant_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/transforms/hlo_constant_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |