| ID (int64, 0–2.65k) | Language (1 class) | Repository Name (21 classes) | File Name (2–48 chars) | File Path in Repository (10–111 chars, nullable) | File Path for Unit Test (16–116 chars, nullable) | Code (66–1.91M chars) | Unit Test - (Ground Truth) (40–32.1k chars, nullable) |
---|---|---|---|---|---|---|---|
1,700 | cpp | tensorflow/tensorflow | tensor_slice_writer | tensorflow/core/util/tensor_slice_writer.cc | tensorflow/core/util/tensor_slice_writer_test.cc | #ifndef TENSORFLOW_CORE_UTIL_TENSOR_SLICE_WRITER_H_
#define TENSORFLOW_CORE_UTIL_TENSOR_SLICE_WRITER_H_
#include <functional>
#include <map>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/saved_tensor_slice.pb.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
class TensorSliceWriter {
public:
class Builder {
public:
virtual ~Builder() = default;
virtual void Add(StringPiece key, StringPiece value) = 0;
virtual Status Finish(int64_t* file_size) = 0;
};
typedef std::function<Status(const string&, Builder**)> CreateBuilderFunction;
TensorSliceWriter(const string& filename,
CreateBuilderFunction create_builder);
virtual ~TensorSliceWriter() = default;
template <typename T>
Status Add(const string& name, const TensorShape& shape,
const TensorSlice& slice, const T* data);
Status Finish();
template <typename T>
static Status SaveData(const T* data, int64_t num_elements, SavedSlice* ss);
static size_t MaxBytesPerElement(DataType dt);
private:
static size_t MaxBytesPerElementOrZero(DataType dt);
static constexpr size_t kMaxMessageBytes = 1LL << 31;
static constexpr size_t kTensorProtoHeaderBytes = 1 << 10;
const string filename_;
const CreateBuilderFunction create_builder_;
string data_filename_;
bool use_temp_file_;
std::unordered_map<string, int> name_to_index_;
SavedTensorSlices sts_;
std::map<string, string> data_;
int slices_;
TensorSliceWriter(const TensorSliceWriter&) = delete;
void operator=(const TensorSliceWriter&) = delete;
};
template <typename T>
Status TensorSliceWriter::Add(const string& name, const TensorShape& shape,
const TensorSlice& slice, const T* data) {
if (shape.dims() != slice.dims()) {
return errors::Internal("Incompatible tensor shape and slice: ", "shape = ",
shape.DebugString(),
", slice = ", slice.DebugString());
}
DataType dt = DataTypeToEnum<T>::value;
int index = gtl::FindWithDefault(name_to_index_, name, -1);
if (index >= 0) {
const SavedSliceMeta& ssm = sts_.meta().tensor(index);
CHECK_EQ(name, ssm.name()) << ssm.ShortDebugString();
TensorShape ssm_shape(ssm.shape());
if (!shape.IsSameSize(ssm_shape)) {
return errors::Internal(
"Mismatching shapes: existing tensor = ", ssm_shape.DebugString(),
", trying to add name ", name, ", shape = ", shape.DebugString());
}
if (dt != ssm.type()) {
return errors::Internal(
"Mismatching types: existing type = ", DataTypeString(ssm.type()),
", trying to add name ", name, ", type = ", DataTypeString(dt));
}
} else {
index = sts_.meta().tensor_size();
name_to_index_.insert(std::make_pair(name, index));
SavedSliceMeta* ssm = sts_.mutable_meta()->add_tensor();
ssm->set_name(name);
shape.AsProto(ssm->mutable_shape());
ssm->set_type(dt);
}
SavedSliceMeta* ssm = sts_.mutable_meta()->mutable_tensor(index);
slice.AsProto(ssm->add_slice());
{
SavedTensorSlices sts;
SavedSlice* ss = sts.mutable_data();
ss->set_name(name);
slice.AsProto(ss->mutable_slice());
TensorShape saved_shape(ssm->shape());
TensorShape sliced_shape;
TF_RETURN_IF_ERROR(slice.SliceTensorShape(saved_shape, &sliced_shape));
TF_RETURN_IF_ERROR(SaveData(data, sliced_shape.num_elements(), ss));
string key = EncodeTensorNameSlice(name, slice);
std::pair<string, string> key_value(key, "");
if (!sts.AppendToString(&key_value.second)) {
return errors::Internal("Error writing Tensor. Possible size overflow.");
}
data_.insert(key_value);
}
++slices_;
return absl::OkStatus();
}
template <typename T>
Status TensorSliceWriter::SaveData(const T* data, int64_t num_elements,
SavedSlice* ss) {
size_t max_bytes_per_element =
MaxBytesPerElementOrZero(DataTypeToEnum<T>::value);
if (max_bytes_per_element == 0) {
return errors::InvalidArgument(
"Tensor slice serialization not implemented for dtype ",
DataTypeToEnum<T>::value);
}
size_t size_bound = ss->ByteSize() + kTensorProtoHeaderBytes +
(max_bytes_per_element * num_elements);
if (size_bound > kMaxMessageBytes) {
return errors::InvalidArgument(
"Tensor slice is too large to serialize (conservative estimate: ",
size_bound, " bytes)");
}
Fill(data, num_elements, ss->mutable_data());
DCHECK_GE(ss->ByteSize(), 0);
DCHECK_LE(ss->ByteSize(), size_bound);
return absl::OkStatus();
}
template <>
Status TensorSliceWriter::SaveData(const tstring* data, int64_t num_elements,
SavedSlice* ss);
Status CreateTableTensorSliceBuilder(const string& filename,
TensorSliceWriter::Builder** builder);
}  // namespace checkpoint
}  // namespace tensorflow
#endif
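The `Builder` interface above is the pluggable persistence layer: `TensorSliceWriter` hands each serialized key/value pair to whatever `Builder` its `CreateBuilderFunction` produces. A minimal sketch of that extension point follows; the `InMemoryBuilder` name and in-memory behavior are illustrative assumptions, not part of TensorFlow.

```cpp
// Hypothetical Builder that collects key/value pairs in memory instead of
// writing an on-disk table; useful only to illustrate the interface contract.
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/util/tensor_slice_writer.h"

class InMemoryBuilder : public tensorflow::checkpoint::TensorSliceWriter::Builder {
 public:
  void Add(tensorflow::StringPiece key, tensorflow::StringPiece value) override {
    entries_.emplace_back(std::string(key), std::string(value));
  }
  tensorflow::Status Finish(int64_t* file_size) override {
    int64_t total = 0;
    for (const auto& e : entries_) total += e.first.size() + e.second.size();
    *file_size = total;  // bytes a real table builder would have written
    return absl::OkStatus();
  }

 private:
  std::vector<std::pair<std::string, std::string>> entries_;
};
```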
#include "tensorflow/core/util/tensor_slice_writer.h"
#include <memory>
#include <utility>
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
namespace {
class TableBuilder : public TensorSliceWriter::Builder {
public:
TableBuilder(const string& name, WritableFile* f) : name_(name), file_(f) {
table::Options option;
option.compression = table::kNoCompression;
builder_ = std::make_unique<table::TableBuilder>(option, f);
}
void Add(StringPiece key, StringPiece val) override {
builder_->Add(key, val);
}
Status Finish(int64_t* file_size) override {
*file_size = -1;
Status s = builder_->Finish();
if (s.ok()) {
s = file_->Close();
if (s.ok()) {
*file_size = builder_->FileSize();
}
}
if (!s.ok()) {
s = errors::Internal("Error writing (tmp) checkpoint file: ", name_, ": ",
s.message());
}
builder_.reset();
file_.reset();
return s;
}
private:
string name_;
std::unique_ptr<WritableFile> file_;
std::unique_ptr<table::TableBuilder> builder_;
};
}  // namespace
Status CreateTableTensorSliceBuilder(const string& name,
TensorSliceWriter::Builder** builder) {
*builder = nullptr;
std::unique_ptr<WritableFile> f;
Status s = Env::Default()->NewWritableFile(name, &f);
if (s.ok()) {
*builder = new TableBuilder(name, f.release());
return absl::OkStatus();
} else {
return s;
}
}
TensorSliceWriter::TensorSliceWriter(const string& filename,
CreateBuilderFunction create_builder)
: filename_(filename),
create_builder_(std::move(create_builder)),
slices_(0) {
Env* env = Env::Default();
Status status = env->CanCreateTempFile(filename_, &use_temp_file_);
if (!status.ok()) {
LOG(ERROR) << "Failed to get CanCreateTempFile attribute: " << filename_;
use_temp_file_ = true;
}
data_filename_ = filename_;
if (use_temp_file_) {
data_filename_ = strings::StrCat(filename_, ".tempstate", random::New64());
}
VersionDef* versions = sts_.mutable_meta()->mutable_versions();
versions->set_producer(TF_CHECKPOINT_VERSION);
versions->set_min_consumer(TF_CHECKPOINT_VERSION_MIN_CONSUMER);
}
Status TensorSliceWriter::Finish() {
Builder* b;
Status s = create_builder_(data_filename_, &b);
if (!s.ok()) {
delete b;
return s;
}
std::unique_ptr<Builder> builder(b);
string meta;
sts_.AppendToString(&meta);
builder->Add(kSavedTensorSlicesKey, meta);
for (const auto& x : data_) {
builder->Add(x.first, x.second);
}
int64_t file_size;
s = builder->Finish(&file_size);
if (use_temp_file_) {
if (s.ok()) {
s = Env::Default()->RenameFile(data_filename_, filename_);
if (s.ok()) {
VLOG(1) << "Written " << slices_ << " slices for "
<< sts_.meta().tensor_size() << " tensors (" << file_size
<< " bytes) to " << filename_;
} else {
LOG(ERROR) << "Failed to rename file " << data_filename_ << " to "
<< filename_;
}
} else {
Env::Default()->DeleteFile(data_filename_).IgnoreError();
}
}
return s;
}
size_t TensorSliceWriter::MaxBytesPerElement(DataType dt) {
size_t max_bytes_per_element =
TensorSliceWriter::MaxBytesPerElementOrZero(dt);
if (max_bytes_per_element == 0) {
LOG(FATAL) << "MaxBytesPerElement not implemented for dtype: " << dt;
}
return max_bytes_per_element;
}
size_t TensorSliceWriter::MaxBytesPerElementOrZero(DataType dt) {
switch (dt) {
case DT_FLOAT:
return 4;
case DT_DOUBLE:
return 8;
case DT_INT32:
return 10;
case DT_UINT8:
return 2;
case DT_INT16:
return 10;
case DT_INT8:
return 10;
case DT_COMPLEX64:
return 8;
case DT_INT64:
return 10;
case DT_BOOL:
return 1;
case DT_QINT8:
return 10;
case DT_QUINT8:
return 2;
case DT_QINT32:
return 10;
case DT_QINT16:
return 10;
case DT_QUINT16:
return 3;
case DT_UINT16:
return 3;
case DT_COMPLEX128:
return 16;
case DT_HALF:
return 3;
case DT_INVALID:
case DT_STRING:
case DT_BFLOAT16:
default:
return 0;
}
}
template <>
Status TensorSliceWriter::SaveData(const tstring* data, int64_t num_elements,
SavedSlice* ss) {
size_t size_bound = ss->ByteSize() + kTensorProtoHeaderBytes +
(num_elements * MaxBytesPerElement(DT_INT32));
for (int64_t i = 0; i < num_elements; ++i) {
size_bound += data[i].size();
}
if (size_bound > kMaxMessageBytes) {
return errors::InvalidArgument(
"Tensor slice is too large to serialize (conservative estimate: ",
size_bound, " bytes)");
}
Fill(data, num_elements, ss->mutable_data());
DCHECK_GE(ss->ByteSize(), 0);
DCHECK_LE(ss->ByteSize(), size_bound);
return absl::OkStatus();
}
}  // namespace checkpoint
} | #include "tensorflow/core/util/tensor_slice_writer.h"
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_reader.h"
namespace tensorflow {
namespace checkpoint {
class TensorSliceWriteTestHelper {
public:
static void CheckEntries(const string& fname);
static void GetData(TensorSliceReader::Table* table, const string& name,
const TensorSlice& slice, SavedSlice* ss);
};
namespace {
void ExpectIdenticalFloatArrays(const float* expected, int size,
const float* actual) {
for (int i = 0; i < size; ++i) {
EXPECT_NEAR(expected[i], actual[i], 1e-6);
}
}
template <typename T, typename U>
void ExpectIdenticalIntArrays(const T* expected, int size, const U* actual) {
for (int i = 0; i < size; ++i) {
EXPECT_EQ(expected[i], static_cast<T>(actual[i]));
}
}
template <typename T, unsigned SIZE>
inline size_t ArraySize(const T (&v)[SIZE]) {
return SIZE;
}
TEST(TensorSliceWriteTest, SimpleWrite) {
const string filename = io::JoinPath(testing::TmpDir(), "checkpoint");
TensorSliceWriter writer(filename, CreateTableTensorSliceBuilder);
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:0,1");
const int32 data[] = {0, 1, 2, 3, 4};
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:3,1");
const int32 data[] = {10, 11, 12, 13, 14};
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
TensorShape shape({3, 2});
TensorSlice slice = TensorSlice::ParseOrDie("-:-");
const float data[] = {1.2, 1.3, 1.4, 2.1, 2.2, 2.3};
TF_CHECK_OK(writer.Add("AA", shape, slice, data));
}
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:3,1");
const int64_t data[] = {10, 11, 12, 13, 14};
TF_CHECK_OK(writer.Add("int64", shape, slice, data));
}
{
TensorShape shape({5, 10});
TensorSlice slice = TensorSlice::ParseOrDie("-:3,1");
const int16 data[] = {10, 11, 12, 13, 14};
TF_CHECK_OK(writer.Add("int16", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
TensorSliceWriteTestHelper::CheckEntries(filename);
}
}  // namespace
void TensorSliceWriteTestHelper::GetData(TensorSliceReader::Table* table,
const string& name,
const TensorSlice& slice,
SavedSlice* ss) {
string key = EncodeTensorNameSlice(name, slice);
string value;
EXPECT_TRUE(table->Get(key, &value));
SavedTensorSlices sts;
EXPECT_TRUE(ParseProtoUnlimited(&sts, value));
EXPECT_FALSE(sts.has_meta());
*ss = sts.data();
EXPECT_EQ(name, ss->name());
TensorSlice slice2(ss->slice());
EXPECT_EQ(slice.DebugString(), slice2.DebugString());
}
void TensorSliceWriteTestHelper::CheckEntries(const string& fname) {
TensorSliceReader::Table* tptr;
TF_CHECK_OK(OpenTableTensorSliceReader(fname, &tptr));
std::unique_ptr<TensorSliceReader::Table> table(tptr);
CHECK_NOTNULL(table.get());
string value;
ASSERT_TRUE(table->Get(kSavedTensorSlicesKey, &value));
{
SavedTensorSlices sts;
EXPECT_TRUE(ParseProtoUnlimited(&sts, value));
EXPECT_TRUE(sts.has_meta());
EXPECT_EQ(4, sts.meta().tensor_size());
EXPECT_LT(0, TF_CHECKPOINT_VERSION);
EXPECT_EQ(TF_CHECKPOINT_VERSION, sts.meta().versions().producer());
EXPECT_EQ(TF_CHECKPOINT_VERSION_MIN_CONSUMER,
sts.meta().versions().min_consumer());
EXPECT_FALSE(sts.has_data());
{
const SavedSliceMeta& ssm = sts.meta().tensor(0);
EXPECT_EQ("test", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 5 } "
"dim { size: 10 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_INT32, ssm.type());
EXPECT_EQ(2, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
TensorSlice s1(ssm.slice(1));
EXPECT_EQ("-:0,1", s0.DebugString());
EXPECT_EQ("-:3,1", s1.DebugString());
}
{
const SavedSliceMeta& ssm = sts.meta().tensor(1);
EXPECT_EQ("AA", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 3 } "
"dim { size: 2 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_FLOAT, ssm.type());
EXPECT_EQ(1, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
EXPECT_EQ("-:-", s0.DebugString());
}
{
const SavedSliceMeta& ssm = sts.meta().tensor(2);
EXPECT_EQ("int64", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 5 } "
"dim { size: 10 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_INT64, ssm.type());
EXPECT_EQ(1, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
EXPECT_EQ("-:3,1", s0.DebugString());
}
{
const SavedSliceMeta& ssm = sts.meta().tensor(3);
EXPECT_EQ("int16", ssm.name());
TensorShapeProto expected_shape_proto;
protobuf::TextFormat::ParseFromString(
"dim { size: 5 } "
"dim { size: 10 }",
&expected_shape_proto);
EXPECT_EQ(ssm.shape().ShortDebugString(),
expected_shape_proto.ShortDebugString());
EXPECT_EQ(DT_INT16, ssm.type());
EXPECT_EQ(1, ssm.slice_size());
TensorSlice s0(ssm.slice(0));
EXPECT_EQ("-:3,1", s0.DebugString());
}
}
{
SavedSlice ss;
GetData(table.get(), "AA", TensorSlice(2), &ss);
const float data[] = {1.2, 1.3, 1.4, 2.1, 2.2, 2.3};
EXPECT_EQ(ArraySize(data), ss.data().float_val_size());
ExpectIdenticalFloatArrays(data, ArraySize(data),
ss.data().float_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "test", TensorSlice({{0, -1}, {0, 1}}), &ss);
const int32 data[] = {0, 1, 2, 3, 4};
EXPECT_EQ(ArraySize(data), ss.data().int_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data), ss.data().int_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "test", TensorSlice({{0, -1}, {3, 1}}), &ss);
const int32 data[] = {10, 11, 12, 13, 14};
EXPECT_EQ(ArraySize(data), ss.data().int_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data), ss.data().int_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "int64", TensorSlice({{0, -1}, {3, 1}}), &ss);
const int64_t data[] = {10, 11, 12, 13, 14};
EXPECT_EQ(ArraySize(data), ss.data().int64_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data),
ss.data().int64_val().data());
}
{
SavedSlice ss;
GetData(table.get(), "int16", TensorSlice({{0, -1}, {3, 1}}), &ss);
const int16 data[] = {10, 11, 12, 13, 14};
EXPECT_EQ(ArraySize(data), ss.data().int_val_size());
ExpectIdenticalIntArrays(data, ArraySize(data), ss.data().int_val().data());
}
}
template <typename DT>
size_t BytesPerElementHelper(DT value) {
SavedSlice ss;
std::array<DT, 1> lo_data;
std::fill(lo_data.begin(), lo_data.end(), value);
TF_EXPECT_OK(
TensorSliceWriter::SaveData(lo_data.data(), lo_data.size(), &ss));
size_t lo_byte_size = ss.ByteSizeLong();
std::array<DT, 1001> hi_data;
std::fill(hi_data.begin(), hi_data.end(), value);
TF_EXPECT_OK(
TensorSliceWriter::SaveData(hi_data.data(), hi_data.size(), &ss));
size_t hi_byte_size = ss.ByteSizeLong();
return (hi_byte_size - lo_byte_size) / (hi_data.size() - lo_data.size());
}
TEST(TensorSliceWriteTest, CheckpointSize) {
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_BOOL),
BytesPerElementHelper<bool>(false));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_BOOL),
BytesPerElementHelper<bool>(true));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_FLOAT),
BytesPerElementHelper<float>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_DOUBLE),
BytesPerElementHelper<double>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_COMPLEX64),
BytesPerElementHelper<complex64>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_COMPLEX128),
BytesPerElementHelper<complex128>(-1.0));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT32),
BytesPerElementHelper<int32>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT64),
BytesPerElementHelper<int64_t>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_UINT16),
BytesPerElementHelper<uint16>(std::numeric_limits<uint16>::max()));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_UINT8),
BytesPerElementHelper<uint8>(std::numeric_limits<uint8>::max()));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT8),
BytesPerElementHelper<int8>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_INT16),
BytesPerElementHelper<int16>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_QINT8),
BytesPerElementHelper<qint8>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_QUINT8),
BytesPerElementHelper<quint8>(std::numeric_limits<uint8>::max()));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_QINT32),
BytesPerElementHelper<qint32>(-1));
EXPECT_EQ(TensorSliceWriter::MaxBytesPerElement(DT_HALF),
BytesPerElementHelper<Eigen::half>(Eigen::half(-1.0)));
}
TEST(TensorSliceWriteTest, SizeErrors) {
const string filename = io::JoinPath(testing::TmpDir(), "checkpoint");
TensorSliceWriter writer(filename, CreateTableTensorSliceBuilder);
{
TensorShape shape({300, 1000000});
TensorSlice slice = TensorSlice::ParseOrDie("-:-");
const std::vector<int8> data(300000000, -1);
Status s = writer.Add("test1", shape, slice, data.data());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(),
"Tensor slice is too large to serialize"));
}
{
TensorShape shape({256, 1024});
TensorSlice slice = TensorSlice::ParseOrDie("-:-");
const std::vector<tstring> data(256 * 1024, std::string(8192, 'f'));
Status s = writer.Add("test2", shape, slice, data.data());
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(s.message(),
"Tensor slice is too large to serialize"));
}
}
TEST(TensorSliceWriterTest, InvalidInput) {
SavedSlice ss;
std::array<uint32_t, 1> data;
std::fill(data.begin(), data.end(), 1234);
Status s = TensorSliceWriter::SaveData(data.data(), data.size(), &ss);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(), "Tensor slice serialization not implemented for dtype"));
}
}  // namespace checkpoint
} |
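A minimal usage sketch distilled from the `SimpleWrite` test above: open a writer with the table-backed builder, add one slice of a tensor, and finish. The file path and tensor name are illustrative.

```cpp
// Sketch: write one column slice of a 5x10 int32 tensor to a checkpoint.
#include <string>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/util/tensor_slice_writer.h"

tensorflow::Status WriteExampleCheckpoint(const std::string& filename) {
  tensorflow::checkpoint::TensorSliceWriter writer(
      filename, tensorflow::checkpoint::CreateTableTensorSliceBuilder);
  tensorflow::TensorShape shape({5, 10});  // logical shape of the full tensor
  // "-:0,1" selects every row but only column 0, i.e. 5 elements.
  tensorflow::TensorSlice slice = tensorflow::TensorSlice::ParseOrDie("-:0,1");
  const tensorflow::int32 data[] = {0, 1, 2, 3, 4};
  tensorflow::Status s = writer.Add("test", shape, slice, data);
  if (!s.ok()) return s;
  return writer.Finish();  // writes the metadata entry plus all buffered slices
}
```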
1,701 | cpp | tensorflow/tensorflow | work_sharder | tensorflow/core/util/work_sharder.cc | tensorflow/core/util/work_sharder_test.cc | #ifndef TENSORFLOW_CORE_UTIL_WORK_SHARDER_H_
#define TENSORFLOW_CORE_UTIL_WORK_SHARDER_H_
#include <functional>
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
void Shard(int max_parallelism, thread::ThreadPool* workers, int64_t total,
int64_t cost_per_unit, std::function<void(int64_t, int64_t)> work);
void SetPerThreadMaxParallelism(int max_parallelism);
int GetPerThreadMaxParallelism();
class ScopedPerThreadMaxParallelism {
public:
ScopedPerThreadMaxParallelism(int max_parallelism)
: previous_(GetPerThreadMaxParallelism()) {
SetPerThreadMaxParallelism(max_parallelism);
}
~ScopedPerThreadMaxParallelism() { SetPerThreadMaxParallelism(previous_); }
private:
int previous_ = -1;
};
class Sharder {
public:
typedef std::function<void()> Closure;
typedef std::function<void(Closure)> Runner;
typedef std::function<void(int64_t, int64_t)> Work;
static void Do(int64_t total, int64_t cost_per_unit, const Work& work,
const Runner& runner, int max_parallelism);
};
}  // namespace tensorflow
#endif
#include "tensorflow/core/util/work_sharder.h"
#include <algorithm>
#include <functional>
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace {
bool UseEigenParallelFor() {
static bool result = []() {
bool result = true;
if (auto status =
tsl::ReadBoolFromEnvVar("TF_USE_EIGEN_PARALLEL_FOR_IN_WORK_SHARDER",
true, &result);
status.ok()) {
return result;
}
return true;
}();
return result;
}
}  // namespace
thread_local int per_thread_max_parallelism = 1000000;
void SetPerThreadMaxParallelism(int max_parallelism) {
CHECK_LE(0, max_parallelism);
per_thread_max_parallelism = max_parallelism;
}
int GetPerThreadMaxParallelism() { return per_thread_max_parallelism; }
void Shard(int max_parallelism, thread::ThreadPool* workers, int64_t total,
int64_t cost_per_unit, std::function<void(int64_t, int64_t)> work) {
CHECK_GE(total, 0);
if (total == 0) {
return;
}
max_parallelism = std::min(max_parallelism, GetPerThreadMaxParallelism());
if (max_parallelism <= 1) {
work(0, total);
return;
}
if (UseEigenParallelFor() && max_parallelism >= workers->NumThreads()) {
tsl::profiler::TraceMe trace_me([=, num_threads = workers->NumThreads()]() {
return tsl::profiler::TraceMeEncode("ParallelFor",
{{"cost_per_unit", cost_per_unit},
{"total", total},
{"max_parallelism", max_parallelism},
{"num_threads", num_threads}});
});
workers->ParallelFor(total, cost_per_unit, work);
return;
}
Sharder::Do(
total, cost_per_unit, work,
[&workers](Sharder::Closure c) { workers->Schedule(c); },
max_parallelism);
}
void Sharder::Do(int64_t total, int64_t cost_per_unit, const Work& work,
const Runner& runner, int max_parallelism) {
tsl::profiler::TraceMe trace_me([=]() {
return tsl::profiler::TraceMeEncode("Sharder::Do",
{{"cost_per_unit", cost_per_unit},
{"total", total},
{"max_parallelism", max_parallelism}});
});
cost_per_unit = std::max(int64_t{1}, cost_per_unit);
static const int64_t kMinCostPerShard = 10000;
const int num_shards =
std::max<int>(1, std::min(static_cast<int64_t>(max_parallelism),
total * cost_per_unit / kMinCostPerShard));
const int64_t block_size = (total + num_shards - 1) / num_shards;
CHECK_GT(block_size, 0);
if (block_size >= total) {
work(0, total);
return;
}
const int num_shards_used = (total + block_size - 1) / block_size;
BlockingCounter counter(num_shards_used - 1);
for (int64_t start = block_size; start < total; start += block_size) {
auto limit = std::min(start + block_size, total);
runner([&work, &counter, start, limit]() {
work(start, limit);
counter.DecrementCount();
});
}
work(0, std::min(block_size, total));
counter.Wait();
}
} | #include "tensorflow/core/util/work_sharder.h"
#include <algorithm>
#include <atomic>
#include <functional>
#include <vector>
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
void RunSharding(int64_t num_workers, int64_t total, int64_t cost_per_unit,
int64_t per_thread_max_parallelism,
thread::ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<bool> work(total, false);
Shard(num_workers, threads, total, cost_per_unit,
[=, &mu, &num_shards, &num_done_work, &work](int64_t start,
int64_t limit) {
VLOG(1) << "Shard [" << start << "," << limit << ")";
EXPECT_GE(start, 0);
EXPECT_LE(limit, total);
mutex_lock l(mu);
++num_shards;
for (; start < limit; ++start) {
EXPECT_FALSE(work[start]);
++num_done_work;
work[start] = true;
}
});
LOG(INFO) << num_workers << " " << total << " " << cost_per_unit << " "
<< num_shards;
EXPECT_EQ(num_done_work, total);
if (std::min(num_workers, per_thread_max_parallelism) <
threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + per_thread_max_parallelism);
}
}
TEST(Shard, Basic) {
thread::ThreadPool threads(Env::Default(), "test", 16);
for (auto workers : {0, 1, 2, 3, 5, 7, 10, 11, 15, 100, 1000}) {
for (auto total : {0, 1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto cost_per_unit : {0, 1, 11, 102, 1003, 10005, 1000007}) {
for (auto maxp : {1, 2, 4, 8, 100}) {
ScopedPerThreadMaxParallelism s(maxp);
RunSharding(workers, total, cost_per_unit, maxp, &threads);
}
}
}
}
}
TEST(Shard, OverflowTest) {
thread::ThreadPool threads(Env::Default(), "test", 3);
for (auto workers : {1, 2, 3}) {
const int64_t total_elements = 1LL << 32;
const int64_t cost_per_unit = 10;
std::atomic<int64_t> num_elements(0);
Shard(workers, &threads, total_elements, cost_per_unit,
[&num_elements](int64_t start, int64_t limit) {
num_elements += limit - start;
});
EXPECT_EQ(num_elements.load(), total_elements);
}
}
void BM_Sharding(::testing::benchmark::State& state) {
const int arg = state.range(0);
thread::ThreadPool threads(Env::Default(), "test", 16);
const int64_t total = 1LL << 30;
auto lambda = [](int64_t start, int64_t limit) {};
auto work = std::cref(lambda);
for (auto s : state) {
Shard(arg - 1, &threads, total, 1, work);
}
}
BENCHMARK(BM_Sharding)->Range(1, 128);
}  // namespace
} |
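A minimal sketch of `Shard` in use, distilled from `OverflowTest` above: every index in `[0, total)` is handed to exactly one shard. The pool name and sizes are illustrative.

```cpp
// Sketch: count elements in parallel; the atomic accumulates across shards.
#include <atomic>
#include <cstdint>
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/work_sharder.h"

int64_t CountElements(int64_t total) {
  tensorflow::thread::ThreadPool pool(tensorflow::Env::Default(), "demo", 8);
  std::atomic<int64_t> count(0);
  // cost_per_unit is a rough per-element cost estimate used to decide how
  // many shards are worth creating (see kMinCostPerShard above).
  tensorflow::Shard(/*max_parallelism=*/8, &pool, total, /*cost_per_unit=*/10,
                    [&count](int64_t start, int64_t limit) {
                      count += limit - start;  // each range is seen exactly once
                    });
  return count.load();
}
```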
1,702 | cpp | tensorflow/tensorflow | tensor_slice_reader | tensorflow/core/util/tensor_slice_reader.cc | tensorflow/core/util/tensor_slice_reader_test.cc | #ifndef TENSORFLOW_CORE_UTIL_TENSOR_SLICE_READER_H_
#define TENSORFLOW_CORE_UTIL_TENSOR_SLICE_READER_H_
#include <functional>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/saved_tensor_slice.pb.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_set.h"
#include "tensorflow/core/util/tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
class TensorSliceReader {
public:
class Table {
public:
virtual ~Table();
virtual bool Get(const string& key, string* value) = 0;
};
typedef std::function<Status(const string&, Table**)> OpenTableFunction;
static constexpr int kLoadAllShards = -1;
TensorSliceReader(const string& filepattern);
TensorSliceReader(const string& filepattern, OpenTableFunction open_function);
TensorSliceReader(const string& filepattern, OpenTableFunction open_function,
int preferred_shard);
virtual ~TensorSliceReader();
const string& filepattern() const { return filepattern_; }
int num_files() const { return sss_.size(); }
Status status() const { return status_; }
bool HasTensor(const string& name, TensorShape* shape, DataType* type) const;
template <typename T>
bool CopySliceData(const string& name, const TensorSlice& slice,
T* data) const;
const std::unordered_map<string, TensorSliceSet*>& Tensors() const {
return tensors_;
}
Status GetTensor(const string& name,
std::unique_ptr<tensorflow::Tensor>* out_tensor) const;
typedef std::unordered_map<string, TensorShape> VarToShapeMap;
typedef std::unordered_map<string, DataType> VarToDataTypeMap;
VarToShapeMap GetVariableToShapeMap() const;
VarToDataTypeMap GetVariableToDataTypeMap() const;
const string DebugString() const;
private:
friend class TensorSliceWriteTestHelper;
void LoadShard(int shard) const;
void LoadAllShards() const;
const TensorSliceSet* FindTensorSlice(
const string& name, const TensorSlice& slice,
std::vector<std::pair<TensorSlice, string>>* details) const;
const string filepattern_;
const OpenTableFunction open_function_;
std::vector<string> fnames_;
std::unordered_map<string, int> fname_to_index_;
mutable mutex mu_;
mutable bool all_shards_loaded_ = false;
mutable std::vector<std::unique_ptr<Table>> sss_;
mutable std::unordered_map<string, TensorSliceSet*> tensors_;
mutable Status status_;
TensorSliceReader(const TensorSliceReader&) = delete;
void operator=(const TensorSliceReader&) = delete;
};
Status OpenTableTensorSliceReader(const string& fname,
TensorSliceReader::Table** result);
template <typename T>
bool TensorSliceReader::CopySliceData(const string& name,
const TensorSlice& slice, T* data) const {
std::vector<std::pair<TensorSlice, string>> details;
const TensorSliceSet* tss;
{
mutex_lock l(mu_);
tss = FindTensorSlice(name, slice, &details);
if (!tss && !all_shards_loaded_) {
VLOG(1) << "Did not find slice in preferred shard, loading all shards."
<< name << ": " << slice.DebugString();
LoadAllShards();
tss = FindTensorSlice(name, slice, &details);
}
if (!tss) {
return false;
}
}
string value;
for (const auto& x : details) {
const TensorSlice& slice_s = x.first;
const string& fname = x.second;
int idx = gtl::FindWithDefault(fname_to_index_, fname, -1);
CHECK_GE(idx, 0) << "Failed to find the index for filename " << fname;
const string key = EncodeTensorNameSlice(name, slice_s);
if (!sss_[idx]->Get(key, &value)) {
VLOG(1) << "Failed to seek to the record for tensor " << name
<< ", slice " << slice_s.DebugString()
<< ": computed key = " << key;
return false;
}
SavedTensorSlices sts;
if (!ParseProtoUnlimited(&sts, value)) {
VLOG(1) << "Failed to parse the record for tensor " << name << ", slice "
<< slice_s.DebugString() << ": computed key = " << key;
return false;
}
TensorShape shp_s;
Status s = slice_s.SliceTensorShape(tss->shape(), &shp_s);
if (!s.ok()) {
VLOG(1) << "Failed to slice tensor " << name << ", slice "
<< slice_s.DebugString() << ": " << s;
return false;
}
if (checkpoint::TensorProtoDataSize<T>(sts.data().data()) !=
shp_s.num_elements()) {
VLOG(1) << "Tensor " << name << ", slice " << slice_s.DebugString()
<< " had an unexpected amount of data: expected = "
<< shp_s.num_elements() << ", got = "
<< checkpoint::TensorProtoDataSize<T>(sts.data().data());
return false;
}
CopyDataFromTensorSliceToTensorSlice(
tss->shape(), slice_s, slice,
checkpoint::TensorProtoData<T>(sts.data().data()), data);
}
return true;
}
}  // namespace checkpoint
}  // namespace tensorflow
#endif
#include "tensorflow/core/util/tensor_slice_reader.h"
#include <climits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/io/iterator.h"
#include "tensorflow/core/lib/io/table.h"
#include "tensorflow/core/lib/io/table_options.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_util.h"
namespace tensorflow {
namespace checkpoint {
TensorSliceReader::Table::~Table() = default;
namespace {
class TensorSliceReaderTable : public TensorSliceReader::Table {
public:
explicit TensorSliceReaderTable(RandomAccessFile* f, table::Table* t)
: file_(f), table_(t) {}
~TensorSliceReaderTable() override {
delete table_;
delete file_;
}
bool Get(const string& key, string* value) override {
std::unique_ptr<table::Iterator> iter(table_->NewIterator());
iter->Seek(key);
if (iter->Valid() && iter->key() == key) {
StringPiece v = iter->value();
value->assign(v.data(), v.size());
return true;
} else {
return false;
}
}
private:
RandomAccessFile* file_;
table::Table* table_;
};
}  // namespace
Status OpenTableTensorSliceReader(const string& fname,
TensorSliceReader::Table** result) {
*result = nullptr;
Env* env = Env::Default();
std::unique_ptr<RandomAccessFile> f;
Status s = env->NewRandomAccessFile(fname, &f);
if (s.ok()) {
uint64 file_size;
s = env->GetFileSize(fname, &file_size);
if (s.ok()) {
table::Options options;
table::Table* table;
s = table::Table::Open(options, f.get(), file_size, &table);
if (s.ok()) {
*result = new TensorSliceReaderTable(f.release(), table);
return absl::OkStatus();
} else {
s = errors::CreateWithUpdatedMessage(
s, strings::StrCat(s.message(),
": perhaps your file is in a different "
"file format and you need to use a "
"different restore operator?"));
}
}
}
LOG(WARNING) << "Could not open " << fname << ": " << s;
return s;
}
TensorSliceReader::TensorSliceReader(const string& filepattern)
: TensorSliceReader(filepattern, OpenTableTensorSliceReader,
kLoadAllShards) {}
TensorSliceReader::TensorSliceReader(const string& filepattern,
OpenTableFunction open_function)
: TensorSliceReader(filepattern, std::move(open_function), kLoadAllShards) {
}
TensorSliceReader::TensorSliceReader(const string& filepattern,
OpenTableFunction open_function,
int preferred_shard)
: filepattern_(filepattern), open_function_(std::move(open_function)) {
VLOG(1) << "TensorSliceReader for " << filepattern;
Status s = Env::Default()->GetMatchingPaths(filepattern, &fnames_);
if (!s.ok()) {
status_ = errors::InvalidArgument(
"Unsuccessful TensorSliceReader constructor: "
"Failed to get matching files on ",
filepattern, ": ", s.ToString());
return;
}
if (fnames_.empty()) {
status_ = errors::NotFound(
"Unsuccessful TensorSliceReader constructor: "
"Failed to find any matching files for ",
filepattern);
return;
}
sss_.resize(fnames_.size());
for (size_t shard = 0; shard < fnames_.size(); ++shard) {
fname_to_index_.insert(std::make_pair(fnames_[shard], shard));
}
if (preferred_shard == kLoadAllShards || fnames_.size() == 1 ||
static_cast<size_t>(preferred_shard) >= fnames_.size()) {
LoadAllShards();
} else {
VLOG(1) << "Loading shard " << preferred_shard << " for " << filepattern_;
LoadShard(preferred_shard);
}
}
void TensorSliceReader::LoadShard(int shard) const {
CHECK_LT(shard, sss_.size());
if (sss_[shard] || !status_.ok()) {
return;
}
string value;
SavedTensorSlices sts;
const string fname = fnames_[shard];
VLOG(1) << "Reading meta data from file " << fname << "...";
Table* table;
Status s = open_function_(fname, &table);
if (!s.ok()) {
status_ = errors::DataLoss("Unable to open table file ", fname, ": ",
s.ToString());
return;
}
sss_[shard].reset(table);
if (!(table->Get(kSavedTensorSlicesKey, &value) &&
ParseProtoUnlimited(&sts, value))) {
status_ = errors::Internal(
"Failed to find the saved tensor slices at the beginning of the "
"checkpoint file: ",
fname);
return;
}
status_ = CheckVersions(sts.meta().versions(), TF_CHECKPOINT_VERSION,
TF_CHECKPOINT_VERSION_MIN_PRODUCER, "Checkpoint",
"checkpoint");
if (!status_.ok()) return;
for (const SavedSliceMeta& ssm : sts.meta().tensor()) {
TensorShape ssm_shape;
status_ = TensorShape::BuildTensorShapeBase(ssm.shape(), &ssm_shape);
if (!status_.ok()) return;
for (const TensorSliceProto& tsp : ssm.slice()) {
TensorSlice ss_slice;
status_ = TensorSlice::BuildTensorSlice(tsp, &ss_slice);
if (!status_.ok()) return;
status_ = RegisterTensorSlice(ssm.name(), ssm_shape, ssm.type(), fname,
ss_slice, &tensors_);
if (!status_.ok()) return;
}
}
}
void TensorSliceReader::LoadAllShards() const {
VLOG(1) << "Loading all shards for " << filepattern_;
for (size_t i = 0; i < fnames_.size() && status_.ok(); ++i) {
LoadShard(i);
}
all_shards_loaded_ = true;
}
const TensorSliceSet* TensorSliceReader::FindTensorSlice(
const string& name, const TensorSlice& slice,
std::vector<std::pair<TensorSlice, string>>* details) const {
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (tss && !tss->QueryMeta(slice, details)) {
return nullptr;
}
return tss;
}
TensorSliceReader::~TensorSliceReader() {
for (auto& temp : tensors_) {
delete temp.second;
}
tensors_.clear();
}
bool TensorSliceReader::HasTensor(const string& name, TensorShape* shape,
DataType* type) const {
mutex_lock l(mu_);
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (!tss && !all_shards_loaded_) {
VLOG(1) << "Did not find tensor in preferred shard, loading all shards: "
<< name;
LoadAllShards();
tss = gtl::FindPtrOrNull(tensors_, name);
}
if (tss) {
if (shape) {
*shape = tss->shape();
}
if (type) {
*type = tss->type();
}
return true;
} else {
return false;
}
}
Status TensorSliceReader::GetTensor(
const string& name, std::unique_ptr<tensorflow::Tensor>* out_tensor) const {
DataType type;
TensorShape shape;
TensorSlice slice;
{
mutex_lock l(mu_);
const TensorSliceSet* tss = gtl::FindPtrOrNull(tensors_, name);
if (tss == nullptr) {
return errors::NotFound(name, " not found in checkpoint file");
}
if (tss->Slices().size() > 1) {
return errors::Unimplemented("Sliced checkpoints are not supported");
}
type = tss->type();
shape = tss->shape();
slice = tss->Slices().begin()->second.slice;
}
std::unique_ptr<tensorflow::Tensor> t(new tensorflow::Tensor);
Status s = tensorflow::Tensor::BuildTensor(type, shape, t.get());
if (!s.ok()) return s;
for (const auto d : shape.dim_sizes()) {
if (d == LLONG_MAX) {
return errors::InvalidArgument("Unable to read dimensions of size ",
LLONG_MAX,
". Got shape: ", shape.DebugString());
}
}
bool success = false;
#define READER_COPY(dt) \
case dt: \
success = CopySliceData(name, slice, \
t->flat<EnumToDataType<dt>::Type>().data()); \
break;
switch (type) {
READER_COPY(DT_FLOAT);
READER_COPY(DT_DOUBLE);
READER_COPY(DT_INT32);
READER_COPY(DT_UINT8);
READER_COPY(DT_INT16);
READER_COPY(DT_INT8);
READER_COPY(DT_INT64);
READER_COPY(DT_STRING);
READER_COPY(DT_BOOL);
default:
return errors::Unimplemented("Data type not supported");
}
#undef READER_COPY
if (!success) {
return errors::NotFound(name, " not found in checkpoint file");
}
std::swap(*out_tensor, t);
return absl::OkStatus();
}
TensorSliceReader::VarToShapeMap TensorSliceReader::GetVariableToShapeMap()
const {
VarToShapeMap name_to_shape;
if (status().ok()) {
for (auto& e : Tensors()) {
name_to_shape[e.first] = e.second->shape();
}
}
return name_to_shape;
}
TensorSliceReader::VarToDataTypeMap
TensorSliceReader::GetVariableToDataTypeMap() const {
VarToDataTypeMap name_to_dtype;
if (status().ok()) {
for (auto& e : Tensors()) {
name_to_dtype[e.first] = e.second->type();
}
}
return name_to_dtype;
}
const string TensorSliceReader::DebugString() const {
string shape_str;
if (status().ok()) {
for (const auto& e : Tensors()) {
strings::StrAppend(&shape_str, e.first, " (",
DataType_Name(e.second->type()), ") ",
e.second->shape().DebugString());
const int num_slices = e.second->Slices().size();
if (num_slices > 1) {
strings::StrAppend(&shape_str, ", ", num_slices, " slices");
}
strings::StrAppend(&shape_str, "\n");
}
}
return shape_str;
}
}  // namespace checkpoint
} | #include "tensorflow/core/util/tensor_slice_reader.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/io/iterator.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/table.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/saved_tensor_slice.pb.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "tensorflow/core/util/tensor_slice_writer.h"
namespace tensorflow {
namespace checkpoint {
namespace {
void SimpleFloatHelper(
const TensorSliceWriter::CreateBuilderFunction& create_function,
TensorSliceReader::OpenTableFunction open_function) {
const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint");
TensorShape shape({4, 5});
{
const string fname = strings::StrCat(fname_base, "_0");
TensorSliceWriter writer(fname, create_function);
const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
}
{
const string fname = strings::StrCat(fname_base, "_1");
TensorSliceWriter writer(fname, create_function);
{
const float data[] = {10, 11, 12, 15, 16, 17};
TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
const float data[] = {18, 19};
TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
}
const string filepattern = strings::StrCat(fname_base, "_*");
TensorSliceReader reader(filepattern, std::move(open_function));
TF_EXPECT_OK(reader.status());
EXPECT_EQ(2, reader.num_files());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("test", &shape, &type));
EXPECT_EQ("[4,5]", shape.DebugString());
EXPECT_EQ(DT_FLOAT, type);
EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr));
}
{
TensorSlice s = TensorSlice::ParseOrDie("0,2:-");
float expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
float results[10];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,1:-");
float expected[] = {5, 6, 7, 8, 9};
float results[5];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3");
float results[6];
EXPECT_FALSE(reader.CopySliceData("test", s, results));
}
}
TEST(TensorSliceReaderTest, SimpleFloat) {
SimpleFloatHelper(CreateTableTensorSliceBuilder, OpenTableTensorSliceReader);
}
template <typename T, typename U>
void SimpleIntXHelper(
const TensorSliceWriter::CreateBuilderFunction& create_function,
TensorSliceReader::OpenTableFunction open_function,
const string& checkpoint_file) {
const string fname_base = io::JoinPath(testing::TmpDir(), checkpoint_file);
TensorShape shape({4, 5});
{
const string fname = strings::StrCat(fname_base, "_0");
TensorSliceWriter writer(fname, create_function);
const T data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
}
{
const string fname = strings::StrCat(fname_base, "_1");
TensorSliceWriter writer(fname, create_function);
{
const T data[] = {10, 11, 12, 15, 16, 17};
TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
const T data[] = {18, 19};
TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
}
const string filepattern = strings::StrCat(fname_base, "_*");
TensorSliceReader reader(filepattern, std::move(open_function));
TF_EXPECT_OK(reader.status());
EXPECT_EQ(2, reader.num_files());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader.HasTensor("test", &shape, &type));
EXPECT_EQ("[4,5]", shape.DebugString());
EXPECT_EQ(DataTypeToEnum<T>::v(), type);
EXPECT_FALSE(reader.HasTensor("don't exist", nullptr, nullptr));
}
{
TensorSlice s = TensorSlice::ParseOrDie("0,2:-");
T expected[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
U results[10];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 10; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,1:-");
T expected[] = {5, 6, 7, 8, 9};
U results[5];
EXPECT_TRUE(reader.CopySliceData("test", s, results));
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(expected[i], results[i]);
}
}
{
TensorSlice s = TensorSlice::ParseOrDie("1,2:2,3");
U results[6];
EXPECT_FALSE(reader.CopySliceData("test", s, results));
}
}
#define TEST_SIMPLE_INT(TYPE, SAVED_TYPE) \
TEST(TensorSliceReaderTest, Simple##TYPE) { \
SimpleIntXHelper<TYPE, SAVED_TYPE>(CreateTableTensorSliceBuilder, \
OpenTableTensorSliceReader, \
#TYPE "_checkpoint"); \
}
TEST_SIMPLE_INT(int32, int32)
TEST_SIMPLE_INT(int64_t, int64_t)
TEST_SIMPLE_INT(int16, int32)
TEST_SIMPLE_INT(int8, int32)
TEST_SIMPLE_INT(uint8, int32)
void MutateSavedTensorSlices(
const std::string& fname,
const std::function<std::string(SavedTensorSlices)>& mutator) {
table::Options options;
options.compression = table::kNoCompression;
std::vector<std::pair<std::string, std::string>> entries;
{
std::unique_ptr<RandomAccessFile> file;
TF_CHECK_OK(Env::Default()->NewRandomAccessFile(fname, &file));
uint64 file_size;
TF_CHECK_OK(Env::Default()->GetFileSize(fname, &file_size));
table::Table* t;
TF_CHECK_OK(table::Table::Open(options, file.get(), file_size, &t));
std::unique_ptr<table::Table> table(t);
std::unique_ptr<table::Iterator> it(table->NewIterator());
for (it->Seek(""); it->Valid(); it->Next()) {
entries.emplace_back(it->key(), it->value());
}
TF_CHECK_OK(it->status());
}
{
std::unique_ptr<WritableFile> file;
TF_CHECK_OK(Env::Default()->NewWritableFile(fname, &file));
table::TableBuilder builder(options, file.get());
for (const auto& entry : entries) {
SavedTensorSlices sts;
CHECK(sts.ParseFromString(entry.second));
builder.Add(entry.first, mutator(std::move(sts)));
}
TF_CHECK_OK(builder.Finish());
TF_CHECK_OK(file->Close());
}
}
TEST(TensorSliceReaderTest, MissingTensorType) {
const string fname = io::JoinPath(testing::TmpDir(), "invalid_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorShape shape({4, 5});
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
tensor.clear_type();
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
TF_CHECK_OK(reader.status());
EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr));
std::unique_ptr<Tensor> tensor;
EXPECT_FALSE(reader.GetTensor("test", &tensor).ok());
}
TEST(TensorSliceReaderTest, UnsupportedTensorType) {
const string fname = io::JoinPath(testing::TmpDir(), "int32_ref_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorShape shape({4, 5});
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
tensor.set_type(DT_INT32_REF);
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
TF_CHECK_OK(reader.status());
EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr));
std::unique_ptr<Tensor> tensor;
EXPECT_FALSE(reader.GetTensor("test", &tensor).ok());
}
TEST(TensorSliceReaderTest, NegativeTensorShapeDimension) {
const string fname =
io::JoinPath(testing::TmpDir(), "negative_dim_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}),
TensorSlice::ParseOrDie("0,2:-"), data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
for (auto& dim : *tensor.mutable_shape()->mutable_dim()) {
dim.set_size(-dim.size());
}
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
EXPECT_FALSE(reader.status().ok());
}
TEST(TensorSliceReaderTest, InvalidTensorSlice) {
const string fname =
io::JoinPath(testing::TmpDir(), "invalid_slice_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TF_CHECK_OK(writer.Add("test", TensorShape({4, 5}),
TensorSlice::ParseOrDie("0,2:-"), data));
TF_CHECK_OK(writer.Finish());
MutateSavedTensorSlices(fname, [](SavedTensorSlices sts) {
if (sts.has_meta()) {
for (auto& tensor : *sts.mutable_meta()->mutable_tensor()) {
tensor.mutable_slice(0)->mutable_extent(0)->set_length(-10);
}
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
EXPECT_FALSE(reader.status().ok());
}
TEST(TensorSliceReaderTest, MissingTensorData) {
const string fname =
io::JoinPath(testing::TmpDir(), "missing_data_checkpoint");
TensorSliceWriter writer(fname, CreateTableTensorSliceBuilder);
const int32 data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TF_ASSERT_OK(writer.Add("test", TensorShape({4, 5}),
TensorSlice::ParseOrDie("0,2:-"), data));
TF_ASSERT_OK(writer.Finish());
MutateSavedTensorSlices(fname, [&](SavedTensorSlices sts) {
if (sts.has_data()) {
Fill(data, 4, sts.mutable_data()->mutable_data());
}
return sts.SerializeAsString();
});
TensorSliceReader reader(fname, OpenTableTensorSliceReader);
TF_ASSERT_OK(reader.status());
EXPECT_TRUE(reader.HasTensor("test", nullptr, nullptr));
std::unique_ptr<Tensor> tensor;
EXPECT_FALSE(reader.GetTensor("test", &tensor).ok());
}
void CachedTensorSliceReaderTesterHelper(
const TensorSliceWriter::CreateBuilderFunction& create_function,
const TensorSliceReader::OpenTableFunction& open_function) {
const string fname_base = io::JoinPath(testing::TmpDir(), "float_checkpoint");
TensorShape shape({4, 5});
{
const string fname = strings::StrCat(fname_base, "_0");
TensorSliceWriter writer(fname, create_function);
const float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
TensorSlice slice = TensorSlice::ParseOrDie("0,2:-");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
TF_CHECK_OK(writer.Finish());
}
{
const string fname = strings::StrCat(fname_base, "_1");
TensorSliceWriter writer(fname, create_function);
{
const float data[] = {10, 11, 12, 15, 16, 17};
TensorSlice slice = TensorSlice::ParseOrDie("2,2:0,3");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
{
const float data[] = {18, 19};
TensorSlice slice = TensorSlice::ParseOrDie("3,1:3,2");
TF_CHECK_OK(writer.Add("test", shape, slice, data));
}
TF_CHECK_OK(writer.Finish());
}
TensorSliceReaderCache cache;
const string filepattern = strings::StrCat(fname_base, "_*");
const TensorSliceReader* reader = cache.GetReader(
filepattern, open_function, TensorSliceReader::kLoadAllShards);
EXPECT_TRUE(reader != nullptr);
EXPECT_EQ(2, reader->num_files());
{
TensorShape shape;
DataType type;
EXPECT_TRUE(reader->HasTensor("test", &shape, &type));
EXPECT_EQ("[4,5]", shape.DebugString());
EXPECT_EQ(DT_FLOAT, type);
EXPECT_FALSE(reader->HasTensor("don't exist", nullptr, nullptr));
}
const TensorSliceReader* reader2 = cache.GetReader(
filepattern, open_function, TensorSliceReader::kLoadAllShards);
EXPECT_EQ(reader, reader2);
reader = cache.GetReader("file_does_not_exist", open_function,
TensorSliceReader::kLoadAllShards);
EXPECT_TRUE(reader == nullptr);
}
TEST(CachedTensorSliceReaderTest, SimpleFloat) {
CachedTensorSliceReaderTesterHelper(CreateTableTensorSliceBuilder,
OpenTableTensorSliceReader);
}
static void VersionTest(const VersionDef& versions, const string& error) {
const string path = io::JoinPath(testing::TmpDir(), "checkpoint");
{
SavedTensorSlices sts;
*sts.mutable_meta()->mutable_versions() = versions;
string contents;
EXPECT_TRUE(sts.SerializeToString(&contents));
TensorSliceWriter::Builder* builder;
TF_ASSERT_OK(CreateTableTensorSliceBuilder(path, &builder));
builder->Add(kSavedTensorSlicesKey, contents);
int64_t file_size;
TF_EXPECT_OK(builder->Finish(&file_size));
delete builder;
}
TensorSliceReader reader(path, OpenTableTensorSliceReader);
EXPECT_TRUE(reader.status().code() == error::INVALID_ARGUMENT &&
absl::StartsWith(reader.status().message(), error))
<< "Expected error starting with '" << errors::InvalidArgument(error)
<< "', got '" << reader.status() << "'";
}
TEST(CheckpointVersionTest, MinConsumer) {
VersionDef versions;
versions.set_producer(TF_CHECKPOINT_VERSION + 1);
versions.set_min_consumer(TF_CHECKPOINT_VERSION + 1);
VersionTest(
versions,
strings::StrCat("Checkpoint min consumer version ",
TF_CHECKPOINT_VERSION + 1, " above current version ",
TF_CHECKPOINT_VERSION, " for TensorFlow"));
}
TEST(CheckpointVersionTest, MinProducer) {
VersionDef versions;
versions.set_producer(TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1);
VersionTest(versions, strings::StrCat("Checkpoint producer version ",
TF_CHECKPOINT_VERSION_MIN_PRODUCER - 1,
" below min producer ",
TF_CHECKPOINT_VERSION_MIN_PRODUCER,
" supported by TensorFlow"));
}
TEST(CheckpointVersionTest, BadConsumer) {
VersionDef versions;
versions.set_producer(TF_CHECKPOINT_VERSION + 1);
versions.add_bad_consumers(TF_CHECKPOINT_VERSION);
VersionTest(
versions,
strings::StrCat(
"Checkpoint disallows consumer version ", TF_CHECKPOINT_VERSION,
". Please upgrade TensorFlow: this version is likely buggy."));
}
}  // namespace
}  // namespace checkpoint
} |
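A minimal read-side sketch, distilled from `SimpleFloatHelper` above: the reader matches a file pattern (possibly several shards) and stitches the requested slice together from whichever shards hold overlapping data. The pattern and tensor name are illustrative.

```cpp
// Sketch: copy rows [0,2) of a 4x5 float tensor named "test" into out10.
#include <string>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/util/tensor_slice_reader.h"

bool ReadExampleSlice(const std::string& filepattern, float* out10) {
  tensorflow::checkpoint::TensorSliceReader reader(
      filepattern, tensorflow::checkpoint::OpenTableTensorSliceReader);
  if (!reader.status().ok()) return false;
  tensorflow::TensorShape shape;
  tensorflow::DataType type;
  if (!reader.HasTensor("test", &shape, &type)) return false;
  // "0,2:-" selects rows 0..1 across all columns: 2 * 5 = 10 floats.
  tensorflow::TensorSlice s = tensorflow::TensorSlice::ParseOrDie("0,2:-");
  return reader.CopySliceData("test", s, out10);
}
```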
1,703 | cpp | tensorflow/tensorflow | events_writer | tensorflow/core/util/events_writer.cc | tensorflow/core/util/events_writer_test.cc | #ifndef TENSORFLOW_CORE_UTIL_EVENTS_WRITER_H_
#define TENSORFLOW_CORE_UTIL_EVENTS_WRITER_H_
#include <memory>
#include <string>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
class EventsWriter {
public:
#ifndef SWIG
static constexpr const char* kVersionPrefix = "brain.Event:";
static constexpr const int kCurrentVersion = 2;
static constexpr const char* kWriterSourceMetadata =
"tensorflow.core.util.events_writer";
#endif
explicit EventsWriter(const std::string& file_prefix);
~EventsWriter();
Status Init();
Status InitWithSuffix(const std::string& suffix);
std::string FileName();
void WriteEvent(const tensorflow::Event& event);
void WriteSerializedEvent(tensorflow::StringPiece event_str);
Status Flush();
Status Close();
private:
Status FileStillExists();
Status InitIfNeeded();
Env* env_;
const std::string file_prefix_;
std::string file_suffix_;
std::string filename_;
std::unique_ptr<WritableFile> recordio_file_;
std::unique_ptr<io::RecordWriter> recordio_writer_;
int num_outstanding_events_;
#ifndef SWIG
EventsWriter(const EventsWriter&) = delete;
void operator=(const EventsWriter&) = delete;
#endif
};
}  // namespace tensorflow
#endif
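A minimal usage sketch based on the interface above; the prefix is illustrative, and per the implementation below the final name becomes `<prefix>.out.tfevents.<timestamp>.<hostname><suffix>`.

```cpp
// Sketch: open an events file, append one Event, and close cleanly.
#include <string>
#include "tensorflow/core/util/event.pb.h"
#include "tensorflow/core/util/events_writer.h"

tensorflow::Status WriteOneEvent(const std::string& prefix) {
  tensorflow::EventsWriter writer(prefix);
  tensorflow::Status s = writer.Init();  // opens the file, emits the version event
  if (!s.ok()) return s;
  tensorflow::Event event;
  event.set_wall_time(123.0);  // seconds since epoch; illustrative value
  event.set_step(1);
  writer.WriteEvent(event);  // buffered until Flush()/Close()
  return writer.Close();     // flushes, syncs, and closes the file
}
```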
#include "tensorflow/core/util/events_writer.h"
#include <stddef.h>
#include <memory>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
EventsWriter::EventsWriter(const string& file_prefix)
: env_(Env::Default()),
file_prefix_(file_prefix),
num_outstanding_events_(0) {}
EventsWriter::~EventsWriter() {
Close().IgnoreError();
}
Status EventsWriter::Init() { return InitWithSuffix(""); }
Status EventsWriter::InitWithSuffix(const string& suffix) {
file_suffix_ = suffix;
return InitIfNeeded();
}
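// Lazily opens the events file. If a previously opened file has disappeared,
// a new timestamped file is created (any unflushed events are lost, with a
// warning); the first record written to a new file is a version/metadata
// Event identifying the writer.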
Status EventsWriter::InitIfNeeded() {
if (recordio_writer_ != nullptr) {
CHECK(!filename_.empty());
if (!FileStillExists().ok()) {
if (num_outstanding_events_ > 0) {
LOG(WARNING) << "Re-initialization, attempting to open a new file, "
<< num_outstanding_events_ << " events will be lost.";
}
} else {
return absl::OkStatus();
}
}
int64_t time_in_seconds = env_->NowMicros() / 1000000;
filename_ =
strings::Printf("%s.out.tfevents.%010lld.%s%s", file_prefix_.c_str(),
static_cast<long long>(time_in_seconds),
port::Hostname().c_str(), file_suffix_.c_str());
recordio_writer_.reset();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env_->NewWritableFile(filename_, &recordio_file_),
"Creating writable file ", filename_);
recordio_writer_ = std::make_unique<io::RecordWriter>(recordio_file_.get());
if (recordio_writer_ == nullptr) {
return errors::Unknown("Could not create record writer");
}
num_outstanding_events_ = 0;
VLOG(1) << "Successfully opened events file: " << filename_;
{
Event event;
event.set_wall_time(time_in_seconds);
event.set_file_version(strings::StrCat(kVersionPrefix, kCurrentVersion));
SourceMetadata* source_metadata = event.mutable_source_metadata();
source_metadata->set_writer(kWriterSourceMetadata);
WriteEvent(event);
TF_RETURN_WITH_CONTEXT_IF_ERROR(Flush(), "Flushing first event.");
}
return absl::OkStatus();
}
string EventsWriter::FileName() {
if (filename_.empty()) {
InitIfNeeded().IgnoreError();
}
return filename_;
}
void EventsWriter::WriteSerializedEvent(StringPiece event_str) {
if (recordio_writer_ == nullptr) {
if (!InitIfNeeded().ok()) {
LOG(ERROR) << "Write failed because file could not be opened.";
return;
}
}
num_outstanding_events_++;
recordio_writer_->WriteRecord(event_str).IgnoreError();
}
void EventsWriter::WriteEvent(const Event& event) {
string record;
event.AppendToString(&record);
WriteSerializedEvent(record);
}
Status EventsWriter::Flush() {
if (num_outstanding_events_ == 0) return absl::OkStatus();
CHECK(recordio_file_ != nullptr) << "Unexpected NULL file";
TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_writer_->Flush(), "Failed to flush ",
num_outstanding_events_, " events to ",
filename_);
TF_RETURN_WITH_CONTEXT_IF_ERROR(recordio_file_->Sync(), "Failed to sync ",
num_outstanding_events_, " events to ",
filename_);
VLOG(1) << "Wrote " << num_outstanding_events_ << " events to disk.";
num_outstanding_events_ = 0;
return absl::OkStatus();
}
Status EventsWriter::Close() {
Status status = Flush();
if (recordio_file_ != nullptr) {
Status close_status = recordio_file_->Close();
if (!close_status.ok()) {
status = close_status;
}
recordio_writer_.reset(nullptr);
recordio_file_.reset(nullptr);
}
num_outstanding_events_ = 0;
return status;
}
Status EventsWriter::FileStillExists() {
if (env_->FileExists(filename_).ok()) {
return absl::OkStatus();
}
return errors::Unknown("The events file ", filename_, " has disappeared.");
}
} | #include "tensorflow/core/util/events_writer.h"
#include <math.h>
#include <memory>
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
Env* env() { return Env::Default(); }
void WriteSimpleValue(EventsWriter* writer, double wall_time, int64_t step,
const string& tag, float simple_value) {
Event event;
event.set_wall_time(wall_time);
event.set_step(step);
Summary::Value* summ_val = event.mutable_summary()->add_value();
summ_val->set_tag(tag);
summ_val->set_simple_value(simple_value);
writer->WriteEvent(event);
}
void WriteFile(EventsWriter* writer) {
WriteSimpleValue(writer, 1234, 34, "foo", 3.14159);
WriteSimpleValue(writer, 2345, 35, "bar", -42);
}
static bool ReadEventProto(io::RecordReader* reader, uint64* offset,
Event* proto) {
tstring record;
Status s = reader->ReadRecord(offset, &record);
if (!s.ok()) {
return false;
}
return ParseProtoUnlimited(proto, record);
}
void VerifyFile(const string& filename) {
TF_CHECK_OK(env()->FileExists(filename));
std::unique_ptr<RandomAccessFile> event_file;
TF_CHECK_OK(env()->NewRandomAccessFile(filename, &event_file));
io::RecordReader* reader = new io::RecordReader(event_file.get());
uint64 offset = 0;
Event actual;
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
double current_time = env()->NowMicros() / 1000000.0;
EXPECT_LT(fabs(actual.wall_time() - current_time), 5);
EXPECT_EQ(actual.file_version(),
strings::StrCat(EventsWriter::kVersionPrefix,
EventsWriter::kCurrentVersion));
EXPECT_EQ(actual.source_metadata().writer(),
EventsWriter::kWriterSourceMetadata);
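  // Note: the `expected` protos below are parsed but never compared against
  // `actual`; this helper only verifies that each event can be read back.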
Event expected;
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"wall_time: 1234 step: 34 "
"summary { value { tag: 'foo' simple_value: 3.14159 } }",
&expected));
CHECK(ReadEventProto(reader, &offset, &actual));
VLOG(1) << actual.ShortDebugString();
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(
"wall_time: 2345 step: 35 "
"summary { value { tag: 'bar' simple_value: -42 } }",
&expected));
TF_CHECK_OK(env()->DeleteFile(filename));
delete reader;
}
string GetDirName(const string& suffix) {
return io::JoinPath(testing::TmpDir(), suffix);
}
TEST(EventWriter, WriteFlush) {
string file_prefix = GetDirName("/writeflush_test");
EventsWriter writer(file_prefix);
WriteFile(&writer);
TF_EXPECT_OK(writer.Flush());
string filename = writer.FileName();
VerifyFile(filename);
}
TEST(EventWriter, WriteClose) {
string file_prefix = GetDirName("/writeclose_test");
EventsWriter writer(file_prefix);
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
string filename = writer.FileName();
VerifyFile(filename);
}
TEST(EventWriter, WriteDelete) {
string file_prefix = GetDirName("/writedelete_test");
EventsWriter* writer = new EventsWriter(file_prefix);
WriteFile(writer);
string filename = writer->FileName();
delete writer;
VerifyFile(filename);
}
TEST(EventWriter, FailFlush) {
string file_prefix = GetDirName("/failflush_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_TRUE(writer.Flush().ok());
}
TEST(EventWriter, FailClose) {
string file_prefix = GetDirName("/failclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
WriteFile(&writer);
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
EXPECT_TRUE(writer.Close().ok());
}
TEST(EventWriter, InitWriteClose) {
string file_prefix = GetDirName("/initwriteclose_test");
EventsWriter writer(file_prefix);
TF_EXPECT_OK(writer.Init());
string filename0 = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename0));
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
string filename1 = writer.FileName();
EXPECT_EQ(filename0, filename1);
VerifyFile(filename1);
}
TEST(EventWriter, NameWriteClose) {
string file_prefix = GetDirName("/namewriteclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename));
WriteFile(&writer);
TF_EXPECT_OK(writer.Close());
VerifyFile(filename);
}
TEST(EventWriter, NameClose) {
string file_prefix = GetDirName("/nameclose_test");
EventsWriter writer(file_prefix);
string filename = writer.FileName();
TF_EXPECT_OK(writer.Close());
TF_EXPECT_OK(env()->FileExists(filename));
TF_ASSERT_OK(env()->DeleteFile(filename));
}
TEST(EventWriter, FileDeletionBeforeWriting) {
string file_prefix = GetDirName("/fdbw_test");
EventsWriter writer(file_prefix);
string filename0 = writer.FileName();
TF_EXPECT_OK(env()->FileExists(filename0));
env()->SleepForMicroseconds(
2000000);
TF_ASSERT_OK(env()->DeleteFile(filename0));
TF_EXPECT_OK(writer.Init());
WriteFile(&writer);
TF_EXPECT_OK(writer.Flush());
string filename1 = writer.FileName();
EXPECT_NE(filename0, filename1);
VerifyFile(filename1);
}
}
} |
1,704 | cpp | tensorflow/tensorflow | debug_data_dumper | tensorflow/core/util/debug_data_dumper.cc | tensorflow/core/util/debug_data_dumper_test.cc | #ifndef TENSORFLOW_CORE_UTIL_DEBUG_DATA_DUMPER_H_
#define TENSORFLOW_CORE_UTIL_DEBUG_DATA_DUMPER_H_
#include <optional>
#include <set>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/mutex.h"
#define DEBUG_DATA_DUMPER() ::tensorflow::DebugDataDumper::Global()
inline constexpr const char* kDebugGroupMain = "main";
inline constexpr const char* kDebugGroupOpStacktrace = "op_stacktrace";
inline constexpr const char* kDebugGroupGraphOptPass = "graph_opt_pass";
inline constexpr const char* kDebugGroupBridgePhase1Clustering =
"bridge_phase1_clustering";
inline constexpr const char* kDebugGroupRuntimeLowering = "runtime_lowering";
inline constexpr const char* kDebugGroupBridgePhase1ExecutorExport =
"bridge_phase1_executor_export";
inline constexpr const char* kDebugGroupBridgePhase2 = "bridge_phase2";
inline constexpr const char* kDebugGroupDTensorMlir = "dtensor_mlir";
inline constexpr const char* kDebugGroupDTensorGraph = "dtensor_graph";
inline constexpr const char* kDebugGroupDTensorLayout = "dtensor_layout";
namespace tensorflow {
class FunctionLibraryDefinition;
class Graph;
class DebugDataDumper {
public:
static DebugDataDumper* Global();
void LoadEnvvars();
bool ShouldDump(const std::string& name, const std::string& group) const;
void DumpOpCreationStackTraces(const std::string& name,
const std::string& group,
const std::string& tag, const Graph* graph);
void DumpGraph(const std::string& name, const std::string& group,
const std::string& tag, const Graph* graph,
const FunctionLibraryDefinition* func_lib_def,
bool bypass_filter = false);
std::string GetDumpFilename(const std::string& name, const std::string& group,
const std::string& tag);
private:
DebugDataDumper();
int GetNextDumpId(const std::string& name) {
const mutex_lock lock(lock_);
return dump_order_ids_[name]++;
}
absl::flat_hash_map<std::string, int> dump_order_ids_;
tensorflow::mutex lock_;
std::optional<std::string> name_filter_;
  std::set<std::string> groups_filter_;
bool dump_wrapped_;
};
}
#endif
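// --- Illustrative usage sketch (added; not part of the original file). ---
// A minimal example of gating a dump on the singleton declared above. The
// graph name "my_graph" and tag "after_my_pass" are hypothetical; dumping only
// occurs when the TF_DUMP_GRAPH_NAME_FILTER / TF_DUMP_GRAPH_GROUPS environment
// variables enable it (DumpGraph applies the same ShouldDump filter itself, so
// the explicit check here is shown only for clarity).
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/debug_data_dumper.h"

void MaybeDumpMyGraph(const tensorflow::Graph* graph) {
  if (DEBUG_DATA_DUMPER()->ShouldDump("my_graph", kDebugGroupMain)) {
    DEBUG_DATA_DUMPER()->DumpGraph("my_graph", kDebugGroupMain, "after_my_pass",
                                   graph, /*func_lib_def=*/nullptr,
                                   /*bypass_filter=*/false);
  }
}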
#include "tensorflow/core/util/debug_data_dumper.h"
#include <optional>
#include <set>
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
DebugDataDumper* DebugDataDumper::Global() {
static DebugDataDumper* global_instance_ = new DebugDataDumper();
return global_instance_;
}
DebugDataDumper::DebugDataDumper() { LoadEnvvars(); }
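// Reads the environment variables that control dumping:
//   TF_DUMP_GRAPH_WRAPPED      - also dump graphs whose names start with
//                                "__wrapped__" when set.
//   TF_DUMP_GRAPH_NAME_FILTER  - substring filter on graph names ("*" matches
//                                all); dumping is disabled when unset.
//   TF_DUMP_GRAPH_GROUPS       - comma-separated list of debug groups
//                                (defaults to "main"; "*" matches all).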
void DebugDataDumper::LoadEnvvars() {
const char* dump_wrapped = getenv("TF_DUMP_GRAPH_WRAPPED");
dump_wrapped_ = static_cast<bool>(dump_wrapped);
const char* name_filter = getenv("TF_DUMP_GRAPH_NAME_FILTER");
name_filter_ =
name_filter ? std::optional<std::string>{name_filter} : std::nullopt;
const char* groups_filter = getenv("TF_DUMP_GRAPH_GROUPS");
groups_filter_ =
groups_filter ? std::set<std::string>(absl::StrSplit(groups_filter, ','))
: std::set<std::string>({kDebugGroupMain});
}
bool DebugDataDumper::ShouldDump(const std::string& name,
const std::string& group) const {
if (!dump_wrapped_ && absl::StartsWith(name, "__wrapped__")) return false;
if (name_filter_ == std::nullopt) {
VLOG(1) << "Skip dumping graph '" << name
<< "', because TF_DUMP_GRAPH_NAME_FILTER is not set";
return false;
}
if (!absl::EqualsIgnoreCase(*name_filter_, "*") &&
!absl::StrContains(name, *name_filter_)) {
VLOG(1) << "Skip dumping graph '" << name
<< "', because TF_DUMP_GRAPH_NAME_FILTER is not '*' and "
<< "it is not contained by the graph name";
return false;
}
if (groups_filter_.find(group) == groups_filter_.end() &&
groups_filter_.find("*") == groups_filter_.end())
return false;
return true;
}
void DebugDataDumper::DumpOpCreationStackTraces(const std::string& name,
const std::string& group,
const std::string& tag,
const Graph* graph) {
if (!ShouldDump(name, group)) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
DumpToFile(dump_filename, "", ".csv", "StackTrace",
[graph, &dump_filename](WritableFile* file) {
auto status = file->Append("node_id,node_name,stackframes\n");
if (!status.ok()) {
LOG(WARNING) << "error writing to file to " << dump_filename
<< ": " << status.message();
return status;
}
for (Node* node : graph->nodes()) {
auto stack_trace = node->GetStackTrace();
if (stack_trace == nullptr) continue;
int node_id = node->id();
const std::string& node_name = node->name();
std::vector<std::string> stackframes;
stackframes.reserve(stack_trace->ToFrames().size());
for (auto& frame : stack_trace->ToFrames()) {
stackframes.push_back(
absl::StrFormat("%s(%d): %s", frame.file_name,
frame.line_number, frame.function_name));
}
status = file->Append(
absl::StrFormat("%d,%s,%s\n", node_id, node_name,
absl::StrJoin(stackframes, ";")));
if (!status.ok()) {
LOG(WARNING) << "error writing to file to " << dump_filename
<< ": " << status.message();
return status;
}
}
return file->Close();
});
}
void DebugDataDumper::DumpGraph(const std::string& name,
const std::string& group,
const std::string& tag, const Graph* graph,
const FunctionLibraryDefinition* func_lib_def,
bool bypass_filter) {
if (!ShouldDump(name, group) && !bypass_filter) return;
std::string dump_filename = GetDumpFilename(name, group, tag);
if (dump_filename.size() > 255) {
LOG(WARNING) << "Failed to dump graph " << dump_filename << " to "
<< ", because the file name is longer than 255";
return;
}
GraphDef graph_def;
graph->ToGraphDef(&graph_def);
if (func_lib_def) {
FunctionLibraryDefinition reachable_lib_def =
func_lib_def->ReachableDefinitions(graph_def);
*graph_def.mutable_library() = reachable_lib_def.ToProto();
}
DumpGraphDefToFile(dump_filename, graph_def);
}
std::string DebugDataDumper::GetDumpFilename(const std::string& name,
const std::string& group,
const std::string& tag) {
std::string dump_name = name.empty() ? "unknown_graph" : name;
return absl::StrFormat("%s.%04d.%s.%s", dump_name, GetNextDumpId(name), group,
tag);
}
} | #include "tensorflow/core/util/debug_data_dumper.h"
#include <string>
#include "absl/strings/str_format.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(DebugDataDumper, NoPrefixTest) {
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, NoNameFilterTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
}
TEST(DebugDataDumper, ShouldDumpTest) {
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "DoNotDumpGraph", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupMain));
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
setenv("TF_DUMP_GRAPH_GROUPS", "main,bridge_phase1_clustering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true,
DEBUG_DATA_DUMPER()->ShouldDump("DumpGraphToFileTest",
kDebugGroupBridgePhase1Clustering));
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(false, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
setenv("TF_DUMP_GRAPH_WRAPPED", "true", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
EXPECT_EQ(true, DEBUG_DATA_DUMPER()->ShouldDump(
"__wrapped__DumpGraphToFileTest", kDebugGroupMain));
}
TEST(DebugDataDumper, DumpFileBasenameTest) {
EXPECT_EQ("DumpFileBasenameTest1.0000.main.tag1",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
kDebugGroupMain, "tag1"));
EXPECT_EQ("DumpFileBasenameTest1.0001.main.tag2",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest1",
kDebugGroupMain, "tag2"));
EXPECT_EQ("DumpFileBasenameTest2.0000.main.tag1",
DEBUG_DATA_DUMPER()->GetDumpFilename("DumpFileBasenameTest2",
kDebugGroupMain, "tag1"));
}
TEST(DebugDataDumper, DumpGraphToFileTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpGraph("DumpGraphToFileTest", kDebugGroupMain, "tag",
&graph, nullptr, false);
std::string dumpFilename =
io::JoinPath(dir, "DumpGraphToFileTest.0000.main.tag.pbtxt");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, DumpGraphLongFileNameCrashTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
std::string name = std::string(256, 'x');
DEBUG_DATA_DUMPER()->DumpGraph(name, kDebugGroupMain, "tag", &graph, nullptr,
false);
std::string dumpFilename = io::JoinPath(
dir, absl::StrFormat("%s.0000.main.tag.pbtxt", name.c_str()));
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
TEST(DebugDataDumper, DumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_OP_CREATION_STACKTRACES", "1", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.csv");
EXPECT_EQ(absl::OkStatus(), Env::Default()->FileExists(dumpFilename));
}
TEST(DebugDataDumper, NoDumpOpCreationStacktracesTest) {
Graph graph(OpRegistry::Global());
Node* node;
TF_CHECK_OK(NodeBuilder("A", "NoOp").Finalize(&graph, &node));
std::string dir = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", dir.c_str(), 1);
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
"DumpOpCreationStacktracesTest", kDebugGroupMain, "test", &graph);
std::string dumpFilename =
io::JoinPath(dir, "DumpOpCreationStacktracesTest.0000.main.test.json");
EXPECT_EQ(absl::StatusCode::kNotFound,
Env::Default()->FileExists(dumpFilename).code());
}
}
} |
1,705 | cpp | tensorflow/tensorflow | example_proto_helper | tensorflow/core/util/example_proto_helper.cc | tensorflow/core/util/example_proto_helper_test.cc | #ifndef TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_HELPER_H_
#define TENSORFLOW_CORE_UTIL_EXAMPLE_PROTO_HELPER_H_
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
struct FixedLenFeature {
string key;
DataType dtype;
TensorShape shape;
Tensor default_value;
string values_output_tensor_name;
};
struct VarLenFeature {
string key;
DataType dtype;
string values_output_tensor_name;
string indices_output_tensor_name;
string shapes_output_tensor_name;
};
Status SingleExampleProtoToTensors(
const Example& example, const string& name, int batch_index,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features,
std::vector<Tensor*>* output_dense_values_tensor,
std::vector<std::vector<Tensor>>* output_sparse_values_tmp);
struct VarLenFeatureBatchShapes {
TensorShape indices_shape;
TensorShape values_shape;
int max_num_features;
};
Status GetSparseTensorShapes(const VarLenFeature& var_len_feature,
const std::vector<Tensor>& sparse_values_tmp,
int batch_size,
VarLenFeatureBatchShapes* output_shapes);
Status BatchExampleProtoToTensors(
const std::vector<const Example*>& examples,
const std::vector<string>& names,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features, Allocator* allocator,
std::vector<Tensor>* output_dense_values_tensor,
std::vector<Tensor>* output_sparse_indices_tensor,
std::vector<Tensor>* output_sparse_values_tensor,
std::vector<Tensor>* output_sparse_shapes_tensor);
Status CheckValidType(const DataType& dtype);
Status CheckTypesMatch(const Feature& feature, const DataType& dtype,
bool* match);
Status FeatureDenseCopy(std::size_t out_index, const string& name,
const string& key, const DataType& dtype,
const TensorShape& shape, const Feature& feature,
Tensor* out);
void RowDenseCopy(const std::size_t& out_index, const DataType& dtype,
const Tensor& in, Tensor* out);
Tensor FeatureSparseCopy(std::size_t batch, const string& key,
const DataType& dtype, const Feature& feature);
int64_t CopyIntoSparseTensor(const Tensor& in, int batch, int64_t offset,
Tensor* indices, Tensor* values);
Status GetDenseShapes(const std::vector<PartialTensorShape>& dense_shapes,
std::vector<bool>* variable_length,
std::vector<std::size_t>* elements_per_stride);
struct ParseExampleAttrs {
public:
template <typename ContextType>
Status Init(ContextType* ctx, int op_version = 1) {
TF_RETURN_IF_ERROR(ctx->GetAttr("sparse_types", &sparse_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("Tdense", &dense_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("dense_shapes", &dense_shapes));
TF_RETURN_IF_ERROR(
GetDenseShapes(dense_shapes, &variable_length, &elements_per_stride));
switch (op_version) {
case 1:
TF_RETURN_IF_ERROR(ctx->GetAttr("Nsparse", &num_sparse));
TF_RETURN_IF_ERROR(ctx->GetAttr("Ndense", &num_dense));
break;
case 2:
TF_RETURN_IF_ERROR(
ctx->GetAttr("ragged_value_types", &ragged_value_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("num_sparse", &num_sparse));
TF_RETURN_IF_ERROR(
ctx->GetAttr("ragged_split_types", &ragged_split_types));
break;
default:
return errors::InvalidArgument("Unexpected op_version", op_version);
}
return FinishInit(op_version);
}
int64_t num_sparse;
int64_t num_dense;
int64_t num_ragged;
std::vector<DataType> sparse_types;
std::vector<DataType> dense_types;
std::vector<DataType> ragged_value_types;
std::vector<DataType> ragged_split_types;
std::vector<PartialTensorShape> dense_shapes;
std::vector<bool> variable_length;
std::vector<std::size_t> elements_per_stride;
private:
Status FinishInit(int op_version);
};
struct ParseSingleExampleAttrs {
public:
template <typename ContextType>
Status Init(ContextType* ctx) {
TF_RETURN_IF_ERROR(ctx->GetAttr("sparse_keys", &sparse_keys));
TF_RETURN_IF_ERROR(ctx->GetAttr("sparse_types", &sparse_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("dense_keys", &dense_keys));
TF_RETURN_IF_ERROR(ctx->GetAttr("Tdense", &dense_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("dense_shapes", &dense_shapes));
int num_sparse;
TF_RETURN_IF_ERROR(ctx->GetAttr("num_sparse", &num_sparse));
    if (static_cast<size_t>(num_sparse) != sparse_keys.size() ||
        static_cast<size_t>(num_sparse) != sparse_types.size()) {
return errors::InvalidArgument(
"num_sparse (", num_sparse, ") must match the size of sparse_keys (",
sparse_keys.size(), ") and sparse_types (", sparse_types.size(), ")");
}
TF_RETURN_IF_ERROR(
GetDenseShapes(dense_shapes, &variable_length, &elements_per_stride));
return FinishInit();
}
std::vector<tstring> sparse_keys;
std::vector<DataType> sparse_types;
std::vector<tstring> dense_keys;
std::vector<DataType> dense_types;
std::vector<PartialTensorShape> dense_shapes;
std::vector<bool> variable_length;
std::vector<std::size_t> elements_per_stride;
private:
Status FinishInit();
};
struct ParseSequenceExampleAttrs {
public:
template <typename ContextType>
Status Init(ContextType* ctx, int op_version = 1) {
switch (op_version) {
case 1: {
std::vector<string> missing_empty_vector;
TF_RETURN_IF_ERROR(ctx->GetAttr(
"feature_list_dense_missing_assumed_empty", &missing_empty_vector));
for (const string& feature : missing_empty_vector) {
feature_list_dense_missing_assumed_empty.insert(feature);
}
}
TF_RETURN_IF_ERROR(
ctx->GetAttr("context_sparse_keys", &context_sparse_keys));
TF_RETURN_IF_ERROR(
ctx->GetAttr("context_dense_keys", &context_dense_keys));
TF_RETURN_IF_ERROR(ctx->GetAttr("feature_list_sparse_keys",
&feature_list_sparse_keys));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_dense_keys", &feature_list_dense_keys));
TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_dense", &num_context_dense));
break;
case 2:
TF_RETURN_IF_ERROR(ctx->GetAttr("context_ragged_value_types",
&context_ragged_value_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("context_ragged_split_types",
&context_ragged_split_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("feature_list_ragged_value_types",
&feature_list_ragged_value_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("feature_list_ragged_split_types",
&feature_list_ragged_split_types));
break;
default:
return errors::InvalidArgument("Unexpected op_version", op_version);
}
TF_RETURN_IF_ERROR(
ctx->GetAttr("context_sparse_types", &context_sparse_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("Nfeature_list_dense", &num_feature_list_dense));
TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_sparse", &num_context_sparse));
TF_RETURN_IF_ERROR(ctx->GetAttr("Tcontext_dense", &context_dense_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_sparse_types", &feature_list_sparse_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_dense_types", &feature_list_dense_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("Nfeature_list_sparse", &num_feature_list_sparse));
TF_RETURN_IF_ERROR(
ctx->GetAttr("context_dense_shapes", &context_dense_shapes));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_dense_shapes", &feature_list_dense_shapes));
return FinishInit(op_version);
}
std::unordered_set<string> feature_list_dense_missing_assumed_empty;
int64_t num_context_sparse;
int64_t num_context_dense;
int64_t num_context_ragged;
int64_t num_feature_list_sparse;
int64_t num_feature_list_dense;
int64_t num_feature_list_ragged;
std::vector<tstring> context_sparse_keys;
std::vector<tstring> context_dense_keys;
std::vector<tstring> feature_list_sparse_keys;
std::vector<tstring> feature_list_dense_keys;
std::vector<DataType> context_sparse_types;
std::vector<DataType> context_dense_types;
std::vector<TensorShape> context_dense_shapes;
std::vector<DataType> feature_list_sparse_types;
std::vector<DataType> feature_list_dense_types;
std::vector<TensorShape> feature_list_dense_shapes;
std::vector<DataType> context_ragged_value_types;
std::vector<DataType> context_ragged_split_types;
std::vector<DataType> feature_list_ragged_value_types;
std::vector<DataType> feature_list_ragged_split_types;
private:
Status FinishInit(int op_version);
};
struct ParseSingleSequenceExampleAttrs {
public:
template <typename ContextType>
Status Init(ContextType* ctx) {
TF_RETURN_IF_ERROR(
ctx->GetAttr("context_sparse_types", &context_sparse_types));
TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_dense", &num_context_dense));
TF_RETURN_IF_ERROR(
ctx->GetAttr("Nfeature_list_dense", &num_feature_list_dense));
TF_RETURN_IF_ERROR(ctx->GetAttr("Ncontext_sparse", &num_context_sparse));
TF_RETURN_IF_ERROR(ctx->GetAttr("Tcontext_dense", &context_dense_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_sparse_types", &feature_list_sparse_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_dense_types", &feature_list_dense_types));
TF_RETURN_IF_ERROR(
ctx->GetAttr("Nfeature_list_sparse", &num_feature_list_sparse));
TF_RETURN_IF_ERROR(
ctx->GetAttr("context_dense_shapes", &context_dense_shapes));
TF_RETURN_IF_ERROR(
ctx->GetAttr("feature_list_dense_shapes", &feature_list_dense_shapes));
return FinishInit();
}
int64_t num_context_sparse;
int64_t num_context_dense;
int64_t num_feature_list_sparse;
int64_t num_feature_list_dense;
std::vector<DataType> context_sparse_types;
std::vector<DataType> context_dense_types;
std::vector<TensorShape> context_dense_shapes;
std::vector<DataType> feature_list_sparse_types;
std::vector<DataType> feature_list_dense_types;
std::vector<TensorShape> feature_list_dense_shapes;
private:
Status FinishInit();
};
}
#endif
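// --- Illustrative usage sketch (added; not part of the original file). ---
// A minimal example of configuring the structs above and batch-converting
// Example protos. The feature key "age" and its default value of -1 are
// hypothetical; the sketch parses one fixed-length int64 feature and no
// sparse features.
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/util/example_proto_helper.h"

tensorflow::Status ParseAgeBatch(
    const std::vector<const tensorflow::Example*>& examples,
    std::vector<tensorflow::Tensor>* dense_out) {
  tensorflow::FixedLenFeature age;
  age.key = "age";  // hypothetical feature key
  age.dtype = tensorflow::DT_INT64;
  age.shape = tensorflow::TensorShape({1});
  age.default_value = tensorflow::Tensor(tensorflow::DT_INT64, {1});
  age.default_value.flat<int64_t>()(0) = -1;  // hypothetical default
  std::vector<tensorflow::FixedLenFeature> dense_config = {age};
  std::vector<tensorflow::VarLenFeature> sparse_config;  // none in this sketch
  dense_out->resize(dense_config.size());
  std::vector<tensorflow::Tensor> sp_indices, sp_values, sp_shapes;
  return tensorflow::BatchExampleProtoToTensors(
      examples, /*names=*/{}, dense_config, sparse_config,
      tensorflow::cpu_allocator(), dense_out, &sp_indices, &sp_values,
      &sp_shapes);
}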
#include "tensorflow/core/util/example_proto_helper.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
Status CheckValidType(const DataType& dtype) {
switch (dtype) {
case DT_INT64:
case DT_FLOAT:
case DT_STRING:
return absl::OkStatus();
default:
return errors::InvalidArgument("Received input dtype: ",
DataTypeString(dtype));
}
}
Status CheckTypesMatch(const Feature& feature, const DataType& dtype,
bool* match) {
switch (dtype) {
case DT_INT64:
*match = (feature.kind_case() == Feature::kInt64List);
break;
case DT_FLOAT:
*match = (feature.kind_case() == Feature::kFloatList);
break;
case DT_STRING:
*match = (feature.kind_case() == Feature::kBytesList);
break;
default:
return errors::InvalidArgument("Invalid input dtype: ",
DataTypeString(dtype));
}
return absl::OkStatus();
}
Status FeatureDenseCopy(const std::size_t out_index, const string& name,
const string& key, const DataType& dtype,
const TensorShape& shape, const Feature& feature,
Tensor* out) {
const std::size_t num_elements = shape.num_elements();
const std::size_t offset = out_index * num_elements;
switch (dtype) {
case DT_INT64: {
const Int64List& values = feature.int64_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key: ", key, ", Index: ", out_index,
". Number of int64 values != expected. "
"values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<int64_t>().data() + offset;
std::copy_n(values.value().data(), num_elements, out_p);
return absl::OkStatus();
}
case DT_FLOAT: {
const FloatList& values = feature.float_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key: ", key, ", Index: ", out_index,
". Number of float values != expected. "
"values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<float>().data() + offset;
std::copy_n(values.value().data(), num_elements, out_p);
return absl::OkStatus();
}
case DT_STRING: {
const BytesList& values = feature.bytes_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key ", key, ", Index: ", out_index,
". Number of bytes values != expected. "
"Values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<tstring>().data() + offset;
std::transform(values.value().data(),
values.value().data() + num_elements, out_p,
[](const string* s) { return *s; });
return absl::OkStatus();
}
default:
return errors::InvalidArgument("Invalid input dtype: ",
DataTypeString(dtype));
}
}
Tensor FeatureSparseCopy(const std::size_t batch, const string& key,
const DataType& dtype, const Feature& feature) {
switch (dtype) {
case DT_INT64: {
const Int64List& values = feature.int64_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<int64_t>().data();
std::copy_n(values.value().data(), num_elements, out_p);
return out;
}
case DT_FLOAT: {
const FloatList& values = feature.float_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<float>().data();
std::copy_n(values.value().data(), num_elements, out_p);
return out;
}
case DT_STRING: {
const BytesList& values = feature.bytes_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<tstring>().data();
std::transform(values.value().data(),
values.value().data() + num_elements, out_p,
[](const string* s) { return *s; });
return out;
}
default:
LOG(FATAL) << "not supposed to be here. dtype requested: " << dtype;
}
}
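// Copies the values of `in` (one example's sparse feature) into the batched
// sparse output: rows [offset, offset + N) of `indices` receive (batch, i)
// pairs, the N values are copied into `values` starting at `offset`, and N
// is returned.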
int64_t CopyIntoSparseTensor(const Tensor& in, const int batch,
const int64_t offset, Tensor* indices,
Tensor* values) {
const int64_t num_elements = in.shape().num_elements();
const DataType& dtype = in.dtype();
CHECK_EQ(dtype, values->dtype());
if (num_elements > 0) {
auto ix_t = indices->matrix<int64_t>();
int64_t* ix_p = &ix_t(offset, 0);
for (int64_t i = 0; i < num_elements; ++i, ix_p += 2) {
*ix_p = batch;
*(ix_p + 1) = i;
}
}
switch (dtype) {
case DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
values->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
values->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::copy_n(in.flat<tstring>().data(), num_elements,
values->flat<tstring>().data() + offset);
break;
}
default:
LOG(FATAL) << "Not supposed to be here. Saw dtype: " << dtype;
}
return num_elements;
}
void RowDenseCopy(const std::size_t& out_index, const DataType& dtype,
const Tensor& in, Tensor* out) {
const std::size_t num_elements = in.shape().num_elements();
const std::size_t offset = out_index * num_elements;
switch (dtype) {
case DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
out->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
out->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::copy_n(in.flat<tstring>().data(), num_elements,
out->flat<tstring>().data() + offset);
break;
}
default:
LOG(FATAL) << "Not supposed to be here. Saw dtype: " << dtype;
}
}
Status SingleExampleProtoToTensors(
const Example& example, const string& example_name, const int batch_index,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features,
std::vector<Tensor*>* output_dense_values_tensor,
std::vector<std::vector<Tensor>>* output_sparse_values_tmp) {
const Features& features = example.features();
const auto& feature_dict = features.feature();
for (size_t d = 0; d < fixed_len_features.size(); ++d) {
const FixedLenFeature& feature_config = fixed_len_features[d];
const string& key = feature_config.key;
const DataType& dtype = feature_config.dtype;
const TensorShape& shape = feature_config.shape;
const Tensor& default_value = feature_config.default_value;
bool required = (default_value.NumElements() == 0);
const auto& feature_found = feature_dict.find(key);
const bool feature_has_data =
(feature_found != feature_dict.end() &&
(feature_found->second.kind_case() != Feature::KIND_NOT_SET));
const bool required_ok = feature_has_data || !required;
if (!required_ok) {
return errors::InvalidArgument("Name: ", example_name, ", Feature: ", key,
" is required but could not be found.");
}
if (feature_has_data) {
const Feature& f = feature_found->second;
bool types_match;
TF_RETURN_IF_ERROR(CheckTypesMatch(f, dtype, &types_match));
if (!types_match) {
return errors::InvalidArgument("Name: ", example_name,
", Feature: ", key,
". Data types don't match. ",
"Expected type: ", DataTypeString(dtype),
" Feature is: ", f.DebugString());
}
TF_RETURN_IF_ERROR(FeatureDenseCopy(batch_index, example_name, key, dtype,
shape, f,
(*output_dense_values_tensor)[d]));
} else {
RowDenseCopy(batch_index, dtype, default_value,
(*output_dense_values_tensor)[d]);
}
}
for (size_t d = 0; d < var_len_features.size(); ++d) {
const VarLenFeature& feature_config = var_len_features[d];
const string& key = feature_config.key;
const DataType& dtype = feature_config.dtype;
const auto& feature_found = feature_dict.find(key);
const bool feature_has_data =
(feature_found != feature_dict.end() &&
(feature_found->second.kind_case() != Feature::KIND_NOT_SET));
if (feature_has_data) {
const Feature& f = feature_found->second;
bool types_match;
TF_RETURN_IF_ERROR(CheckTypesMatch(f, dtype, &types_match));
if (!types_match) {
return errors::InvalidArgument("Name: ", example_name,
", Feature: ", key,
". Data types don't match. ",
"Expected type: ", DataTypeString(dtype),
" Feature is: ", f.DebugString());
}
(*output_sparse_values_tmp)[d][batch_index] =
FeatureSparseCopy(batch_index, key, dtype, f);
} else {
(*output_sparse_values_tmp)[d][batch_index] =
Tensor(dtype, TensorShape({0}));
}
}
return absl::OkStatus();
}
Status GetSparseTensorShapes(const VarLenFeature& var_len_feature,
const std::vector<Tensor>& sparse_values_tmp,
const int batch_size,
VarLenFeatureBatchShapes* output_shapes) {
int64_t total_num_features = 0;
int64_t max_num_features = 0;
for (int b = 0; b < batch_size; ++b) {
const Tensor& t = sparse_values_tmp[b];
const int64_t num_elements = t.shape().num_elements();
total_num_features += num_elements;
max_num_features = std::max(max_num_features, num_elements);
}
output_shapes->indices_shape.AddDim(total_num_features);
output_shapes->indices_shape.AddDim(2);
output_shapes->values_shape.AddDim(total_num_features);
output_shapes->max_num_features = max_num_features;
return absl::OkStatus();
}
Status BatchExampleProtoToTensors(
const std::vector<const Example*>& examples,
const std::vector<string>& names,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features, Allocator* allocator,
std::vector<Tensor>* output_dense_values_tensor,
std::vector<Tensor>* output_sparse_indices_tensor,
std::vector<Tensor>* output_sparse_values_tensor,
std::vector<Tensor>* output_sparse_shapes_tensor) {
const int batch_size = examples.size();
const bool has_names = (!names.empty());
if (has_names) {
if (names.size() != examples.size()) {
return errors::InvalidArgument(
"Expected len(names) == len(examples), but got: ", names.size(),
" vs. ", examples.size());
}
}
std::vector<Tensor*> output_dense_values_tensor_ptrs(
fixed_len_features.size());
for (size_t d = 0; d < fixed_len_features.size(); ++d) {
const FixedLenFeature& config = fixed_len_features[d];
TensorShape out_shape;
out_shape.AddDim(batch_size);
const TensorShape& shape = config.shape;
const DataType& dtype = config.dtype;
for (const int dim : shape.dim_sizes()) out_shape.AddDim(dim);
(*output_dense_values_tensor)[d] = Tensor(allocator, dtype, out_shape);
output_dense_values_tensor_ptrs[d] = &(*output_dense_values_tensor)[d];
}
std::vector<std::vector<Tensor>> sparse_values_tmp(var_len_features.size());
for (size_t d = 0; d < var_len_features.size(); ++d) {
sparse_values_tmp[d] = std::vector<Tensor>(batch_size);
}
for (size_t b = 0; b < examples.size(); ++b) {
const Example& ex = *(examples[b]);
const string& example_name = (has_names) ? names[b] : "<unknown>";
TF_RETURN_IF_ERROR(SingleExampleProtoToTensors(
ex, example_name, b, fixed_len_features, var_len_features,
&output_dense_values_tensor_ptrs, &sparse_values_tmp));
}
for (size_t d = 0; d < var_len_features.size(); ++d) {
const VarLenFeature& feature_config = var_len_features[d];
const DataType& dtype = feature_config.dtype;
const std::vector<Tensor>& sparse_values_tensor = sparse_values_tmp[d];
VarLenFeatureBatchShapes sparse_tensor_batch_shapes;
TF_RETURN_IF_ERROR(GetSparseTensorShapes(feature_config,
sparse_values_tensor, batch_size,
&sparse_tensor_batch_shapes));
const TensorShape& indices_shape = sparse_tensor_batch_shapes.indices_shape;
const TensorShape& values_shape = sparse_tensor_batch_shapes.values_shape;
(*output_sparse_indices_tensor)[d] =
Tensor(allocator, DT_INT64, indices_shape);
(*output_sparse_values_tensor)[d] = Tensor(allocator, dtype, values_shape);
(*output_sparse_shapes_tensor)[d] =
Tensor(allocator, DT_INT64, TensorShape({2}));
auto shape_t = (*output_sparse_shapes_tensor)[d].vec<int64_t>();
shape_t(0) = batch_size;
shape_t(1) = sparse_tensor_batch_shapes.max_num_features;
Tensor* sp_indices_d = &(*output_sparse_indices_tensor)[d];
Tensor* sp_values_d = &(*output_sparse_values_tensor)[d];
int64_t offset = 0;
for (int b = 0; b < batch_size; ++b) {
const int64_t num_elements = CopyIntoSparseTensor(
sparse_values_tensor[b], b, offset, sp_indices_d, sp_values_d);
offset += num_elements;
}
}
return absl::OkStatus();
}
Status ParseExampleAttrs::FinishInit(int op_version) {
switch (op_version) {
case 1:
num_ragged = 0;
break;
case 2:
num_dense = dense_types.size();
num_ragged = ragged_value_types.size();
break;
default:
return errors::InvalidArgument("Unexpected op_version", op_version);
}
if (static_cast<size_t>(num_sparse) != sparse_types.size()) {
return errors::InvalidArgument("len(sparse_keys) != len(sparse_types)");
}
if (static_cast<size_t>(num_dense) != dense_types.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_types)");
}
if (static_cast<size_t>(num_dense) != dense_shapes.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_shapes)");
}
if (static_cast<size_t>(num_ragged) != ragged_value_types.size()) { | #include "tensorflow/core/util/example_proto_helper.h"
#include <cstdint>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
namespace tensorflow {
namespace {
TEST(CopyIntoSparseTensorTest, String) {
Tensor in_tensor(DT_STRING, TensorShape({2}));
in_tensor.flat<tstring>()(0) = "hello";
in_tensor.flat<tstring>()(1) = "world";
int n_values = 5;
Tensor ix_tensor(DT_INT64, TensorShape({n_values, 2}));
auto ix_matrix = ix_tensor.matrix<int64_t>();
for (int i = 0; i < n_values; ++i) {
for (int j = 0; j < 2; ++j) {
ix_matrix(i, j) = 0;
}
}
Tensor value_tensor(DT_STRING, TensorShape({n_values}));
int batch = 67;
int64_t offset = 1;
auto n_elems =
CopyIntoSparseTensor(in_tensor, batch, offset, &ix_tensor, &value_tensor);
EXPECT_EQ(2, n_elems);
EXPECT_EQ(0, ix_matrix(0, 0));
EXPECT_EQ(0, ix_matrix(0, 1));
EXPECT_EQ(batch, ix_matrix(1, 0));
EXPECT_EQ(0, ix_matrix(1, 1));
EXPECT_EQ(batch, ix_matrix(2, 0));
EXPECT_EQ(1, ix_matrix(2, 1));
EXPECT_EQ(0, ix_matrix(3, 0));
EXPECT_EQ(0, ix_matrix(3, 1));
EXPECT_EQ(0, ix_matrix(4, 0));
EXPECT_EQ(0, ix_matrix(4, 1));
auto values = value_tensor.flat<tstring>();
EXPECT_EQ("", values(0));
EXPECT_EQ("hello", values(1));
EXPECT_EQ("world", values(2));
EXPECT_EQ("", values(3));
EXPECT_EQ("", values(4));
}
constexpr char kDenseInt64Key[] = "dense_int64";
constexpr char kDenseFloatKey[] = "dense_float";
constexpr char kDenseStringKey[] = "dense_string";
constexpr char kSparseInt64Key[] = "sparse_int64";
constexpr char kSparseFloatKey[] = "sparse_float";
constexpr char kSparseStringKey[] = "sparse_string";
class SingleExampleProtoToTensorsTest : public ::testing::Test {
protected:
void SetUp() override {
FixedLenFeature int64_dense_config;
int64_dense_config.key = kDenseInt64Key;
int64_dense_config.dtype = DT_INT64;
int64_dense_config.shape = TensorShape({1});
int64_dense_config.default_value = Tensor(DT_INT64, TensorShape({1}));
int64_dense_config.default_value.scalar<int64_t>()() = 0;
dense_vec_.push_back(int64_dense_config);
FixedLenFeature float_dense_config;
float_dense_config.key = kDenseFloatKey;
float_dense_config.dtype = DT_FLOAT;
float_dense_config.shape = TensorShape({1});
float_dense_config.default_value = Tensor(DT_FLOAT, TensorShape({1}));
float_dense_config.default_value.scalar<float>()() = 0.0;
dense_vec_.push_back(float_dense_config);
FixedLenFeature string_dense_config;
string_dense_config.key = kDenseStringKey;
string_dense_config.dtype = DT_STRING;
string_dense_config.shape = TensorShape({1});
string_dense_config.default_value = Tensor(DT_STRING, TensorShape({1}));
string_dense_config.default_value.scalar<tstring>()() = "default";
dense_vec_.push_back(string_dense_config);
VarLenFeature int64_sparse_config;
int64_sparse_config.key = kSparseInt64Key;
int64_sparse_config.dtype = DT_INT64;
sparse_vec_.push_back(int64_sparse_config);
VarLenFeature float_sparse_config;
float_sparse_config.key = kSparseFloatKey;
float_sparse_config.dtype = DT_FLOAT;
sparse_vec_.push_back(float_sparse_config);
VarLenFeature string_sparse_config;
string_sparse_config.key = kSparseStringKey;
string_sparse_config.dtype = DT_STRING;
sparse_vec_.push_back(string_sparse_config);
}
std::vector<FixedLenFeature> dense_vec_;
std::vector<VarLenFeature> sparse_vec_;
};
TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyTrivial) {
Example ex;
(*ex.mutable_features()->mutable_feature())[kSparseInt64Key]
.mutable_int64_list()
->add_value(42);
(*ex.mutable_features()->mutable_feature())[kSparseFloatKey]
.mutable_float_list()
->add_value(4.2);
(*ex.mutable_features()->mutable_feature())[kSparseStringKey]
.mutable_bytes_list()
->add_value("forty-two");
std::vector<Tensor*> output_dense_values(0);
std::vector<std::vector<Tensor>> output_sparse_values_tmp(3);
for (int i = 0; i < 3; ++i) {
output_sparse_values_tmp[i] = std::vector<Tensor>(1);
}
std::vector<FixedLenFeature> empty_dense_vec;
TF_EXPECT_OK(SingleExampleProtoToTensors(ex, "", 0, empty_dense_vec,
sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size());
EXPECT_EQ(42, int64_tensor_vec[0].vec<int64_t>()(0));
const std::vector<Tensor>& float_tensor_vec = output_sparse_values_tmp[1];
EXPECT_EQ(1, float_tensor_vec.size());
EXPECT_NEAR(4.2, float_tensor_vec[0].vec<float>()(0), 0.001);
const std::vector<Tensor>& string_tensor_vec = output_sparse_values_tmp[2];
EXPECT_EQ(1, string_tensor_vec.size());
EXPECT_EQ("forty-two", string_tensor_vec[0].vec<tstring>()(0));
}
TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyEmpty) {
Example empty;
std::vector<Tensor*> output_dense_values(0);
std::vector<std::vector<Tensor>> output_sparse_values_tmp(3);
for (int i = 0; i < 3; ++i) {
output_sparse_values_tmp[i] = std::vector<Tensor>(1);
}
std::vector<FixedLenFeature> empty_dense_vec;
TF_EXPECT_OK(SingleExampleProtoToTensors(empty, "", 0, empty_dense_vec,
sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size());
EXPECT_EQ(0, int64_tensor_vec[0].vec<int64_t>().size());
const std::vector<Tensor>& float_tensor_vec = output_sparse_values_tmp[1];
EXPECT_EQ(1, float_tensor_vec.size());
EXPECT_EQ(0, float_tensor_vec[0].vec<float>().size());
const std::vector<Tensor>& string_tensor_vec = output_sparse_values_tmp[2];
EXPECT_EQ(1, string_tensor_vec.size());
EXPECT_EQ(0, string_tensor_vec[0].vec<tstring>().size());
}
TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyTrivial) {
Example ex;
(*ex.mutable_features()->mutable_feature())[kDenseInt64Key]
.mutable_int64_list()
->add_value(42);
(*ex.mutable_features()->mutable_feature())[kDenseFloatKey]
.mutable_float_list()
->add_value(4.2);
(*ex.mutable_features()->mutable_feature())[kDenseStringKey]
.mutable_bytes_list()
->add_value("forty-two");
std::vector<Tensor*> output_dense_values(3);
Tensor int64_dense_output(DT_INT64, TensorShape({1, 1}));
output_dense_values[0] = &int64_dense_output;
Tensor float_dense_output(DT_FLOAT, TensorShape({1, 1}));
output_dense_values[1] = &float_dense_output;
Tensor str_dense_output(DT_STRING, TensorShape({1, 1}));
output_dense_values[2] = &str_dense_output;
std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp;
TF_EXPECT_OK(SingleExampleProtoToTensors(
ex, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_TRUE(output_sparse_values_tmp.empty());
EXPECT_EQ(1, int64_dense_output.matrix<int64_t>().size());
EXPECT_EQ(42, int64_dense_output.matrix<int64_t>()(0, 0));
EXPECT_EQ(1, float_dense_output.matrix<float>().size());
EXPECT_NEAR(4.2, float_dense_output.matrix<float>()(0, 0), 0.001);
EXPECT_EQ(1, str_dense_output.matrix<tstring>().size());
EXPECT_EQ("forty-two", str_dense_output.matrix<tstring>()(0, 0));
}
TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyDefaults) {
std::vector<Tensor*> output_dense_values(3);
Tensor int64_dense_output(DT_INT64, TensorShape({1, 1}));
output_dense_values[0] = &int64_dense_output;
Tensor float_dense_output(DT_FLOAT, TensorShape({1, 1}));
output_dense_values[1] = &float_dense_output;
Tensor str_dense_output(DT_STRING, TensorShape({1, 1}));
output_dense_values[2] = &str_dense_output;
Example empty;
std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp;
TF_EXPECT_OK(SingleExampleProtoToTensors(
empty, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_EQ(1, int64_dense_output.matrix<int64_t>().size());
EXPECT_EQ(0, int64_dense_output.matrix<int64_t>()(0, 0));
EXPECT_EQ(1, float_dense_output.matrix<float>().size());
EXPECT_NEAR(0.0, float_dense_output.matrix<float>()(0, 0), 0.001);
EXPECT_EQ(1, str_dense_output.matrix<tstring>().size());
EXPECT_EQ("default", str_dense_output.matrix<tstring>()(0, 0));
}
}
} |
1,706 | cpp | tensorflow/tensorflow | debug_events_writer | tensorflow/core/util/debug_events_writer.cc | tensorflow/core/util/debug_events_writer_test.cc | #ifndef TENSORFLOW_CORE_UTIL_DEBUG_EVENTS_WRITER_H_
#define TENSORFLOW_CORE_UTIL_DEBUG_EVENTS_WRITER_H_
#include <atomic>
#include <deque>
#include <memory>
#include <unordered_map>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/debug_event.pb.h"
namespace tensorflow {
namespace tfdbg {
enum DebugEventFileType {
METADATA,
SOURCE_FILES,
STACK_FRAMES,
GRAPHS,
EXECUTION,
GRAPH_EXECUTION_TRACES,
};
class SingleDebugEventFileWriter {
public:
explicit SingleDebugEventFileWriter(const string& file_path);
Status Init();
void WriteSerializedDebugEvent(tensorflow::StringPiece debug_event_str);
Status Flush();
Status Close();
const string FileName();
private:
Env* env_;
const string file_path_;
std::atomic_int_fast32_t num_outstanding_events_;
std::unique_ptr<WritableFile> writable_file_;
std::unique_ptr<io::RecordWriter> record_writer_ TF_PT_GUARDED_BY(writer_mu_);
mutex writer_mu_;
};
class DebugEventsWriter {
public:
#ifndef SWIG
static constexpr const int64_t kDefaultCyclicBufferSize = 1000;
static constexpr const char* kFileNamePrefix = "tfdbg_events";
static constexpr const char* kMetadataSuffix = "metadata";
static constexpr const char* kSourceFilesSuffix = "source_files";
static constexpr const char* kStackFramesSuffix = "stack_frames";
static constexpr const char* kGraphsSuffix = "graphs";
static constexpr const char* kExecutionSuffix = "execution";
static constexpr const char* kGraphExecutionTracesSuffix =
"graph_execution_traces";
static constexpr const char* kVersionPrefix = "debug.Event:";
static constexpr const int kCurrentFormatVersion = 1;
#endif
static DebugEventsWriter* GetDebugEventsWriter(const string& dump_root,
const string& tfdbg_run_id,
int64_t circular_buffer_size);
static Status LookUpDebugEventsWriter(
const string& dump_root, DebugEventsWriter** debug_events_writer);
~DebugEventsWriter();
Status Init();
Status WriteSourceFile(SourceFile* source_file);
Status WriteStackFrameWithId(StackFrameWithId* stack_frame_with_id);
Status WriteGraphOpCreation(GraphOpCreation* graph_op_creation);
Status WriteDebuggedGraph(DebuggedGraph* debugged_graph);
Status WriteExecution(Execution* execution);
Status WriteGraphExecutionTrace(GraphExecutionTrace* graph_execution_trace);
Status WriteGraphExecutionTrace(const string& tfdbg_context_id,
const string& device_name,
const string& op_name, int32_t output_slot,
int32_t tensor_debug_mode,
const Tensor& tensor_value);
void WriteSerializedNonExecutionDebugEvent(const string& debug_event_str,
DebugEventFileType type);
void WriteSerializedExecutionDebugEvent(const string& debug_event_str,
DebugEventFileType type);
int RegisterDeviceAndGetId(const string& device_name);
Status FlushNonExecutionFiles();
Status FlushExecutionFiles();
Status Close();
private:
static std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
GetDebugEventsWriterMap();
static mutex factory_mu_;
DebugEventsWriter(const string& dump_root, const string& tfdbg_run_id,
int64_t circular_buffer_size);
string FileName(DebugEventFileType type);
Status InitNonMetadataFile(DebugEventFileType type);
Status SerializeAndWriteDebugEvent(DebugEvent* debug_event,
DebugEventFileType type);
void SelectWriter(DebugEventFileType type,
std::unique_ptr<SingleDebugEventFileWriter>** writer);
const string GetSuffix(DebugEventFileType type);
string GetFileNameInternal(DebugEventFileType type);
Env* env_;
const string dump_root_;
const string tfdbg_run_id_;
string file_prefix_;
bool is_initialized_ TF_GUARDED_BY(initialization_mu_);
mutex initialization_mu_;
const int64_t circular_buffer_size_;
std::deque<string> execution_buffer_ TF_GUARDED_BY(execution_buffer_mu_);
mutex execution_buffer_mu_;
std::deque<string> graph_execution_trace_buffer_
TF_GUARDED_BY(graph_execution_trace_buffer_mu_);
mutex graph_execution_trace_buffer_mu_;
absl::flat_hash_map<string, int> device_name_to_id_ TF_GUARDED_BY(device_mu_);
mutex device_mu_;
std::unique_ptr<SingleDebugEventFileWriter> metadata_writer_;
std::unique_ptr<SingleDebugEventFileWriter> source_files_writer_;
std::unique_ptr<SingleDebugEventFileWriter> stack_frames_writer_;
std::unique_ptr<SingleDebugEventFileWriter> graphs_writer_;
std::unique_ptr<SingleDebugEventFileWriter> execution_writer_;
std::unique_ptr<SingleDebugEventFileWriter> graph_execution_traces_writer_;
DebugEventsWriter(const DebugEventsWriter&) = delete;
void operator=(const DebugEventsWriter&) = delete;
friend class DebugEventsWriterTest;
};
}
}
#endif
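// --- Illustrative usage sketch (added; not part of the original file). ---
// A minimal example of obtaining the per-dump-root writer declared above and
// recording one SourceFile debug event. The dump root "/tmp/tfdbg_demo", run
// id "run_1", and source path "/tmp/model.py" are hypothetical.
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/protobuf/debug_event.pb.h"
#include "tensorflow/core/util/debug_events_writer.h"

tensorflow::Status RecordOneSourceFile() {
  tensorflow::tfdbg::DebugEventsWriter* writer =
      tensorflow::tfdbg::DebugEventsWriter::GetDebugEventsWriter(
          "/tmp/tfdbg_demo", "run_1",
          tensorflow::tfdbg::DebugEventsWriter::kDefaultCyclicBufferSize);
  TF_RETURN_IF_ERROR(writer->Init());
  tensorflow::SourceFile source_file;
  source_file.set_file_path("/tmp/model.py");  // hypothetical path
  source_file.set_host_name("localhost");
  TF_RETURN_IF_ERROR(writer->WriteSourceFile(&source_file));
  return writer->FlushNonExecutionFiles();
}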
#include "tensorflow/core/util/debug_events_writer.h"
#include <deque>
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace tfdbg {
namespace {
void MaybeSetDebugEventTimestamp(DebugEvent* debug_event, Env* env) {
if (debug_event->wall_time() == 0) {
debug_event->set_wall_time(env->NowMicros() / 1e6);
}
}
}
SingleDebugEventFileWriter::SingleDebugEventFileWriter(const string& file_path)
: env_(Env::Default()),
file_path_(file_path),
num_outstanding_events_(0),
writer_mu_() {}
Status SingleDebugEventFileWriter::Init() {
if (record_writer_ != nullptr) {
return absl::OkStatus();
}
record_writer_.reset();
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env_->NewWritableFile(file_path_, &writable_file_),
"Creating writable file ", file_path_);
record_writer_ = std::make_unique<io::RecordWriter>(writable_file_.get());
if (record_writer_ == nullptr) {
return errors::Unknown("Could not create record writer at path: ",
file_path_);
}
num_outstanding_events_.store(0);
VLOG(1) << "Successfully opened debug events file: " << file_path_;
return absl::OkStatus();
}
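// Added commentary: Init() is idempotent -- if the record writer is already
// open it returns immediately -- and WriteSerializedDebugEvent() below calls
// it lazily on the first write when needed.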
void SingleDebugEventFileWriter::WriteSerializedDebugEvent(
StringPiece debug_event_str) {
if (record_writer_ == nullptr) {
if (!Init().ok()) {
LOG(ERROR) << "Write failed because file could not be opened.";
return;
}
}
num_outstanding_events_.fetch_add(1);
{
mutex_lock l(writer_mu_);
record_writer_->WriteRecord(debug_event_str).IgnoreError();
}
}
Status SingleDebugEventFileWriter::Flush() {
const int num_outstanding = num_outstanding_events_.load();
if (num_outstanding == 0) {
return absl::OkStatus();
}
if (writable_file_ == nullptr) {
return errors::Unknown("Unexpected NULL file for path: ", file_path_);
}
{
mutex_lock l(writer_mu_);
TF_RETURN_WITH_CONTEXT_IF_ERROR(record_writer_->Flush(), "Failed to flush ",
num_outstanding, " debug events to ",
file_path_);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(writable_file_->Sync(), "Failed to sync ",
num_outstanding, " debug events to ",
file_path_);
num_outstanding_events_.store(0);
return absl::OkStatus();
}
Status SingleDebugEventFileWriter::Close() {
Status status = Flush();
if (writable_file_ != nullptr) {
Status close_status = writable_file_->Close();
if (!close_status.ok()) {
status = close_status;
}
record_writer_.reset(nullptr);
writable_file_.reset(nullptr);
}
num_outstanding_events_ = 0;
return status;
}
const string SingleDebugEventFileWriter::FileName() { return file_path_; }
mutex DebugEventsWriter::factory_mu_(LINKER_INITIALIZED);
DebugEventsWriter::~DebugEventsWriter() { Close().IgnoreError(); }
DebugEventsWriter* DebugEventsWriter::GetDebugEventsWriter(
const string& dump_root, const string& tfdbg_run_id,
int64_t circular_buffer_size) {
mutex_lock l(DebugEventsWriter::factory_mu_);
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>* writer_pool =
DebugEventsWriter::GetDebugEventsWriterMap();
if (writer_pool->find(dump_root) == writer_pool->end()) {
std::unique_ptr<DebugEventsWriter> writer(
new DebugEventsWriter(dump_root, tfdbg_run_id, circular_buffer_size));
writer_pool->insert(std::make_pair(dump_root, std::move(writer)));
}
return (*writer_pool)[dump_root].get();
}
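// Added commentary: the writer pool is keyed solely by dump_root, so a second
// call with the same dump root returns the original writer and its
// tfdbg_run_id / circular_buffer_size arguments are silently ignored. E.g.
// (the paths and run ids are hypothetical):
//   DebugEventsWriter* a =
//       DebugEventsWriter::GetDebugEventsWriter("/tmp/root", "run_1", 1000);
//   DebugEventsWriter* b =
//       DebugEventsWriter::GetDebugEventsWriter("/tmp/root", "run_2", 0);
//   CHECK_EQ(a, b);  // Same object; "run_2" and 0 had no effect.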
Status DebugEventsWriter::LookUpDebugEventsWriter(
const string& dump_root, DebugEventsWriter** debug_events_writer) {
mutex_lock l(DebugEventsWriter::factory_mu_);
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>* writer_pool =
DebugEventsWriter::GetDebugEventsWriterMap();
if (writer_pool->find(dump_root) == writer_pool->end()) {
return errors::FailedPrecondition(
"No DebugEventsWriter has been created at dump root ", dump_root);
}
*debug_events_writer = (*writer_pool)[dump_root].get();
return absl::OkStatus();
}
Status DebugEventsWriter::Init() {
mutex_lock l(initialization_mu_);
if (is_initialized_) {
return absl::OkStatus();
}
if (!env_->IsDirectory(dump_root_).ok()) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(env_->RecursivelyCreateDir(dump_root_),
"Failed to create directory ", dump_root_);
}
int64_t time_in_seconds = env_->NowMicros() / 1e6;
file_prefix_ = io::JoinPath(
dump_root_, strings::Printf("%s.%010lld.%s", kFileNamePrefix,
static_cast<long long>(time_in_seconds),
port::Hostname().c_str()));
TF_RETURN_IF_ERROR(InitNonMetadataFile(SOURCE_FILES));
TF_RETURN_IF_ERROR(InitNonMetadataFile(STACK_FRAMES));
TF_RETURN_IF_ERROR(InitNonMetadataFile(GRAPHS));
metadata_writer_.reset();
string metadata_filename = GetFileNameInternal(METADATA);
metadata_writer_ =
std::make_unique<SingleDebugEventFileWriter>(metadata_filename);
if (metadata_writer_ == nullptr) {
return errors::Unknown("Could not create debug event metadata file writer");
}
DebugEvent debug_event;
DebugMetadata* metadata = debug_event.mutable_debug_metadata();
metadata->set_tensorflow_version(TF_VERSION_STRING);
metadata->set_file_version(
strings::Printf("%s%d", kVersionPrefix, kCurrentFormatVersion));
metadata->set_tfdbg_run_id(tfdbg_run_id_);
TF_RETURN_IF_ERROR(SerializeAndWriteDebugEvent(&debug_event, METADATA));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
metadata_writer_->Flush(), "Failed to flush debug event metadata writer");
TF_RETURN_IF_ERROR(InitNonMetadataFile(EXECUTION));
TF_RETURN_IF_ERROR(InitNonMetadataFile(GRAPH_EXECUTION_TRACES));
is_initialized_ = true;
return absl::OkStatus();
}
Status DebugEventsWriter::WriteSourceFile(SourceFile* source_file) {
DebugEvent debug_event;
debug_event.set_allocated_source_file(source_file);
return SerializeAndWriteDebugEvent(&debug_event, SOURCE_FILES);
}
Status DebugEventsWriter::WriteStackFrameWithId(
StackFrameWithId* stack_frame_with_id) {
DebugEvent debug_event;
debug_event.set_allocated_stack_frame_with_id(stack_frame_with_id);
return SerializeAndWriteDebugEvent(&debug_event, STACK_FRAMES);
}
Status DebugEventsWriter::WriteGraphOpCreation(
GraphOpCreation* graph_op_creation) {
DebugEvent debug_event;
debug_event.set_allocated_graph_op_creation(graph_op_creation);
return SerializeAndWriteDebugEvent(&debug_event, GRAPHS);
}
Status DebugEventsWriter::WriteDebuggedGraph(DebuggedGraph* debugged_graph) {
DebugEvent debug_event;
debug_event.set_allocated_debugged_graph(debugged_graph);
return SerializeAndWriteDebugEvent(&debug_event, GRAPHS);
}
Status DebugEventsWriter::WriteExecution(Execution* execution) {
if (circular_buffer_size_ <= 0) {
DebugEvent debug_event;
debug_event.set_allocated_execution(execution);
return SerializeAndWriteDebugEvent(&debug_event, EXECUTION);
} else {
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
debug_event.set_allocated_execution(execution);
string serialized;
debug_event.SerializeToString(&serialized);
mutex_lock l(execution_buffer_mu_);
execution_buffer_.emplace_back(std::move(serialized));
if (execution_buffer_.size() > circular_buffer_size_) {
execution_buffer_.pop_front();
}
return absl::OkStatus();
}
}
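// Worked example of the circular-buffer branch above (added commentary): with
// circular_buffer_size_ == 10, writing 25 Execution events keeps only the
// newest 10 in memory, so events 15..24 are what FlushExecutionFiles() later
// writes to disk. With a non-positive buffer size every event is written
// through immediately instead.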
Status DebugEventsWriter::WriteGraphExecutionTrace(
GraphExecutionTrace* graph_execution_trace) {
TF_RETURN_IF_ERROR(Init());
if (circular_buffer_size_ <= 0) {
DebugEvent debug_event;
debug_event.set_allocated_graph_execution_trace(graph_execution_trace);
return SerializeAndWriteDebugEvent(&debug_event, GRAPH_EXECUTION_TRACES);
} else {
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
debug_event.set_allocated_graph_execution_trace(graph_execution_trace);
string serialized;
debug_event.SerializeToString(&serialized);
mutex_lock l(graph_execution_trace_buffer_mu_);
graph_execution_trace_buffer_.emplace_back(std::move(serialized));
if (graph_execution_trace_buffer_.size() > circular_buffer_size_) {
graph_execution_trace_buffer_.pop_front();
}
return absl::OkStatus();
}
}
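// The overload below is a convenience wrapper: it assembles a
// GraphExecutionTrace proto from the raw pieces (context id, device and op
// names, output slot, debug mode, tensor value) and forwards it to the
// proto-based overload above, which takes ownership of the released pointer.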
Status DebugEventsWriter::WriteGraphExecutionTrace(
const string& tfdbg_context_id, const string& device_name,
const string& op_name, int32_t output_slot, int32_t tensor_debug_mode,
const Tensor& tensor_value) {
std::unique_ptr<GraphExecutionTrace> trace(new GraphExecutionTrace());
trace->set_tfdbg_context_id(tfdbg_context_id);
if (!op_name.empty()) {
trace->set_op_name(op_name);
}
if (output_slot > 0) {
trace->set_output_slot(output_slot);
}
if (tensor_debug_mode > 0) {
trace->set_tensor_debug_mode(TensorDebugMode(tensor_debug_mode));
}
trace->set_device_name(device_name);
tensor_value.AsProtoTensorContent(trace->mutable_tensor_proto());
return WriteGraphExecutionTrace(trace.release());
}
void DebugEventsWriter::WriteSerializedNonExecutionDebugEvent(
const string& debug_event_str, DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
(*writer)->WriteSerializedDebugEvent(debug_event_str);
}
void DebugEventsWriter::WriteSerializedExecutionDebugEvent(
const string& debug_event_str, DebugEventFileType type) {
const std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
std::deque<string>* buffer = nullptr;
mutex* mu = nullptr;
switch (type) {
case EXECUTION:
writer = &execution_writer_;
buffer = &execution_buffer_;
mu = &execution_buffer_mu_;
break;
case GRAPH_EXECUTION_TRACES:
writer = &graph_execution_traces_writer_;
buffer = &graph_execution_trace_buffer_;
mu = &graph_execution_trace_buffer_mu_;
break;
default:
return;
}
if (circular_buffer_size_ <= 0) {
(*writer)->WriteSerializedDebugEvent(debug_event_str);
} else {
mutex_lock l(*mu);
buffer->push_back(debug_event_str);
if (buffer->size() > circular_buffer_size_) {
buffer->pop_front();
}
}
}
int DebugEventsWriter::RegisterDeviceAndGetId(const string& device_name) {
mutex_lock l(device_mu_);
int& device_id = device_name_to_id_[device_name];
if (device_id == 0) {
device_id = device_name_to_id_.size();
DebugEvent debug_event;
MaybeSetDebugEventTimestamp(&debug_event, env_);
DebuggedDevice* debugged_device = debug_event.mutable_debugged_device();
debugged_device->set_device_name(device_name);
debugged_device->set_device_id(device_id);
string serialized;
debug_event.SerializeToString(&serialized);
graphs_writer_->WriteSerializedDebugEvent(serialized);
}
return device_id;
}
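// Example for RegisterDeviceAndGetId above (added commentary; the device name
// is hypothetical). Ids start at 1, and re-registering a known device is a
// pure lookup that writes nothing:
//   int id1 = writer->RegisterDeviceAndGetId("/device:GPU:0");
//   // id1 == 1; a DebuggedDevice event is written to the graphs file.
//   int id2 = writer->RegisterDeviceAndGetId("/device:GPU:0");
//   // id2 == 1; pure lookup, nothing written.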
Status DebugEventsWriter::FlushNonExecutionFiles() {
TF_RETURN_IF_ERROR(Init());
if (source_files_writer_ != nullptr) {
TF_RETURN_IF_ERROR(source_files_writer_->Flush());
}
if (stack_frames_writer_ != nullptr) {
TF_RETURN_IF_ERROR(stack_frames_writer_->Flush());
}
if (graphs_writer_ != nullptr) {
TF_RETURN_IF_ERROR(graphs_writer_->Flush());
}
return absl::OkStatus();
}
Status DebugEventsWriter::FlushExecutionFiles() {
TF_RETURN_IF_ERROR(Init());
if (execution_writer_ != nullptr) {
if (circular_buffer_size_ > 0) {
mutex_lock l(execution_buffer_mu_);
while (!execution_buffer_.empty()) {
execution_writer_->WriteSerializedDebugEvent(execution_buffer_.front());
execution_buffer_.pop_front();
}
}
TF_RETURN_IF_ERROR(execution_writer_->Flush());
}
if (graph_execution_traces_writer_ != nullptr) {
if (circular_buffer_size_ > 0) {
mutex_lock l(graph_execution_trace_buffer_mu_);
while (!graph_execution_trace_buffer_.empty()) {
graph_execution_traces_writer_->WriteSerializedDebugEvent(
graph_execution_trace_buffer_.front());
graph_execution_trace_buffer_.pop_front();
}
}
TF_RETURN_IF_ERROR(graph_execution_traces_writer_->Flush());
}
return absl::OkStatus();
}
string DebugEventsWriter::FileName(DebugEventFileType type) {
if (file_prefix_.empty()) {
Init().IgnoreError();
}
return GetFileNameInternal(type);
}
Status DebugEventsWriter::Close() {
{
mutex_lock l(initialization_mu_);
if (!is_initialized_) {
return absl::OkStatus();
}
}
std::vector<string> failed_to_close_files;
if (metadata_writer_ != nullptr) {
if (!metadata_writer_->Close().ok()) {
failed_to_close_files.push_back(metadata_writer_->FileName());
}
metadata_writer_.reset(nullptr);
}
TF_RETURN_IF_ERROR(FlushNonExecutionFiles());
if (source_files_writer_ != nullptr) {
if (!source_files_writer_->Close().ok()) {
failed_to_close_files.push_back(source_files_writer_->FileName());
}
source_files_writer_.reset(nullptr);
}
if (stack_frames_writer_ != nullptr) {
if (!stack_frames_writer_->Close().ok()) {
failed_to_close_files.push_back(stack_frames_writer_->FileName());
}
stack_frames_writer_.reset(nullptr);
}
if (graphs_writer_ != nullptr) {
if (!graphs_writer_->Close().ok()) {
failed_to_close_files.push_back(graphs_writer_->FileName());
}
graphs_writer_.reset(nullptr);
}
TF_RETURN_IF_ERROR(FlushExecutionFiles());
if (execution_writer_ != nullptr) {
if (!execution_writer_->Close().ok()) {
failed_to_close_files.push_back(execution_writer_->FileName());
}
execution_writer_.reset(nullptr);
}
if (graph_execution_traces_writer_ != nullptr) {
if (!graph_execution_traces_writer_->Close().ok()) {
failed_to_close_files.push_back(
graph_execution_traces_writer_->FileName());
}
graph_execution_traces_writer_.reset(nullptr);
}
if (failed_to_close_files.empty()) {
return absl::OkStatus();
} else {
    return errors::FailedPrecondition(
        "Failed to close ", failed_to_close_files.size(),
        " debug-events files associated with tfdbg");
}
}
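// The pool below is a function-local singleton that is intentionally never
// deleted, so writers handed out above remain valid for the lifetime of the
// process.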
std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
DebugEventsWriter::GetDebugEventsWriterMap() {
static std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>*
writer_pool =
new std::unordered_map<string, std::unique_ptr<DebugEventsWriter>>();
return writer_pool;
}
DebugEventsWriter::DebugEventsWriter(const string& dump_root,
const string& tfdbg_run_id,
int64_t circular_buffer_size)
: env_(Env::Default()),
dump_root_(dump_root),
tfdbg_run_id_(tfdbg_run_id),
is_initialized_(false),
initialization_mu_(),
circular_buffer_size_(circular_buffer_size),
execution_buffer_(),
execution_buffer_mu_(),
graph_execution_trace_buffer_(),
graph_execution_trace_buffer_mu_(),
device_name_to_id_(),
device_mu_() {}
Status DebugEventsWriter::InitNonMetadataFile(DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
const string filename = GetFileNameInternal(type);
writer->reset();
*writer = std::make_unique<SingleDebugEventFileWriter>(filename);
if (*writer == nullptr) {
return errors::Unknown("Could not create debug event file writer for ",
filename);
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
(*writer)->Init(), "Initializing debug event writer at path ", filename);
VLOG(1) << "Successfully opened debug event file: " << filename;
return absl::OkStatus();
}
Status DebugEventsWriter::SerializeAndWriteDebugEvent(DebugEvent* debug_event,
DebugEventFileType type) {
std::unique_ptr<SingleDebugEventFileWriter>* writer = nullptr;
SelectWriter(type, &writer);
if (writer != nullptr) {
MaybeSetDebugEventTimestamp(debug_event, env_);
string str;
debug_event->AppendToString(&str);
(*writer)->WriteSerializedDebugEvent(str);
return absl::OkStatus();
} else {
return errors::Internal(
"Unable to find debug events file writer for DebugEventsFileType ",
type);
}
}
void DebugEventsWriter::SelectWriter(
DebugEventFileType type,
std::unique_ptr<SingleDebugEventFileWriter>** writer) {
switch (type) {
case METADATA:
*writer = &metadata_writer_;
break;
case SOURCE_FILES:
*writer = &source_files_writer_;
break;
case STACK_FRAMES:
*writer = &stack_frames_writer_;
break;
case GRAPHS:
*writer = &graphs_writer_;
break;
case EXECUTION:
*writer = &execution_writer_;
break;
case GRAPH_EXECUTION_TRACES:
*writer = &graph_execution_traces_writer_;
break;
}
}
const string DebugEventsWriter::GetSuffix(DebugEventFileType type) {
switch (type) {
case METADATA:
return kMetadataSuffix;
case SOURCE_FILES:
return kSourceFilesSuffix;
case STACK_FRAMES:
return kStackFramesSuffix;
case GRAPHS:
return kGraphsSuffix;
case EXECUTION:
return kExecutionSuffix;
case GRAPH_EXECUTION_TRACES:
return kGraphExecutionTracesSuffix;
default:
string suffix;
return suffix;
}
}
string DebugEventsWriter::GetFileNameInternal(DebugEventFileType type) {
const string suffix = GetSuffix(type);
return strings::StrCat(file_prefix_, ".", suffix);
}
}
} | #include "tensorflow/core/util/debug_events_writer.h"
#include <algorithm>
#include <atomic>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfdbg {
Env* env() { return Env::Default(); }
class DebugEventsWriterTest : public ::testing::Test {
public:
static string GetDebugEventFileName(DebugEventsWriter* writer,
DebugEventFileType type) {
return writer->FileName(type);
}
static void ReadDebugEventProtos(DebugEventsWriter* writer,
DebugEventFileType type,
std::vector<DebugEvent>* protos) {
protos->clear();
const string filename = writer->FileName(type);
std::unique_ptr<RandomAccessFile> debug_events_file;
TF_CHECK_OK(env()->NewRandomAccessFile(filename, &debug_events_file));
io::RecordReader* reader = new io::RecordReader(debug_events_file.get());
uint64 offset = 0;
DebugEvent actual;
while (ReadDebugEventProto(reader, &offset, &actual)) {
protos->push_back(actual);
}
delete reader;
}
static bool ReadDebugEventProto(io::RecordReader* reader, uint64* offset,
DebugEvent* proto) {
tstring record;
Status s = reader->ReadRecord(offset, &record);
if (!s.ok()) {
return false;
}
return ParseProtoUnlimited(proto, record);
}
void SetUp() override {
dump_root_ = io::JoinPath(
testing::TmpDir(),
strings::Printf("%010lld", static_cast<long long>(env()->NowMicros())));
tfdbg_run_id_ = "test_tfdbg_run_id";
}
void TearDown() override {
if (env()->IsDirectory(dump_root_).ok()) {
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
TF_ASSERT_OK(env()->DeleteRecursively(dump_root_, &undeleted_files,
&undeleted_dirs));
ASSERT_EQ(0, undeleted_files);
ASSERT_EQ(0, undeleted_dirs);
}
}
string dump_root_;
string tfdbg_run_id_;
};
TEST_F(DebugEventsWriterTest, GetDebugEventsWriterSameRootGivesSameObject) {
DebugEventsWriter* writer_1 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
DebugEventsWriter* writer_2 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
EXPECT_EQ(writer_1, writer_2);
}
TEST_F(DebugEventsWriterTest, ConcurrentGetDebugEventsWriterSameDumpRoot) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 4);
std::vector<DebugEventsWriter*> writers;
mutex mu;
auto fn = [this, &writers, &mu]() {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
{
mutex_lock l(mu);
writers.push_back(writer);
}
};
for (size_t i = 0; i < 4; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
EXPECT_EQ(writers.size(), 4);
EXPECT_EQ(writers[0], writers[1]);
EXPECT_EQ(writers[1], writers[2]);
EXPECT_EQ(writers[2], writers[3]);
}
TEST_F(DebugEventsWriterTest, ConcurrentGetDebugEventsWriterDiffDumpRoots) {
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 3);
std::atomic_int_fast64_t counter(0);
std::vector<DebugEventsWriter*> writers;
mutex mu;
auto fn = [this, &counter, &writers, &mu]() {
const string new_dump_root =
io::JoinPath(dump_root_, strings::Printf("%ld", counter.fetch_add(1)));
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
new_dump_root, tfdbg_run_id_,
DebugEventsWriter::kDefaultCyclicBufferSize);
{
mutex_lock l(mu);
writers.push_back(writer);
}
};
for (size_t i = 0; i < 3; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
EXPECT_EQ(writers.size(), 3);
EXPECT_NE(writers[0], writers[1]);
EXPECT_NE(writers[0], writers[2]);
EXPECT_NE(writers[1], writers[2]);
}
TEST_F(DebugEventsWriterTest, GetDebugEventsWriterDifferentRoots) {
DebugEventsWriter* writer_1 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
const string dump_root_2 = io::JoinPath(dump_root_, "subdirectory");
DebugEventsWriter* writer_2 = DebugEventsWriter::GetDebugEventsWriter(
dump_root_2, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
EXPECT_NE(writer_1, writer_2);
}
TEST_F(DebugEventsWriterTest, GetAndInitDebugEventsWriter) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
const string file_version = actuals[0].debug_metadata().file_version();
EXPECT_EQ(file_version.find(DebugEventsWriter::kVersionPrefix), 0);
EXPECT_GT(file_version.size(), strlen(DebugEventsWriter::kVersionPrefix));
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
}
TEST_F(DebugEventsWriterTest, CallingCloseWithoutInitIsOkay) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, CallingCloseTwiceIsOkay) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Close());
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, ConcurrentInitCalls) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 4);
auto fn = [&writer]() { TF_ASSERT_OK(writer->Init()); };
for (size_t i = 0; i < 3; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
const string file_version = actuals[0].debug_metadata().file_version();
EXPECT_EQ(file_version.find(DebugEventsWriter::kVersionPrefix), 0);
EXPECT_GT(file_version.size(), strlen(DebugEventsWriter::kVersionPrefix));
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
}
TEST_F(DebugEventsWriterTest, InitTwiceDoesNotCreateNewMetadataFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
EXPECT_GE(actuals[0].debug_metadata().file_version().size(), 0);
string metadata_path_1 =
GetDebugEventFileName(writer, DebugEventFileType::METADATA);
TF_ASSERT_OK(writer->Init());
EXPECT_EQ(GetDebugEventFileName(writer, DebugEventFileType::METADATA),
metadata_path_1);
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::METADATA, &actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_GT(actuals[0].debug_metadata().tensorflow_version().length(), 0);
EXPECT_EQ(actuals[0].debug_metadata().tfdbg_run_id(), "test_tfdbg_run_id");
EXPECT_GE(actuals[0].debug_metadata().file_version().size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteSourceFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
SourceFile* source_file_1 = new SourceFile();
source_file_1->set_file_path("/home/tf_programs/main.py");
source_file_1->set_host_name("localhost.localdomain");
source_file_1->add_lines("import tensorflow as tf");
source_file_1->add_lines("");
source_file_1->add_lines("print(tf.constant([42.0]))");
source_file_1->add_lines("");
TF_ASSERT_OK(writer->WriteSourceFile(source_file_1));
SourceFile* source_file_2 = new SourceFile();
source_file_2->set_file_path("/home/tf_programs/train.py");
source_file_2->set_host_name("localhost.localdomain");
source_file_2->add_lines("import tensorflow.keras as keras");
source_file_2->add_lines("");
source_file_2->add_lines("model = keras.Sequential()");
TF_ASSERT_OK(writer->WriteSourceFile(source_file_2));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
SourceFile actual_source_file_1 = actuals[0].source_file();
EXPECT_EQ(actual_source_file_1.file_path(), "/home/tf_programs/main.py");
EXPECT_EQ(actual_source_file_1.host_name(), "localhost.localdomain");
EXPECT_EQ(actual_source_file_1.lines().size(), 4);
EXPECT_EQ(actual_source_file_1.lines()[0], "import tensorflow as tf");
EXPECT_EQ(actual_source_file_1.lines()[1], "");
EXPECT_EQ(actual_source_file_1.lines()[2], "print(tf.constant([42.0]))");
EXPECT_EQ(actual_source_file_1.lines()[3], "");
SourceFile actual_source_file_2 = actuals[1].source_file();
EXPECT_EQ(actual_source_file_2.file_path(), "/home/tf_programs/train.py");
EXPECT_EQ(actual_source_file_2.host_name(), "localhost.localdomain");
EXPECT_EQ(actual_source_file_2.lines().size(), 3);
EXPECT_EQ(actual_source_file_2.lines()[0],
"import tensorflow.keras as keras");
EXPECT_EQ(actual_source_file_2.lines()[1], "");
EXPECT_EQ(actual_source_file_2.lines()[2], "model = keras.Sequential()");
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteStackFramesFile) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
StackFrameWithId* stack_frame_1 = new StackFrameWithId();
stack_frame_1->set_id("deadbeaf");
GraphDebugInfo::FileLineCol* file_line_col =
stack_frame_1->mutable_file_line_col();
file_line_col->set_file_index(12);
file_line_col->set_line(20);
file_line_col->set_col(2);
file_line_col->set_func("my_func");
file_line_col->set_code(" x = y + z");
StackFrameWithId* stack_frame_2 = new StackFrameWithId();
stack_frame_2->set_id("eeeeeeec");
file_line_col = stack_frame_2->mutable_file_line_col();
file_line_col->set_file_index(12);
file_line_col->set_line(21);
file_line_col->set_col(4);
file_line_col->set_func("my_func");
file_line_col->set_code(" x = x ** 2.0");
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame_1));
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame_2));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
StackFrameWithId actual_stack_frame_1 = actuals[0].stack_frame_with_id();
EXPECT_EQ(actual_stack_frame_1.id(), "deadbeaf");
GraphDebugInfo::FileLineCol file_line_col_1 =
actual_stack_frame_1.file_line_col();
EXPECT_EQ(file_line_col_1.file_index(), 12);
EXPECT_EQ(file_line_col_1.line(), 20);
EXPECT_EQ(file_line_col_1.col(), 2);
EXPECT_EQ(file_line_col_1.func(), "my_func");
EXPECT_EQ(file_line_col_1.code(), " x = y + z");
StackFrameWithId actual_stack_frame_2 = actuals[1].stack_frame_with_id();
EXPECT_EQ(actual_stack_frame_2.id(), "eeeeeeec");
GraphDebugInfo::FileLineCol file_line_col_2 =
actual_stack_frame_2.file_line_col();
EXPECT_EQ(file_line_col_2.file_index(), 12);
EXPECT_EQ(file_line_col_2.line(), 21);
EXPECT_EQ(file_line_col_2.col(), 4);
EXPECT_EQ(file_line_col_2.func(), "my_func");
EXPECT_EQ(file_line_col_2.code(), " x = x ** 2.0");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteGraphOpCreationAndDebuggedGraph) {
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
GraphOpCreation* graph_op_creation = new GraphOpCreation();
graph_op_creation->set_op_type("MatMul");
graph_op_creation->set_op_name("Dense_1/MatMul");
TF_ASSERT_OK(writer->WriteGraphOpCreation(graph_op_creation));
DebuggedGraph* debugged_graph = new DebuggedGraph();
debugged_graph->set_graph_id("deadbeaf");
debugged_graph->set_graph_name("my_func_graph");
TF_ASSERT_OK(writer->WriteDebuggedGraph(debugged_graph));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 2);
EXPECT_GT(actuals[0].wall_time(), 0);
EXPECT_GT(actuals[1].wall_time(), actuals[0].wall_time());
GraphOpCreation actual_op_creation = actuals[0].graph_op_creation();
EXPECT_EQ(actual_op_creation.op_type(), "MatMul");
EXPECT_EQ(actual_op_creation.op_name(), "Dense_1/MatMul");
DebuggedGraph actual_debugged_graph = actuals[1].debugged_graph();
EXPECT_EQ(actual_debugged_graph.graph_id(), "deadbeaf");
EXPECT_EQ(actual_debugged_graph.graph_name(), "my_func_graph");
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteCallsToTheSameFile) {
const size_t kConcurrentWrites = 100;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
const string file_path = strings::Printf(
"/home/tf_programs/program_%.3ld.py", counter.fetch_add(1));
SourceFile* source_file = new SourceFile();
source_file->set_file_path(file_path);
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites);
std::vector<string> file_paths;
std::vector<string> host_names;
for (size_t i = 0; i < kConcurrentWrites; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (size_t i = 0; i < kConcurrentWrites; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.3ld.py", i));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteAndFlushCallsToTheSameFile) {
const size_t kConcurrentWrites = 100;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
const string file_path = strings::Printf(
"/home/tf_programs/program_%.3ld.py", counter.fetch_add(1));
SourceFile* source_file = new SourceFile();
source_file->set_file_path(file_path);
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
TF_ASSERT_OK(writer->FlushNonExecutionFiles());
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites);
std::vector<string> file_paths;
std::vector<string> host_names;
for (size_t i = 0; i < kConcurrentWrites; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (size_t i = 0; i < kConcurrentWrites; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.3ld.py", i));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
}
TEST_F(DebugEventsWriterTest, ConcurrentWriteCallsToTheDifferentFiles) {
const int32_t kConcurrentWrites = 30;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, DebugEventsWriter::kDefaultCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 10);
std::atomic_int_fast32_t counter(0);
auto fn = [&writer, &counter]() {
const int32_t index = counter.fetch_add(1);
if (index % 3 == 0) {
SourceFile* source_file = new SourceFile();
source_file->set_file_path(
strings::Printf("/home/tf_programs/program_%.2d.py", index));
source_file->set_host_name("localhost.localdomain");
TF_ASSERT_OK(writer->WriteSourceFile(source_file));
} else if (index % 3 == 1) {
StackFrameWithId* stack_frame = new StackFrameWithId();
stack_frame->set_id(strings::Printf("e%.2d", index));
TF_ASSERT_OK(writer->WriteStackFrameWithId(stack_frame));
} else {
GraphOpCreation* op_creation = new GraphOpCreation();
op_creation->set_op_type("Log");
op_creation->set_op_name(strings::Printf("Log_%.2d", index));
TF_ASSERT_OK(writer->WriteGraphOpCreation(op_creation));
}
};
for (size_t i = 0; i < kConcurrentWrites; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> file_paths;
std::vector<string> host_names;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
file_paths.push_back(actuals[i].source_file().file_path());
host_names.push_back(actuals[i].source_file().host_name());
}
std::sort(file_paths.begin(), file_paths.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(file_paths[i],
strings::Printf("/home/tf_programs/program_%.2d.py", i * 3));
EXPECT_EQ(host_names[i], "localhost.localdomain");
}
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> stack_frame_ids;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
stack_frame_ids.push_back(actuals[i].stack_frame_with_id().id());
}
std::sort(stack_frame_ids.begin(), stack_frame_ids.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(stack_frame_ids[i], strings::Printf("e%.2d", i * 3 + 1));
}
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), kConcurrentWrites / 3);
std::vector<string> op_types;
std::vector<string> op_names;
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
op_types.push_back(actuals[i].graph_op_creation().op_type());
op_names.push_back(actuals[i].graph_op_creation().op_name());
}
std::sort(op_names.begin(), op_names.end());
for (int32_t i = 0; i < kConcurrentWrites / 3; ++i) {
EXPECT_EQ(op_types[i], "Log");
EXPECT_EQ(op_names[i], strings::Printf("Log_%.2d", i * 3 + 2));
}
}
TEST_F(DebugEventsWriterTest, WriteExecutionWithCyclicBufferNoFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), 0);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteExecutionWithCyclicBufferFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
Execution* execution = new Execution();
execution->set_op_type("Log");
execution->add_input_tensor_ids(i);
TF_ASSERT_OK(writer->WriteExecution(execution));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
EXPECT_EQ(actuals[i].execution().op_type(), "Log");
EXPECT_EQ(actuals[i].execution().input_tensor_ids().size(), 1);
EXPECT_EQ(actuals[i].execution().input_tensor_ids()[0],
kCyclicBufferSize + i);
}
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
Execution* execution = new Execution();
execution->set_op_type("Abs");
execution->add_input_tensor_ids(counter.fetch_add(1));
TF_ASSERT_OK(writer->WriteExecution(execution));
};
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::EXECUTION, &actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize * 2);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
const size_t index = i + kCyclicBufferSize;
EXPECT_EQ(actuals[index].execution().op_type(), "Abs");
EXPECT_EQ(actuals[index].execution().input_tensor_ids().size(), 1);
EXPECT_GE(actuals[index].execution().input_tensor_ids()[0], 0);
EXPECT_LE(actuals[index].execution().input_tensor_ids()[0],
kCyclicBufferSize * 2);
}
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithCyclicBufferNoFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 0);
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithoutPreviousInitCall) {
  const int64_t kCyclicBufferSize = -1;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_0"));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), 1);
EXPECT_EQ(actuals[0].graph_execution_trace().tfdbg_context_id(), "graph_0");
TF_ASSERT_OK(writer->Close());
}
TEST_F(DebugEventsWriterTest, WriteGraphExecutionTraceWithCyclicBufferFlush) {
const size_t kCyclicBufferSize = 10;
DebugEventsWriter* writer = DebugEventsWriter::GetDebugEventsWriter(
dump_root_, tfdbg_run_id_, kCyclicBufferSize);
TF_ASSERT_OK(writer->Init());
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(strings::Printf("graph_%.2ld", i));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
}
TF_ASSERT_OK(writer->FlushExecutionFiles());
std::vector<DebugEvent> actuals;
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
EXPECT_EQ(actuals[i].graph_execution_trace().tfdbg_context_id(),
strings::Printf("graph_%.2ld", i + kCyclicBufferSize));
}
thread::ThreadPool* thread_pool =
new thread::ThreadPool(Env::Default(), "test_pool", 8);
std::atomic_int_fast64_t counter(0);
auto fn = [&writer, &counter]() {
GraphExecutionTrace* trace = new GraphExecutionTrace();
trace->set_tfdbg_context_id(
strings::Printf("new_graph_%.2ld", counter.fetch_add(1)));
TF_ASSERT_OK(writer->WriteGraphExecutionTrace(trace));
};
for (size_t i = 0; i < kCyclicBufferSize * 2; ++i) {
thread_pool->Schedule(fn);
}
delete thread_pool;
TF_ASSERT_OK(writer->Close());
ReadDebugEventProtos(writer, DebugEventFileType::GRAPH_EXECUTION_TRACES,
&actuals);
EXPECT_EQ(actuals.size(), kCyclicBufferSize * 2);
for (size_t i = 0; i < kCyclicBufferSize; ++i) {
const size_t index = i + kCyclicBufferSize;
EXPECT_EQ(actuals[index].graph_execution_trace().tfdbg_context_id().find(
"new_graph_"),
0);
}
ReadDebugEventProtos(writer, DebugEventFileType::SOURCE_FILES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::STACK_FRAMES, &actuals);
EXPECT_EQ(actuals.size(), 0);
ReadDebugEventProtos(writer, DebugEventFileType::GRAPHS, &actuals);
  EXPECT_EQ(actuals.size(), 0);
1,707 | cpp | tensorflow/tensorflow | tensor_format | tensorflow/core/util/tensor_format.cc | tensorflow/core/util/tensor_format_test.cc | #ifndef TENSORFLOW_CORE_UTIL_TENSOR_FORMAT_H_
#define TENSORFLOW_CORE_UTIL_TENSOR_FORMAT_H_
#include <array>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
enum TensorFormat {
FORMAT_NHWC = 0,
FORMAT_NCHW = 1,
FORMAT_NCHW_VECT_C = 2,
FORMAT_NHWC_VECT_W = 3,
FORMAT_HWNC = 4,
FORMAT_HWCN = 5,
};
enum FilterTensorFormat {
FORMAT_HWIO = 0,
FORMAT_OIHW = 1,
FORMAT_OHWI = 2,
FORMAT_OIHW_VECT_I = 3,
};
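// Added commentary on the vectorized layouts: FORMAT_NCHW_VECT_C stores the
// channel dimension in outer groups of 4 (commonly used by vectorized int8
// kernels), FORMAT_NHWC_VECT_W packs the width dimension in groups of 4, and
// FORMAT_OIHW_VECT_I packs filter input channels in groups of 4. The helpers
// below therefore treat each of these formats as having one extra dimension.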
bool FormatFromString(absl::string_view format_str, TensorFormat* format);
bool FilterFormatFromString(absl::string_view format_str,
FilterTensorFormat* format);
std::string ToString(TensorFormat format);
std::string ToString(FilterTensorFormat format);
inline int GetTensorSpatialDims(int num_dims, TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
case FORMAT_NCHW:
case FORMAT_HWNC:
case FORMAT_HWCN:
return num_dims - 2;
case FORMAT_NCHW_VECT_C:
case FORMAT_NHWC_VECT_W:
return num_dims - 3;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
inline int GetFilterTensorSpatialDims(int num_dims, FilterTensorFormat format) {
if (format == FORMAT_OIHW_VECT_I) {
return num_dims - 3;
} else {
return num_dims - 2;
}
}
inline int GetTensorDimsFromSpatialDims(int num_spatial_dims,
TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
case FORMAT_NCHW:
case FORMAT_HWNC:
case FORMAT_HWCN:
return num_spatial_dims + 2;
case FORMAT_NCHW_VECT_C:
case FORMAT_NHWC_VECT_W:
return num_spatial_dims + 3;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
inline int GetFilterTensorDimsFromSpatialDims(int num_spatial_dims,
FilterTensorFormat format) {
if (format == FORMAT_OIHW_VECT_I) {
return num_spatial_dims + 3;
} else {
return num_spatial_dims + 2;
}
}
inline int GetTensorBatchDimIndex(int num_dims, TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
case FORMAT_NCHW:
case FORMAT_NCHW_VECT_C:
case FORMAT_NHWC_VECT_W:
return 0;
case FORMAT_HWNC:
return num_dims - 2;
case FORMAT_HWCN:
return num_dims - 1;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
inline int GetTensorFeatureDimIndex(int num_dims, TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
case FORMAT_HWNC:
return num_dims - 1;
case FORMAT_NHWC_VECT_W:
case FORMAT_HWCN:
return num_dims - 2;
case FORMAT_NCHW:
case FORMAT_NCHW_VECT_C:
return 1;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
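// Example (added commentary): for a 4-D tensor the feature ('C') dimension is
//   GetTensorFeatureDimIndex(4, FORMAT_NHWC)  ->  3
//   GetTensorFeatureDimIndex(4, FORMAT_NCHW)  ->  1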
inline int GetTensorInnerFeatureDimIndex(int num_dims, TensorFormat format) {
DCHECK_EQ(format, FORMAT_NCHW_VECT_C);
return num_dims - 1;
}
inline int GetTensorInnerWidthDimIndex(int num_dims, TensorFormat format) {
DCHECK_EQ(format, FORMAT_NHWC_VECT_W);
return num_dims - 1;
}
inline int GetTensorSpatialDimIndex(int num_dims, TensorFormat format,
int spatial_dim) {
CHECK(spatial_dim >= 0 &&
spatial_dim < GetTensorSpatialDims(num_dims, format))
<< spatial_dim << " " << num_dims << " " << ToString(format);
switch (format) {
case FORMAT_NHWC:
case FORMAT_NHWC_VECT_W:
return spatial_dim + 1;
case FORMAT_NCHW:
case FORMAT_NCHW_VECT_C:
return spatial_dim + 2;
case FORMAT_HWNC:
case FORMAT_HWCN:
return spatial_dim;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
inline int GetFilterTensorSpatialDimIndex(int num_dims,
FilterTensorFormat format, int dim) {
CHECK(dim >= 0 && dim < GetFilterTensorSpatialDims(num_dims, format))
<< dim << " " << num_dims << " " << ToString(format);
switch (format) {
case FORMAT_HWIO:
return dim;
case FORMAT_OIHW:
case FORMAT_OIHW_VECT_I:
return dim + 2;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
inline int GetFilterTensorInnerInputChannelsDimIndex(
int num_dims, FilterTensorFormat format) {
DCHECK_EQ(format, FORMAT_OIHW_VECT_I);
return num_dims - 1;
}
inline int GetFilterTensorInputChannelsDimIndex(int num_dims,
FilterTensorFormat format) {
switch (format) {
case FORMAT_HWIO:
return num_dims - 2;
case FORMAT_OIHW:
case FORMAT_OIHW_VECT_I:
return 1;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
inline int GetFilterTensorOutputChannelsDimIndex(int num_dims,
FilterTensorFormat format) {
switch (format) {
case FORMAT_HWIO:
return num_dims - 1;
case FORMAT_OIHW:
case FORMAT_OIHW_VECT_I:
return 0;
default:
LOG(FATAL) << "Unknown format " << format;
return -1;
}
}
template <int NUM_SPATIAL_DIMS>
inline int32 GetTensorDimIndex(TensorFormat format, char dimension) {
if (format == FORMAT_NHWC || format == FORMAT_NHWC_VECT_W) {
switch (dimension) {
case 'N': return 0;
case '0': return 1;
case '1': return 2;
case '2': return 3;
case 'H': return NUM_SPATIAL_DIMS - 1;
case 'W': return NUM_SPATIAL_DIMS;
case 'C': return NUM_SPATIAL_DIMS + 1;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
} else if (format == FORMAT_NCHW || format == FORMAT_NCHW_VECT_C) {
switch (dimension) {
case 'N': return 0;
case 'C': return 1;
case '0': return 2;
case '1': return 3;
case '2': return 4;
case 'H': return NUM_SPATIAL_DIMS;
case 'W': return NUM_SPATIAL_DIMS + 1;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
} else if (format == FORMAT_HWNC) {
switch (dimension) {
case '0': return 0;
case '1': return 1;
case '2': return 2;
case 'H': return NUM_SPATIAL_DIMS - 2;
case 'W': return NUM_SPATIAL_DIMS - 1;
case 'N': return NUM_SPATIAL_DIMS;
case 'C': return NUM_SPATIAL_DIMS + 1;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
} else if (format == FORMAT_HWCN) {
switch (dimension) {
case '0': return 0;
case '1': return 1;
case '2': return 2;
case 'H': return NUM_SPATIAL_DIMS - 2;
case 'W': return NUM_SPATIAL_DIMS - 1;
case 'C': return NUM_SPATIAL_DIMS;
case 'N': return NUM_SPATIAL_DIMS + 1;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
} else {
LOG(FATAL) << "Invalid format: " << static_cast<int>(format);
return -1;
}
}
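// Example (added commentary): with two spatial dimensions,
//   GetTensorDimIndex<2>(FORMAT_NHWC, 'H')  ->  1
//   GetTensorDimIndex<2>(FORMAT_NCHW, 'H')  ->  2
//   GetTensorDimIndex<2>(FORMAT_NHWC, 'C')  ->  3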
template <int NUM_SPATIAL_DIMS>
inline int GetFilterDimIndex(FilterTensorFormat filter_tensor_format,
char dimension) {
if (filter_tensor_format == FORMAT_HWIO) {
switch (dimension) {
case '0': return 0;
case '1': return 1;
case '2': return 2;
case 'H': return NUM_SPATIAL_DIMS - 2;
case 'W': return NUM_SPATIAL_DIMS - 1;
case 'I': return NUM_SPATIAL_DIMS;
case 'O': return NUM_SPATIAL_DIMS + 1;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
} else if (filter_tensor_format == FORMAT_OIHW ||
filter_tensor_format == FORMAT_OIHW_VECT_I) {
switch (dimension) {
case 'O': return 0;
case 'I': return 1;
case '0': return 2;
case '1': return 3;
case '2': return 4;
case 'H': return NUM_SPATIAL_DIMS;
case 'W': return NUM_SPATIAL_DIMS + 1;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1;
}
} else {
LOG(FATAL) << "Invalid format: " << static_cast<int>(filter_tensor_format);
return -1;
}
}
inline int32 GetTensorDimIndex(TensorFormat format, char dimension) {
return GetTensorDimIndex<2>(format, dimension);
}
inline int32 GetTensorDimIndex(TensorFormat format, char dimension,
int num_total_dims) {
int32_t index = (GetTensorSpatialDims(num_total_dims, format) == 3)
? GetTensorDimIndex<3>(format, dimension)
: GetTensorDimIndex<2>(format, dimension);
CHECK(index >= 0 && index < num_total_dims)
<< "Invalid index from the dimension: " << index << ", " << format << ", "
<< dimension;
return index;
}
template <typename T>
T GetTensorDim(gtl::ArraySlice<T> dimension_attributes,
TensorFormat tensor_format, char dimension) {
int index =
GetTensorDimIndex(tensor_format, dimension, dimension_attributes.size());
return dimension_attributes[index];
}
template <typename T>
T GetFilterDim(gtl::ArraySlice<T> dimension_attribute,
FilterTensorFormat filter_tensor_format, char dimension) {
int index = (GetFilterTensorSpatialDims(dimension_attribute.size(),
filter_tensor_format) == 3)
? GetFilterDimIndex<3>(filter_tensor_format, dimension)
: GetFilterDimIndex<2>(filter_tensor_format, dimension);
using size_type = typename gtl::ArraySlice<T>::size_type;
CHECK(index >= 0 &&
static_cast<size_type>(index) < dimension_attribute.size())
<< "Invalid index from the dimension: " << index << ", "
<< filter_tensor_format << ", " << dimension;
return dimension_attribute[index];
}
template <typename T>
T GetTensorDim(const std::vector<T>& attributes, TensorFormat format,
char dimension) {
return GetTensorDim(gtl::ArraySlice<T>(attributes), format, dimension);
}
inline int64_t GetTensorDim(const TensorShape& tensor_shape,
TensorFormat tensor_format, char dimension) {
return GetTensorDim(absl::Span<const int64_t>(tensor_shape.dim_sizes()),
tensor_format, dimension);
}
inline int64_t GetFilterDim(const TensorShape& tensor_shape,
FilterTensorFormat tensor_filter_format,
char dimension) {
return GetFilterDim(absl::Span<const int64_t>(tensor_shape.dim_sizes()),
tensor_filter_format, dimension);
}
inline int64_t GetTensorDim(const Tensor& tensor, TensorFormat tensor_format,
char dimension) {
return GetTensorDim(tensor.shape(), tensor_format, dimension);
}
inline int64_t GetFilterDim(const Tensor& tensor,
FilterTensorFormat filter_tensor_format,
char dimension) {
return GetFilterDim(tensor.shape(), filter_tensor_format, dimension);
}
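// Example (added commentary): reading a dimension by semantic name instead of
// raw index:
//   TensorShape shape({2, 3, 4, 8});         // an NHWC shape
//   GetTensorDim(shape, FORMAT_NHWC, 'H');   // -> 3
//   GetTensorDim(shape, FORMAT_NHWC, 'C');   // -> 8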
inline void GetExplicitPaddingForDim(
const std::vector<int64_t>& explicit_paddings, TensorFormat tensor_format,
char dimension, int64_t* padding_before, int64_t* padding_after) {
int index =
GetTensorDimIndex(tensor_format, dimension, explicit_paddings.size() / 2);
*padding_before = explicit_paddings[2 * index];
*padding_after = explicit_paddings[2 * index + 1];
}
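// Example (added commentary): explicit_paddings holds one (before, after)
// pair per dimension, ordered by the tensor format. For FORMAT_NHWC with
// explicit_paddings = {0, 0, 1, 2, 3, 4, 0, 0}, dimension 'H' maps to index
// 1, so *padding_before == 1 and *padding_after == 2.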
std::string GetConvnetDataFormatAttrString();
std::string GetConvnet3dDataFormatAttrString();
std::string GetConvnetFilterFormatAttrString();
std::string GetConvnet3dFilterFormatAttrString();
std::string GetConvnetDataFormat2D3DAttrString();
inline Status ShapeFromFormatWithStatus(TensorFormat format, int64_t N,
absl::Span<const int64_t> spatial,
int64_t C, TensorShape* shape) {
const int dims = GetTensorDimsFromSpatialDims(spatial.size(), format);
absl::InlinedVector<int64_t, 6UL> dim_sizes(dims);
dim_sizes[GetTensorBatchDimIndex(dims, format)] = N;
for (int dim = 0; static_cast<size_t>(dim) < spatial.size(); dim++) {
auto dim_size = spatial[dim];
if (format == FORMAT_NHWC_VECT_W &&
static_cast<size_t>(dim) == spatial.size() - 1) {
CHECK_EQ(0, dim_size % 4)
<< "FORMAT_NHWC_VECT_W requires W to be a multiple of 4, but W="
<< dim_size;
dim_sizes[GetTensorInnerWidthDimIndex(dims, format)] = 4;
dim_size /= 4;
}
dim_sizes[GetTensorSpatialDimIndex(dims, format, dim)] = dim_size;
}
int feature_index = GetTensorFeatureDimIndex(dims, format);
if (format == FORMAT_NCHW_VECT_C) {
CHECK_EQ(0, C % 4) << "NCHW_VECT_C requires C to be a multiple of 4, but C="
<< C;
C /= 4;
dim_sizes[GetTensorInnerFeatureDimIndex(dims, format)] = 4;
}
dim_sizes[feature_index] = C;
return TensorShapeUtils::MakeShape(dim_sizes, shape);
}
inline TensorShape ShapeFromFormat(TensorFormat format, int64_t N,
absl::Span<const int64_t> spatial,
int64_t C) {
TensorShape shape;
TF_CHECK_OK(ShapeFromFormatWithStatus(format, N, spatial, C, &shape));
return shape;
}
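// Example (added commentary): the same logical N=2, H=3, W=4, C=8 tensor in
// two layouts:
//   ShapeFromFormat(FORMAT_NHWC, 2, {3, 4}, 8)         -> [2, 3, 4, 8]
//   ShapeFromFormat(FORMAT_NCHW_VECT_C, 2, {3, 4}, 8)  -> [2, 2, 3, 4, 4]
// (for NCHW_VECT_C, C=8 becomes 2 outer channel groups of 4).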
inline TensorShape ShapeFromFilterTensorFormat(
FilterTensorFormat format, absl::Span<const int64_t> spatial, int64_t I,
int64_t O) {
const int dims = GetFilterTensorDimsFromSpatialDims(spatial.size(), format);
absl::InlinedVector<int64_t, 6UL> dim_sizes(dims);
dim_sizes[GetFilterTensorOutputChannelsDimIndex(dims, format)] = O;
for (int dim = 0; static_cast<size_t>(dim) < spatial.size(); dim++) {
dim_sizes[GetFilterTensorSpatialDimIndex(dims, format, dim)] = spatial[dim];
}
if (format == FORMAT_OIHW_VECT_I) {
CHECK_EQ(0, I % 4) << "OIHW_VECT_I requires I to be a multiple of 4, but I="
<< I;
I /= 4;
dim_sizes[GetFilterTensorInnerInputChannelsDimIndex(dims, format)] = 4;
}
dim_sizes[GetFilterTensorInputChannelsDimIndex(dims, format)] = I;
return TensorShape(dim_sizes);
}
inline Status ShapeFromFormatWithStatus(TensorFormat format, int64_t N,
int64_t H, int64_t W, int64_t C,
TensorShape* shape) {
return ShapeFromFormatWithStatus(format, N, {H, W}, C, shape);
}
inline TensorShape ShapeFromFormat(TensorFormat format, int64_t N, int64_t H,
int64_t W, int64_t C) {
TensorShape shape;
TF_CHECK_OK(ShapeFromFormatWithStatus(format, N, {H, W}, C, &shape));
return shape;
}
inline TensorShape ShapeFromFilterTensorFormat(FilterTensorFormat format,
int64_t H, int64_t W, int64_t I,
int64_t O) {
return ShapeFromFilterTensorFormat(format, {H, W}, I, O);
}
inline Status ShapeFromFormatWithStatus(TensorFormat dst_format,
const TensorShape& src_shape,
TensorFormat src_format,
TensorShape* shape) {
if (src_format == dst_format) {
*shape = src_shape;
return absl::OkStatus();
}
const int64_t batch = GetTensorDim(src_shape, src_format, 'N');
const int64_t channels = GetTensorDim(src_shape, src_format, 'C') *
(src_format == FORMAT_NCHW_VECT_C ? 4 : 1);
const int num_src_spatial_dims =
GetTensorSpatialDims(src_shape.dims(), src_format);
std::vector<int64_t> spatial_dims(num_src_spatial_dims);
for (int spatial_dim = 0; spatial_dim < num_src_spatial_dims; ++spatial_dim) {
spatial_dims[spatial_dim] = absl::Span<const int64_t>(
src_shape.dim_sizes())[GetTensorSpatialDimIndex(
src_shape.dims(), src_format, spatial_dim)];
}
if (src_format == FORMAT_NHWC_VECT_W) {
spatial_dims[num_src_spatial_dims - 1] *= 4;
}
return ShapeFromFormatWithStatus(dst_format, batch, {spatial_dims}, channels,
shape);
}
inline TensorShape ShapeFromFormat(TensorFormat dst_format,
const TensorShape& src_shape,
TensorFormat src_format) {
TensorShape shape;
TF_CHECK_OK(
ShapeFromFormatWithStatus(dst_format, src_shape, src_format, &shape));
return shape;
}
inline TensorShape ShapeFromFilterFormat(FilterTensorFormat dst_filter_format,
const TensorShape& src_shape,
FilterTensorFormat src_filter_format) {
if (src_filter_format == dst_filter_format) {
return src_shape;
}
const int64_t output_channels =
GetFilterDim(src_shape, src_filter_format, 'O');
const int64_t input_channels =
GetFilterDim(src_shape, src_filter_format, 'I') *
(src_filter_format == FORMAT_OIHW_VECT_I ? 4 : 1);
if (GetFilterTensorSpatialDims(src_shape.dims(), src_filter_format) == 3) {
return ShapeFromFilterTensorFormat(
dst_filter_format,
{{GetFilterDim(src_shape, src_filter_format, '0'),
GetFilterDim(src_shape, src_filter_format, '1'),
GetFilterDim(src_shape, src_filter_format, '2')}},
input_channels, output_channels);
}
return ShapeFromFilterTensorFormat(
dst_filter_format,
{{GetFilterDim(src_shape, src_filter_format, 'H'),
GetFilterDim(src_shape, src_filter_format, 'W')}},
input_channels, output_channels);
}
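// Example (added commentary): converting a 3x3 filter with 8 input and 16
// output channels from HWIO to OIHW:
//   ShapeFromFilterFormat(FORMAT_OIHW, TensorShape({3, 3, 8, 16}), FORMAT_HWIO)
//       -> [16, 8, 3, 3]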
}
#endif
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
string GetConvnetDataFormatAttrString() {
return "data_format: { 'NHWC', 'NCHW' } = 'NHWC' ";
}
string GetConvnet3dDataFormatAttrString() {
return "data_format: { 'NDHWC', 'NCDHW' } = 'NDHWC' ";
}
string GetConvnetDataFormat2D3DAttrString() {
return "data_format: { 'NHWC', 'NCHW', 'NDHWC', 'NCDHW' } = 'NHWC' ";
}
string GetConvnetFilterFormatAttrString() {
return "filter_format: { 'HWIO', 'OIHW' } = 'HWIO' ";
}
string GetConvnet3dFilterFormatAttrString() {
return "filter_format: { 'DHWIO', 'OIDHW' } = 'DHWIO' ";
}
string ToString(TensorFormat format) {
switch (format) {
case FORMAT_NHWC:
return "NHWC";
case FORMAT_NCHW:
return "NCHW";
case FORMAT_NCHW_VECT_C:
return "NCHW_VECT_C";
case FORMAT_NHWC_VECT_W:
return "NHWC_VECT_W";
case FORMAT_HWNC:
return "HWNC";
case FORMAT_HWCN:
return "HWCN";
default:
LOG(FATAL) << "Invalid Format: " << static_cast<int32>(format);
return "INVALID_FORMAT";
}
}
string ToString(FilterTensorFormat format) {
switch (format) {
case FORMAT_HWIO:
return "HWIO";
case FORMAT_OIHW:
return "OIHW";
case FORMAT_OHWI:
return "OHWI";
case FORMAT_OIHW_VECT_I:
return "OIHW_VECT_I";
default:
LOG(FATAL) << "Invalid Filter Format: " << static_cast<int32>(format);
return "INVALID_FORMAT";
}
}
bool FormatFromString(absl::string_view format_str, TensorFormat* format) {
if (format_str == "NHWC" || format_str == "NDHWC") {
*format = FORMAT_NHWC;
return true;
}
if (format_str == "NCHW" || format_str == "NCDHW") {
*format = FORMAT_NCHW;
return true;
}
if (format_str == "NCHW_VECT_C") {
*format = FORMAT_NCHW_VECT_C;
return true;
}
if (format_str == "NHWC_VECT_W") {
*format = FORMAT_NHWC_VECT_W;
return true;
}
if (format_str == "HWNC") {
*format = FORMAT_HWNC;
return true;
}
if (format_str == "HWCN") {
*format = FORMAT_HWCN;
return true;
}
return false;
}
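// Hedged note: the parse above is deliberately many-to-one for 3-D names
// ("NHWC" and "NDHWC" both map to FORMAT_NHWC, "NCHW" and "NCDHW" to
// FORMAT_NCHW), so a ToString() round trip of a 3-D spelling returns the
// 2-D one, e.g. FormatFromString("NDHWC", &f) then ToString(f) == "NHWC".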
bool FilterFormatFromString(absl::string_view format_str,
FilterTensorFormat* format) {
if (format_str == "HWIO" || format_str == "DHWIO") {
*format = FORMAT_HWIO;
return true;
}
if (format_str == "OIHW" || format_str == "OIDHW") {
*format = FORMAT_OIHW;
return true;
}
if (format_str == "OIHW_VECT_I") {
*format = FORMAT_OIHW_VECT_I;
return true;
}
return false;
}
} | #include <utility>
#include "tensorflow/core/util/tensor_format.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
#define EnumStringPair(val) \
{ val, #val }
std::pair<TensorFormat, const char*> test_data_formats[] = {
EnumStringPair(FORMAT_NHWC), EnumStringPair(FORMAT_NCHW),
EnumStringPair(FORMAT_NCHW_VECT_C), EnumStringPair(FORMAT_NHWC_VECT_W),
EnumStringPair(FORMAT_HWNC), EnumStringPair(FORMAT_HWCN),
};
std::pair<FilterTensorFormat, const char*> test_filter_formats[] = {
EnumStringPair(FORMAT_HWIO),
EnumStringPair(FORMAT_OIHW),
EnumStringPair(FORMAT_OIHW_VECT_I),
};
struct TensorDimMap {
int n() const { return dim_n; }
int h() const { return dim_h; }
int w() const { return dim_w; }
int c() const { return dim_c; }
int spatial(int spatial_index) const { return spatial_dim[spatial_index]; }
int dim_n, dim_h, dim_w, dim_c;
int spatial_dim[3];
};
struct FilterDimMap {
int h() const { return dim_h; }
int w() const { return dim_w; }
int i() const { return dim_i; }
int o() const { return dim_o; }
int spatial(int spatial_index) const { return spatial_dim[spatial_index]; }
int dim_h, dim_w, dim_i, dim_o;
int spatial_dim[3];
};
struct DimMaps {
#define StaCoExTensorDm static constexpr TensorDimMap
StaCoExTensorDm kTdmInvalid = { -1, -1, -1, -1, { -1, -1, -1 } };
StaCoExTensorDm kTdmNHWC[4] = { kTdmInvalid,
{ 0, -1, 1, 2, { 1, -1, -1 } },
{ 0, 1, 2, 3, { 1, 2, -1 } },
{ 0, 2, 3, 4, { 1, 2, 3 } }
};
StaCoExTensorDm kTdmNCHW[4] = { kTdmInvalid,
{ 0, -1, 2, 1, { 2, -1, -1 } },
{ 0, 2, 3, 1, { 2, 3, -1 } },
{ 0, 3, 4, 1, { 2, 3, 4 } }
};
StaCoExTensorDm kTdmHWNC[4] = { kTdmInvalid,
{ 1, -1, 0, 2, { 0, -1, -1 } },
{ 2, 0, 1, 3, { 0, 1, -1 } },
{ 3, 1, 2, 4, { 0, 1, 2 } }
};
StaCoExTensorDm kTdmHWCN[4] = { kTdmInvalid,
{ 2, -1, 0, 1, { 0, -1, -1 } },
{ 3, 0, 1, 2, { 0, 1, -1 } },
{ 4, 1, 2, 3, { 0, 1, 2 } }
};
#undef StaCoExTensorDm
#define StaCoExFilterDm static constexpr FilterDimMap
StaCoExFilterDm kFdmInvalid = { -1, -1, -1, -1, { -1, -1, -1 } };
StaCoExFilterDm kFdmHWIO[4] = { kFdmInvalid,
{ -1, 0, 1, 2, { 0, -1, -1 } },
{ 0, 1, 2, 3, { 0, 1, -1 } },
{ 1, 2, 3, 4, { 0, 1, 2 } }
};
StaCoExFilterDm kFdmOIHW[4] = { kFdmInvalid,
{ -1, 2, 1, 0, { 2, -1, -1 } },
{ 2, 3, 1, 0, { 2, 3, -1 } },
{ 3, 4, 1, 0, { 2, 3, 4 } }
};
#undef StaCoExFilterDm
};
inline constexpr const TensorDimMap&
GetTensorDimMap(const int num_spatial_dims, const TensorFormat format) {
return
(format == FORMAT_NHWC ||
format == FORMAT_NHWC_VECT_W) ? DimMaps::kTdmNHWC[num_spatial_dims] :
(format == FORMAT_NCHW ||
format == FORMAT_NCHW_VECT_C) ? DimMaps::kTdmNCHW[num_spatial_dims] :
(format == FORMAT_HWNC) ? DimMaps::kTdmHWNC[num_spatial_dims] :
(format == FORMAT_HWCN) ? DimMaps::kTdmHWCN[num_spatial_dims]
: DimMaps::kTdmInvalid;
}
inline constexpr const FilterDimMap&
GetFilterDimMap(const int num_spatial_dims,
const FilterTensorFormat format) {
return
(format == FORMAT_HWIO) ? DimMaps::kFdmHWIO[num_spatial_dims] :
(format == FORMAT_OIHW ||
format == FORMAT_OIHW_VECT_I) ? DimMaps::kFdmOIHW[num_spatial_dims]
: DimMaps::kFdmInvalid;
}
constexpr TensorDimMap DimMaps::kTdmInvalid;
constexpr TensorDimMap DimMaps::kTdmNHWC[4];
constexpr TensorDimMap DimMaps::kTdmNCHW[4];
constexpr TensorDimMap DimMaps::kTdmHWNC[4];
constexpr TensorDimMap DimMaps::kTdmHWCN[4];
constexpr FilterDimMap DimMaps::kFdmInvalid;
constexpr FilterDimMap DimMaps::kFdmHWIO[4];
constexpr FilterDimMap DimMaps::kFdmOIHW[4];
TEST(TensorFormatTest, FormatEnumsAndStrings) {
const string prefix = "FORMAT_";
for (auto& test_data_format : test_data_formats) {
const char* stringified_format_enum = test_data_format.second;
LOG(INFO) << stringified_format_enum << " = " << test_data_format.first;
string expected_format_str = &stringified_format_enum[prefix.size()];
TensorFormat format;
EXPECT_TRUE(FormatFromString(expected_format_str, &format));
string format_str = ToString(format);
EXPECT_EQ(expected_format_str, format_str);
EXPECT_EQ(test_data_format.first, format);
}
for (auto& test_filter_format : test_filter_formats) {
const char* stringified_format_enum = test_filter_format.second;
LOG(INFO) << stringified_format_enum << " = " << test_filter_format.first;
string expected_format_str = &stringified_format_enum[prefix.size()];
FilterTensorFormat format;
EXPECT_TRUE(FilterFormatFromString(expected_format_str, &format));
string format_str = ToString(format);
EXPECT_EQ(expected_format_str, format_str);
EXPECT_EQ(test_filter_format.first, format);
}
}
template <int num_spatial_dims>
void RunDimensionIndexesTest() {
for (auto& test_data_format : test_data_formats) {
TensorFormat format = test_data_format.first;
auto& tdm = GetTensorDimMap(num_spatial_dims, format);
int num_dims = GetTensorDimsFromSpatialDims(num_spatial_dims, format);
LOG(INFO) << ToString(format) << ", num_spatial_dims=" << num_spatial_dims
<< ", num_dims=" << num_dims;
EXPECT_EQ(GetTensorBatchDimIndex(num_dims, format), tdm.n());
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, 'N'), tdm.n());
EXPECT_EQ(GetTensorFeatureDimIndex(num_dims, format), tdm.c());
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, 'C'), tdm.c());
for (int i = 0; i < num_spatial_dims; ++i) {
EXPECT_EQ(GetTensorSpatialDimIndex(num_dims, format, i), tdm.spatial(i));
EXPECT_EQ(GetTensorDimIndex<num_spatial_dims>(format, '0' + i),
tdm.spatial(i));
}
}
for (auto& test_filter_format : test_filter_formats) {
FilterTensorFormat format = test_filter_format.first;
auto& fdm = GetFilterDimMap(num_spatial_dims, format);
int num_dims = GetFilterTensorDimsFromSpatialDims(num_spatial_dims, format);
LOG(INFO) << ToString(format) << ", num_spatial_dims=" << num_spatial_dims
<< ", num_dims=" << num_dims;
EXPECT_EQ(GetFilterTensorOutputChannelsDimIndex(num_dims, format), fdm.o());
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, 'O'), fdm.o());
EXPECT_EQ(GetFilterTensorInputChannelsDimIndex(num_dims, format), fdm.i());
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, 'I'), fdm.i());
for (int i = 0; i < num_spatial_dims; ++i) {
EXPECT_EQ(GetFilterTensorSpatialDimIndex(num_dims, format, i),
fdm.spatial(i));
EXPECT_EQ(GetFilterDimIndex<num_spatial_dims>(format, '0' + i),
fdm.spatial(i));
}
}
}
TEST(TensorFormatTest, DimensionIndexes) {
RunDimensionIndexesTest<1>();
RunDimensionIndexesTest<2>();
RunDimensionIndexesTest<3>();
}
} |
1,708 | cpp | tensorflow/tensorflow | bad_indices_policy | tensorflow/core/util/bad_indices_policy.cc | tensorflow/core/util/bad_indices_policy_test.cc | #ifndef TENSORFLOW_CORE_UTIL_BAD_INDICES_POLICY_H_
#define TENSORFLOW_CORE_UTIL_BAD_INDICES_POLICY_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
enum class BadIndicesPolicy {
kDefault,
kError,
kIgnore,
};
absl::StatusOr<BadIndicesPolicy> BadIndicesPolicyFromString(
absl::string_view str);
}
#endif
#include "tensorflow/core/util/bad_indices_policy.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
constexpr char kDefault[] = "DEFAULT";
constexpr char kErrorStr[] = "ERROR";
constexpr char kIgnoreStr[] = "IGNORE";
absl::StatusOr<BadIndicesPolicy> BadIndicesPolicyFromString(
absl::string_view str) {
if (str.empty()) return BadIndicesPolicy::kDefault;
if (str == kDefault) return BadIndicesPolicy::kDefault;
if (str == kErrorStr) return BadIndicesPolicy::kError;
if (str == kIgnoreStr) return BadIndicesPolicy::kIgnore;
return absl::InvalidArgumentError(
absl::StrCat("Unknown bad indices handling attribute: ", str));
}
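// Hedged usage sketch (illustrative): mapping an op attribute to a policy.
//
//   absl::StatusOr<BadIndicesPolicy> policy =
//       BadIndicesPolicyFromString("IGNORE");
//   // policy.value() == BadIndicesPolicy::kIgnore; "" and "DEFAULT" both
//   // yield kDefault, and any other string is an InvalidArgument error.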
} | #include "tensorflow/core/util/bad_indices_policy.h"
#include <gmock/gmock.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kDefault = "DEFAULT";
constexpr absl::string_view kErrorStr = "ERROR";
constexpr absl::string_view kIgnoreStr = "IGNORE";
class BadIndicesPolicyFromStringTest : public ::testing::Test {
protected:
void TestValidInput(absl::string_view input, BadIndicesPolicy expected) {
absl::StatusOr<BadIndicesPolicy> result = BadIndicesPolicyFromString(input);
ASSERT_TRUE(result.ok());
EXPECT_EQ(result.value(), expected);
}
};
TEST_F(BadIndicesPolicyFromStringTest, EmptyString) {
TestValidInput("", BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, DefaultKeyword) {
TestValidInput(kDefault, BadIndicesPolicy::kDefault);
}
TEST_F(BadIndicesPolicyFromStringTest, ErrorKeyword) {
TestValidInput(kErrorStr, BadIndicesPolicy::kError);
}
TEST_F(BadIndicesPolicyFromStringTest, IgnoreKeyword) {
TestValidInput(kIgnoreStr, BadIndicesPolicy::kIgnore);
}
TEST_F(BadIndicesPolicyFromStringTest, InvalidInput) {
absl::StatusOr<BadIndicesPolicy> result =
BadIndicesPolicyFromString("unknown");
ASSERT_FALSE(result.ok());
EXPECT_THAT(result.status().message(),
::testing::HasSubstr("Unknown bad indices handling attribute"));
}
}
} |
1,709 | cpp | tensorflow/tensorflow | equal_graph_def | tensorflow/core/util/equal_graph_def.cc | tensorflow/core/util/equal_graph_def_test.cc | #ifndef TENSORFLOW_CORE_UTIL_EQUAL_GRAPH_DEF_H_
#define TENSORFLOW_CORE_UTIL_EQUAL_GRAPH_DEF_H_
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class GraphDef;
class NodeDef;
struct EqualGraphDefOptions {
bool ignore_internal_attrs = true;
};
bool EqualGraphDef(const GraphDef& actual, const GraphDef& expected,
string* diff, const EqualGraphDefOptions& options = {});
uint64 GraphDefHash(const GraphDef& gdef,
const EqualGraphDefOptions& options = {});
bool EqualNodeDef(const NodeDef& actual, const NodeDef& expected, string* diff,
const EqualGraphDefOptions& options = {});
uint64 NodeDefHash(const NodeDef& ndef,
const EqualGraphDefOptions& options = {});
bool EqualRepeatedNodeDef(const protobuf::RepeatedPtrField<NodeDef>& actual,
const protobuf::RepeatedPtrField<NodeDef>& expected,
string* diff,
const EqualGraphDefOptions& options = {});
uint64 RepeatedNodeDefHash(const protobuf::RepeatedPtrField<NodeDef>& ndefs,
const EqualGraphDefOptions& options = {});
#define TF_EXPECT_GRAPH_EQ(expected, actual) \
do { \
string diff; \
EXPECT_TRUE(EqualGraphDef(actual, expected, &diff)) \
<< diff << "\nExpected:\n" \
<< SummarizeGraphDef(expected) << "\nActual:\n" \
<< SummarizeGraphDef(actual); \
} while (false)
}
#endif
#include "tensorflow/core/util/equal_graph_def.h"
#include <map>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
bool EqualGraphDef(const GraphDef& actual, const GraphDef& expected,
string* diff, const EqualGraphDefOptions& options) {
return EqualRepeatedNodeDef(actual.node(), expected.node(), diff, options);
}
uint64 GraphDefHash(const GraphDef& gdef, const EqualGraphDefOptions& options) {
return RepeatedNodeDefHash(gdef.node(), options);
}
bool EqualRepeatedNodeDef(const protobuf::RepeatedPtrField<NodeDef>& actual,
const protobuf::RepeatedPtrField<NodeDef>& expected,
string* diff, const EqualGraphDefOptions& options) {
std::unordered_map<string, const NodeDef*> actual_index;
for (const NodeDef& node : actual) {
actual_index[node.name()] = &node;
}
for (const NodeDef& expected_node : expected) {
auto actual_iter = actual_index.find(expected_node.name());
if (actual_iter == actual_index.end()) {
if (diff != nullptr) {
*diff = strings::StrCat("Did not find expected node '",
SummarizeNodeDef(expected_node), "'");
}
return false;
}
if (!EqualNodeDef(*actual_iter->second, expected_node, diff, options)) {
return false;
}
actual_index.erase(actual_iter);
}
if (!actual_index.empty()) {
if (diff != nullptr) {
*diff =
strings::StrCat("Found unexpected node '",
SummarizeNodeDef(*actual_index.begin()->second), "'");
}
return false;
}
return true;
}
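// Hedged note on the comparison above: nodes are matched by name, so node
// order is irrelevant. The walk stops at the first difference (a missing
// expected node, a field-level mismatch from EqualNodeDef(), or a leftover
// unexpected node), and only that difference is written to *diff.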
uint64 RepeatedNodeDefHash(const protobuf::RepeatedPtrField<NodeDef>& ndefs,
const EqualGraphDefOptions& options) {
uint64 h = 0xDECAFCAFFE;
std::map<string, const NodeDef*> nodes;
for (const NodeDef& node : ndefs) {
nodes[node.name()] = &node;
}
for (const auto& pair : nodes) {
h = Hash64(pair.first.data(), pair.first.size(), h);
h = Hash64Combine(NodeDefHash(*pair.second, options), h);
}
return h;
}
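// Hedged note: the std::map above sorts nodes by name before hashing, so
// RepeatedNodeDefHash() is insensitive to node order, mirroring
// EqualRepeatedNodeDef(). Two graphs that differ only in node ordering
// therefore hash to the same value.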
namespace {
string JoinStringField(const protobuf::RepeatedPtrField<string>& f) {
string ret;
for (int i = 0; i < f.size(); ++i) {
if (i > 0) strings::StrAppend(&ret, ", ");
strings::StrAppend(&ret, f.Get(i));
}
return ret;
}
}
bool EqualNodeDef(const NodeDef& actual, const NodeDef& expected, string* diff,
const EqualGraphDefOptions& options) {
if (actual.name() != expected.name()) {
if (diff != nullptr) {
*diff = strings::StrCat("Actual node name '", actual.name(),
"' is not expected '", expected.name(), "'");
}
return false;
}
if (actual.op() != expected.op()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has op '",
actual.op(), "' that is not expected '",
expected.op(), "'");
}
return false;
}
if (actual.device() != expected.device()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has device '",
actual.device(), "' that is not expected '",
expected.device(), "'");
}
return false;
}
if (actual.input_size() != expected.input_size()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has inputs '",
JoinStringField(actual.input()),
"' that don't match expected '",
JoinStringField(expected.input()), "'");
}
return false;
}
int first_control_input = actual.input_size();
for (int i = 0; i < actual.input_size(); ++i) {
if (absl::StartsWith(actual.input(i), "^")) {
first_control_input = i;
break;
}
if (actual.input(i) != expected.input(i) &&
actual.input(i) != strings::StrCat(expected.input(i), ":0") &&
strings::StrCat(actual.input(i), ":0") != expected.input(i)) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(), "' has input ",
i, " '", actual.input(i),
"' that doesn't match expected '",
expected.input(i), "'");
}
return false;
}
}
std::unordered_set<string> actual_control;
std::unordered_set<string> expected_control;
for (int i = first_control_input; i < actual.input_size(); ++i) {
actual_control.insert(actual.input(i));
expected_control.insert(expected.input(i));
}
for (const auto& e : expected_control) {
if (actual_control.erase(e) == 0) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(),
"' missing expected control input '", e, "'");
}
return false;
}
}
if (!actual_control.empty()) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(),
"' has unexpected control input '",
*actual_control.begin(), "'");
}
return false;
}
std::unordered_set<string> actual_attr;
for (const auto& a : actual.attr()) {
if (options.ignore_internal_attrs && !a.first.empty() &&
a.first[0] == '_') {
continue;
}
actual_attr.insert(a.first);
}
for (const auto& e : expected.attr()) {
if (options.ignore_internal_attrs && !e.first.empty() &&
e.first[0] == '_') {
continue;
}
if (actual_attr.erase(e.first) == 0) {
if (diff != nullptr) {
*diff = strings::StrCat("Node named '", actual.name(),
"' missing expected attr '", e.first,
"' with value: ", SummarizeAttrValue(e.second));
}
return false;
}
auto iter = actual.attr().find(e.first);
if (!AreAttrValuesEqual(e.second, iter->second)) {
if (diff != nullptr) {
*diff = strings::StrCat(
"Node named '", actual.name(), "' has attr '", e.first,
"' with value: ", SummarizeAttrValue(iter->second),
" that does not match expected: ", SummarizeAttrValue(e.second));
}
return false;
}
}
if (!actual_attr.empty()) {
if (diff != nullptr) {
*diff = strings::StrCat(
"Node named '", actual.name(), "' has unexpected attr '",
*actual_attr.begin(), "' with value: ",
SummarizeAttrValue(actual.attr().find(*actual_attr.begin())->second));
}
return false;
}
return true;
}
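// Hedged note on the input checks above: a data input "A" compares equal to
// "A:0" in either direction, since both name output slot 0 of node A, while
// control inputs (prefixed with "^") are compared as unordered sets.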
uint64 NodeDefHash(const NodeDef& ndef, const EqualGraphDefOptions& options) {
uint64 h = Hash64(ndef.name());
h = Hash64(ndef.op().data(), ndef.op().size(), h);
h = Hash64(ndef.device().data(), ndef.device().size(), h);
int first_control_input = ndef.input_size();
for (int i = 0; i < ndef.input_size(); ++i) {
if (absl::StartsWith(ndef.input(i), "^")) {
first_control_input = i;
break;
}
h = Hash64(ndef.input(i).data(), ndef.input(i).size(), h);
}
std::set<string> ndef_control;
for (int i = first_control_input; i < ndef.input_size(); ++i) {
ndef_control.insert(ndef.input(i));
}
for (const string& s : ndef_control) {
h = Hash64(s.data(), s.size(), h);
}
std::map<string, AttrValue> ndef_attr;
for (const auto& a : ndef.attr()) {
if (options.ignore_internal_attrs && !a.first.empty() &&
a.first[0] == '_') {
continue;
}
ndef_attr[a.first] = a.second;
}
for (const auto& a : ndef_attr) {
h = Hash64(a.first.data(), a.first.size(), h);
h = Hash64Combine(AttrValueHash(a.second), h);
}
return h;
}
} | #include <utility>
#include "tensorflow/core/util/equal_graph_def.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
REGISTER_OP("Input").Output("o: float");
REGISTER_OP("Alternate").Output("o: float");
REGISTER_OP("Combine").Input("a: float").Input("b: float").Output("o: float");
Node* Input(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("Input", opts);
}
Node* Alternate(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("Alternate", opts);
}
Node* Combine(ops::NodeOut a, ops::NodeOut b,
const GraphDefBuilder::Options& opts) {
return ops::BinaryOp("Combine", std::move(a), std::move(b), opts);
}
class EqualGraphDefTest : public ::testing::Test {
protected:
EqualGraphDefTest()
: e_(GraphDefBuilder::kFailImmediately),
a_(GraphDefBuilder::kFailImmediately) {}
bool Match() {
GraphDef expected;
TF_EXPECT_OK(e_.ToGraphDef(&expected));
GraphDef actual;
TF_EXPECT_OK(a_.ToGraphDef(&actual));
bool match = EqualGraphDef(actual, expected, &diff_);
if (match) {
EXPECT_EQ(GraphDefHash(expected), GraphDefHash(actual));
} else {
EXPECT_NE(GraphDefHash(expected), GraphDefHash(actual));
}
return match;
}
GraphDefBuilder e_;
GraphDefBuilder a_;
string diff_;
};
TEST_F(EqualGraphDefTest, Match) {
Input(e_.opts().WithName("A"));
Input(a_.opts().WithName("A"));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, NoMatch) {
Input(e_.opts().WithName("A"));
Input(a_.opts().WithName("B"));
EXPECT_FALSE(Match());
EXPECT_EQ("Did not find expected node '{{node A}} = Input[]()'", diff_);
}
TEST_F(EqualGraphDefTest, MissingNode) {
Input(e_.opts().WithName("A"));
Input(e_.opts().WithName("B"));
Input(a_.opts().WithName("A"));
EXPECT_FALSE(Match());
EXPECT_EQ("Did not find expected node '{{node B}} = Input[]()'", diff_);
}
TEST_F(EqualGraphDefTest, ExtraNode) {
Input(e_.opts().WithName("A"));
Input(a_.opts().WithName("A"));
Input(a_.opts().WithName("B"));
EXPECT_FALSE(Match());
EXPECT_EQ("Found unexpected node '{{node B}} = Input[]()'", diff_);
}
TEST_F(EqualGraphDefTest, NodeOrder) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Combine(a, b, e_.opts().WithName("C"));
b = Input(a_.opts().WithName("B"));
a = Input(a_.opts().WithName("A"));
Combine(a, b, a_.opts().WithName("C"));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, NameMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
EXPECT_FALSE(EqualNodeDef(a->def(), b->def(), &diff_));
EXPECT_EQ("Actual node name 'A' is not expected 'B'", diff_);
}
TEST_F(EqualGraphDefTest, OpMismatch) {
Input(e_.opts().WithName("A"));
Alternate(a_.opts().WithName("A"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'A' has op 'Alternate' that is not expected 'Input'",
diff_);
}
TEST_F(EqualGraphDefTest, DeviceMatch) {
Input(e_.opts().WithName("A").WithDevice("/cpu:0"));
Input(a_.opts().WithName("A").WithDevice("/cpu:0"));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, DeviceMismatch) {
Input(e_.opts().WithName("A").WithDevice("/cpu:0"));
Input(a_.opts().WithName("A").WithDevice("/cpu:1"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'A' has device '/cpu:1' that is not expected '/cpu:0'",
diff_);
}
TEST_F(EqualGraphDefTest, InputMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Combine(a, a, e_.opts().WithName("C"));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
Combine(b, b, a_.opts().WithName("C"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'C' has input 0 'B' that doesn't match expected 'A'",
diff_);
}
TEST_F(EqualGraphDefTest, InputOrderMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Combine(a, b, e_.opts().WithName("C"));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
Combine(b, a, a_.opts().WithName("C"));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'C' has input 0 'B' that doesn't match expected 'A'",
diff_);
}
TEST_F(EqualGraphDefTest, ControlInputOrder) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Node* d = Input(e_.opts().WithName("D"));
Combine(a, a,
e_.opts()
.WithName("E")
.WithControlInput(b)
.WithControlInput(c)
.WithControlInput(d));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
d = Input(a_.opts().WithName("D"));
Combine(a, a,
a_.opts()
.WithName("E")
.WithControlInput(c)
.WithControlInput(d)
.WithControlInput(b));
EXPECT_TRUE(Match()) << diff_;
}
TEST_F(EqualGraphDefTest, ControlInputMismatch) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Node* d = Input(e_.opts().WithName("D"));
Combine(a, a,
e_.opts().WithName("E").WithControlInput(b).WithControlInput(c));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
d = Input(a_.opts().WithName("D"));
Combine(a, a,
a_.opts().WithName("E").WithControlInput(b).WithControlInput(d));
EXPECT_FALSE(Match());
EXPECT_EQ("Node named 'E' missing expected control input '^C'", diff_);
}
TEST_F(EqualGraphDefTest, ControlInputAdded) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Combine(a, a, e_.opts().WithName("D").WithControlInput(b));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
Combine(a, a,
a_.opts().WithName("D").WithControlInput(b).WithControlInput(c));
EXPECT_FALSE(Match());
EXPECT_EQ(
"Node named 'D' has inputs 'A, A, ^B, ^C' that don't match "
"expected 'A, A, ^B'",
diff_);
}
TEST_F(EqualGraphDefTest, ControlInputRemoved) {
Node* a = Input(e_.opts().WithName("A"));
Node* b = Input(e_.opts().WithName("B"));
Node* c = Input(e_.opts().WithName("C"));
Combine(a, a,
e_.opts().WithName("D").WithControlInput(b).WithControlInput(c));
a = Input(a_.opts().WithName("A"));
b = Input(a_.opts().WithName("B"));
c = Input(a_.opts().WithName("C"));
Combine(a, a, a_.opts().WithName("D").WithControlInput(b));
EXPECT_FALSE(Match());
EXPECT_EQ(
"Node named 'D' has inputs 'A, A, ^B' that don't match "
"expected 'A, A, ^B, ^C'",
diff_);
}
TEST_F(EqualGraphDefTest, Attr) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef same(a->def());
AddNodeAttr("foo", "bar", &same);
EXPECT_TRUE(EqualNodeDef(same, same, &diff_)) << diff_;
}
TEST_F(EqualGraphDefTest, AttrAdded) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
EXPECT_FALSE(EqualNodeDef(actual, a->def(), &diff_));
EXPECT_EQ("Node named 'A' has unexpected attr 'foo' with value: \"bar\"",
diff_);
}
TEST_F(EqualGraphDefTest, AttrRemoved) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef expected(a->def());
AddNodeAttr("foo", "bar", &expected);
EXPECT_FALSE(EqualNodeDef(a->def(), expected, &diff_));
EXPECT_EQ("Node named 'A' missing expected attr 'foo' with value: \"bar\"",
diff_);
}
TEST_F(EqualGraphDefTest, AttrOrder) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
AddNodeAttr("baz", 42, &actual);
NodeDef expected(a->def());
AddNodeAttr("baz", 42, &expected);
AddNodeAttr("foo", "bar", &expected);
EXPECT_TRUE(EqualNodeDef(actual, expected, &diff_)) << diff_;
}
TEST_F(EqualGraphDefTest, AttrMismatch) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
AddNodeAttr("baz", 5, &actual);
NodeDef expected(a->def());
AddNodeAttr("baz", 42, &expected);
AddNodeAttr("foo", "bar", &expected);
EXPECT_FALSE(EqualNodeDef(actual, expected, &diff_));
EXPECT_EQ(
"Node named 'A' has attr 'baz' with value: 5 that does not match "
"expected: 42",
diff_);
}
TEST_F(EqualGraphDefTest, IgnoreInternalAttrs) {
Node* a = Input(e_.opts().WithName("A"));
NodeDef actual(a->def());
AddNodeAttr("foo", "bar", &actual);
AddNodeAttr("_class", 5, &actual);
NodeDef expected(a->def());
AddNodeAttr("foo", "bar", &expected);
AddNodeAttr("_kernel", "eigen", &actual);
EXPECT_TRUE(EqualNodeDef(actual, expected, &diff_));
}
}
} |
1,710 | cpp | tensorflow/tensorflow | stat_summarizer | tensorflow/core/util/stat_summarizer.cc | tensorflow/core/util/stat_summarizer_test.cc | #ifndef TENSORFLOW_CORE_UTIL_STAT_SUMMARIZER_H_
#define TENSORFLOW_CORE_UTIL_STAT_SUMMARIZER_H_
#include <stdlib.h>
#include <cmath>
#include <limits>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
namespace tensorflow {
class GraphDef;
class StepStats;
class NodeExecStats;
class StatSummarizer {
public:
explicit StatSummarizer(const StatSummarizerOptions& options);
explicit StatSummarizer(const tensorflow::GraphDef& tensorflow_graph);
~StatSummarizer();
void ProcessStepStats(const StepStats& step_stats);
std::string GetOutputString() const {
return stats_calculator_->GetOutputString();
}
std::string ShortSummary() const {
return stats_calculator_->GetShortSummary();
}
void PrintStepStats() const;
void PrintOutputs() const;
void ComputeStatsByType(
std::map<std::string, int64_t>* node_type_map_count,
std::map<std::string, int64_t>* node_type_map_time,
std::map<std::string, int64_t>* node_type_map_memory,
std::map<std::string, int64_t>* node_type_map_times_called,
int64_t* accumulated_us) const {
stats_calculator_->ComputeStatsByType(
node_type_map_count, node_type_map_time, node_type_map_memory,
node_type_map_times_called, accumulated_us);
}
std::string GetStatsByNodeType() const {
return stats_calculator_->GetStatsByNodeType();
}
std::string GetStatsByMetric(const string& title,
StatsCalculator::SortingMetric sorting_metric,
int num_stats) const {
return stats_calculator_->GetStatsByMetric(title, sorting_metric,
num_stats);
}
int num_runs() const { return stats_calculator_->num_runs(); }
const Stat<int64_t>& run_total_us() const {
return stats_calculator_->run_total_us();
}
private:
void Validate(const std::vector<TensorDescription>* outputs,
const NodeExecStats& ns) const;
std::map<std::string, std::vector<TensorDescription> > outputs_;
std::unique_ptr<StatsCalculator> stats_calculator_;
};
}
#endif
#include "tensorflow/core/util/stat_summarizer.h"
#include <iomanip>
#include <map>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using Detail = StatsCalculator::Detail;
StatSummarizer::StatSummarizer(const StatSummarizerOptions& options)
: stats_calculator_(new StatsCalculator(options)) {}
StatSummarizer::StatSummarizer(const tensorflow::GraphDef& tensorflow_graph)
: stats_calculator_(new StatsCalculator(StatSummarizerOptions())) {}
StatSummarizer::~StatSummarizer() = default;
void StatSummarizer::Validate(const std::vector<TensorDescription>* outputs,
const NodeExecStats& ns) const {
if (outputs->size() != ns.output_size()) {
LOG(WARNING) << "Number of outputs changed between runs for '"
<< ns.node_name() << "' - was " << outputs->size() << ", now "
<< ns.output_size();
} else {
for (const auto& output : ns.output()) {
const int32_t slot = output.slot();
if ((slot < 0) || (slot >= ns.output_size())) {
continue;
}
const auto& stored = (*outputs)[slot];
const auto& current = output.tensor_description();
bool do_tensors_match =
(stored.dtype() == current.dtype()) &&
(stored.shape().dim_size() == current.shape().dim_size());
if (do_tensors_match) {
for (int i = 0; i < stored.shape().dim_size(); ++i) {
if (stored.shape().dim(i).size() != current.shape().dim(i).size()) {
do_tensors_match = false;
break;
}
}
}
if (!do_tensors_match) {
LOG(WARNING) << "Output tensor changed between runs for '"
<< ns.node_name();
}
}
}
}
void StatSummarizer::PrintStepStats() const {
string output = GetOutputString();
std::istringstream iss(output);
for (std::string line; std::getline(iss, line);) {
LOG(INFO) << line;
}
}
namespace {
std::string OpType(const DeviceStepStats& ds, const NodeExecStats& ns) {
if (absl::StrContains(ds.device(), "/stream") ||
absl::StrContains(ds.device(), "/memcpy")) {
return "<>";
}
const std::string sep(" = ");
const std::string& label = ns.timeline_label();
std::string::size_type start = label.find(sep);
if (start == std::string::npos) return "<>";
start += sep.size();
std::string::size_type end = label.find('(', start);
if (end == std::string::npos) return "<>";
return label.substr(start, end - start);
}
}
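// Hedged illustration (example label assumed, not from the file): OpType()
// extracts the op name from a timeline label of the form
// "name = OpName(args)". For ns.timeline_label() == "my_mul = Mul(a, b)" on
// an ordinary device it returns "Mul"; on "/stream" or "/memcpy" devices,
// or when the label does not match, it returns the placeholder "<>".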
void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
int64_t curr_total_us = 0;
int64_t mem_total = 0;
int node_num = 0;
for (const auto& ds : step_stats.dev_stats()) {
for (const auto& ns : ds.node_stats()) {
if (absl::StrContains(ds.device(), "/stream") &&
!absl::StrContains(ds.device(), "/stream:all")) {
continue;
}
if (absl::StrContains(ds.device(), "/host:CPU")) {
continue;
}
std::string name = ns.node_name();
std::string op_type = "<>";
if (absl::StrContains(ds.device(), "/stream")) {
auto parts = str_util::Split(ns.node_name(), ':');
if (parts.size() == 2) {
name = parts[0] + " [Kernel]";
op_type = "gpu:" + parts[1];
}
} else if (absl::StrContains(ds.device(), "/memcpy")) {
auto parts = str_util::Split(ns.node_name(), ':');
if (parts.size() == 2 || parts.size() == 3) {
name = parts.front() + " [MemCpy]";
op_type = "gpu:" + parts.back();
}
} else {
op_type = OpType(ds, ns);
}
++node_num;
const int64_t curr_time = ns.all_end_rel_micros();
curr_total_us += curr_time;
auto output_result =
outputs_.emplace(name, std::vector<TensorDescription>());
std::vector<TensorDescription>* outputs = &(output_result.first->second);
int64_t rel_end_us = curr_time;
if (output_result.second) {
outputs->resize(ns.output_size());
for (const auto& output : ns.output()) {
const int32_t slot = output.slot();
if ((slot < 0) || (slot >= ns.output_size())) {
continue;
}
(*outputs)[slot] = output.tensor_description();
}
}
int64_t curr_node_mem = 0;
for (const auto& mem : ns.memory()) {
const int64_t mem_usage = mem.total_bytes();
curr_node_mem += mem_usage;
}
stats_calculator_->AddNodeStats(name, op_type, node_num, rel_end_us,
curr_node_mem);
mem_total += curr_node_mem;
Validate(outputs, ns);
}
}
stats_calculator_->UpdateRunTotalUs(curr_total_us);
stats_calculator_->UpdateMemoryUsed(mem_total);
}
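// Hedged summary of the filtering above: per-node GPU timings are taken
// only from the aggregated "/stream:all" device (individual "/stream:N"
// entries are skipped, as are "/host:CPU" entries), and kernel or memcpy
// node names of the form "node:detail" are rewritten to "node [Kernel]" or
// "node [MemCpy]" with op type "gpu:detail".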
void StatSummarizer::PrintOutputs() const {
std::priority_queue<
std::pair<int64_t, const std::pair<const std::string, Detail>*>>
timings;
for (const auto& entry : stats_calculator_->GetDetails()) {
timings.emplace(-entry.second.run_order, &entry);
}
LOG(INFO) << "============ Node output tensor sizes in run order ========";
while (!timings.empty()) {
auto entry = timings.top();
timings.pop();
std::stringstream stream;
const auto detail_outputs = outputs_.at(entry.second->first);
stream << entry.second->first << "\t" << detail_outputs.size();
for (const auto& tensor : detail_outputs) {
stream << "\t" << DataTypeString(tensor.dtype());
stream << "\t" << tensor.shape().dim_size();
for (const auto& d : tensor.shape().dim()) {
stream << "\t" << d.size();
}
}
LOG(INFO) << stream.str();
}
}
} | #include "tensorflow/core/util/stat_summarizer.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(StatSummarizerTest, ExtractsOpTypes) {
const std::string graph_def_str(R"EOF(
node {
name: "myconstant"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
versions {
producer: 21
}
)EOF");
GraphDef graph_def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(graph_def_str, &graph_def));
std::unique_ptr<Session> session(NewSession(SessionOptions()));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph_def));
RunOptions run_options;
run_options.set_trace_level(RunOptions::FULL_TRACE);
RunMetadata run_metadata;
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(run_options, {}, {"myconstant:0"}, {}, &outputs,
&run_metadata));
StatSummarizer stats(graph_def);
stats.ProcessStepStats(run_metadata.step_stats());
const std::string output = stats.GetOutputString();
const std::string by_node_type = stats.GetStatsByNodeType();
ASSERT_TRUE(absl::StrContains(output, "Const")) << output;
ASSERT_TRUE(absl::StrContains(output, "myconstant")) << output;
ASSERT_TRUE(absl::StrContains(by_node_type, "Const")) << by_node_type;
}
}
} |
1,711 | cpp | tensorflow/tensorflow | saved_tensor_slice_util | tensorflow/core/util/saved_tensor_slice_util.cc | tensorflow/core/util/saved_tensor_slice_util_test.cc | #ifndef TENSORFLOW_CORE_UTIL_SAVED_TENSOR_SLICE_UTIL_H_
#define TENSORFLOW_CORE_UTIL_SAVED_TENSOR_SLICE_UTIL_H_
#include <string>
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace checkpoint {
extern const char kSavedTensorSlicesKey[];
string EncodeTensorNameSlice(const string& name,
const tensorflow::TensorSlice& slice);
Status DecodeTensorNameSlice(const string& code, string* name,
tensorflow::TensorSlice* slice);
Status ParseShapeAndSlice(const string& shape_and_slice, TensorShape* shape,
TensorSlice* slice, TensorShape* shape_slice);
template <typename T>
struct SaveTypeTraits;
template <typename T>
int TensorProtoDataSize(const TensorProto& t);
template <typename T>
const typename SaveTypeTraits<T>::SavedType* TensorProtoData(
const TensorProto& t);
template <typename T>
typename SaveTypeTraits<T>::RepeatedField* MutableTensorProtoData(
TensorProto* t);
template <typename T>
void Fill(T* data, size_t n, TensorProto* t);
#define TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, STYPE) \
template <> \
struct SaveTypeTraits<TYPE> { \
static constexpr bool supported = true; \
typedef STYPE SavedType; \
typedef protobuf::RepeatedField<FTYPE> RepeatedField; \
}; \
template <> \
inline const STYPE* TensorProtoData<TYPE>(const TensorProto& t) { \
static_assert(SaveTypeTraits<TYPE>::supported, \
"Specified type " #TYPE " not supported for Restore"); \
return reinterpret_cast<const STYPE*>(t.FIELD##_val().data()); \
} \
template <> \
inline protobuf::RepeatedField<FTYPE>* MutableTensorProtoData<TYPE>( \
TensorProto * t) { \
static_assert(SaveTypeTraits<TYPE>::supported, \
"Specified type " #TYPE " not supported for Save"); \
return reinterpret_cast<protobuf::RepeatedField<FTYPE>*>( \
t->mutable_##FIELD##_val()); \
}
#define TENSOR_PROTO_EXTRACT_TYPE(TYPE, FIELD, FTYPE) \
TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, FTYPE) \
template <> \
inline int TensorProtoDataSize<TYPE>(const TensorProto& t) { \
return t.FIELD##_val_size(); \
} \
template <> \
inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \
typename protobuf::RepeatedField<FTYPE> copy(data, data + n); \
t->mutable_##FIELD##_val()->Swap(&copy); \
}
#define TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(TYPE, FIELD, FTYPE) \
TENSOR_PROTO_EXTRACT_TYPE_HELPER(TYPE, FIELD, FTYPE, TYPE) \
template <> \
inline int TensorProtoDataSize<TYPE>(const TensorProto& t) { \
return t.FIELD##_val_size() / 2; \
} \
template <> \
inline void Fill(const TYPE* data, size_t n, TensorProto* t) { \
const FTYPE* sub = reinterpret_cast<const FTYPE*>(data); \
typename protobuf::RepeatedField<FTYPE> copy(sub, sub + 2 * n); \
t->mutable_##FIELD##_val()->Swap(&copy); \
}
TENSOR_PROTO_EXTRACT_TYPE(bool, bool, bool);
TENSOR_PROTO_EXTRACT_TYPE(float, float, float);
TENSOR_PROTO_EXTRACT_TYPE(double, double, double);
TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex64, scomplex, float);
TENSOR_PROTO_EXTRACT_TYPE_COMPLEX(complex128, dcomplex, double);
TENSOR_PROTO_EXTRACT_TYPE(int32, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(uint32, uint32, uint32);
TENSOR_PROTO_EXTRACT_TYPE(int64_t, int64, protobuf_int64);
TENSOR_PROTO_EXTRACT_TYPE(uint64, uint64, protobuf_uint64);
TENSOR_PROTO_EXTRACT_TYPE(uint16, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(uint8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(int8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(int16, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(qint8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(quint8, int, int32);
TENSOR_PROTO_EXTRACT_TYPE(quint16, int, int32);
#undef TENSOR_PROTO_EXTRACT_TYPE_COMPLEX
#undef TENSOR_PROTO_EXTRACT_TYPE_HELPER
#undef TENSOR_PROTO_EXTRACT_TYPE
template <>
struct SaveTypeTraits<qint32> : SaveTypeTraits<int32> {};
template <>
inline int TensorProtoDataSize<qint32>(const TensorProto& t) {
return t.int_val_size();
}
template <>
inline const int32* TensorProtoData<qint32>(const TensorProto& t) {
static_assert(SaveTypeTraits<qint32>::supported,
"Specified type qint32 not supported for Restore");
return reinterpret_cast<const int32*>(t.int_val().data());
}
inline void Fill(const qint32* data, size_t n, TensorProto* t) {
const int32* p = reinterpret_cast<const int32*>(data);
typename protobuf::RepeatedField<int32> copy(p, p + n);
t->mutable_int_val()->Swap(&copy);
}
template <>
struct SaveTypeTraits<Eigen::half> {
static constexpr bool supported = true;
typedef int SavedType;
typedef protobuf::RepeatedField<int32> RepeatedField;
};
template <>
inline int TensorProtoDataSize<Eigen::half>(const TensorProto& t) {
return t.half_val_size();
}
template <>
inline const int* TensorProtoData<Eigen::half>(const TensorProto& t) {
return t.half_val().data();
}
template <>
inline protobuf::RepeatedField<int32>* MutableTensorProtoData<Eigen::half>(
TensorProto* t) {
return t->mutable_half_val();
}
template <>
inline void Fill(const Eigen::half* data, size_t n, TensorProto* t) {
typename protobuf::RepeatedField<int32>* val = t->mutable_half_val();
val->Resize(n, 0);
for (size_t i = 0; i < n; ++i) {
val->Set(i, Eigen::numext::bit_cast<uint16>(data[i]));
}
}
template <>
struct SaveTypeTraits<tstring> {
static constexpr bool supported = true;
typedef const string* SavedType;
typedef protobuf::RepeatedPtrField<string> RepeatedField;
};
template <>
inline int TensorProtoDataSize<tstring>(const TensorProto& t) {
return t.string_val_size();
}
template <>
inline const string* const* TensorProtoData<tstring>(const TensorProto& t) {
static_assert(SaveTypeTraits<tstring>::supported,
"Specified type tstring not supported for Restore");
return t.string_val().data();
}
template <>
inline protobuf::RepeatedPtrField<string>* MutableTensorProtoData<tstring>(
TensorProto* t) {
static_assert(SaveTypeTraits<tstring>::supported,
"Specified type tstring not supported for Save");
return t->mutable_string_val();
}
template <>
inline void Fill(const tstring* data, size_t n, TensorProto* t) {
typename protobuf::RepeatedPtrField<string> copy(data, data + n);
t->mutable_string_val()->Swap(&copy);
}
}
}
#endif
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/ordered_code.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace checkpoint {
const char kSavedTensorSlicesKey[] = "";
string EncodeTensorNameSlice(const string& name, const TensorSlice& slice) {
string buffer;
tensorflow::strings::OrderedCode::WriteNumIncreasing(&buffer, 0);
tensorflow::strings::OrderedCode::WriteString(&buffer, name);
tensorflow::strings::OrderedCode::WriteNumIncreasing(&buffer, slice.dims());
for (int d = 0; d < slice.dims(); ++d) {
tensorflow::strings::OrderedCode::WriteSignedNumIncreasing(&buffer,
slice.start(d));
tensorflow::strings::OrderedCode::WriteSignedNumIncreasing(&buffer,
slice.length(d));
}
return buffer;
}
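// Hedged note on the key layout above: the OrderedCode sequence is
//   0 (version tag), name, rank, then (start, length) per dimension,
// so encoded keys sort by tensor name first and slice offsets second. A
// full dimension is written with length -1, which DecodeTensorNameSlice()
// below leaves as a full-extent dimension.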
Status DecodeTensorNameSlice(const string& code, string* name,
tensorflow::TensorSlice* slice) {
StringPiece src(code);
uint64 x;
if (!tensorflow::strings::OrderedCode::ReadNumIncreasing(&src, &x)) {
return errors::Internal("Failed to parse the leading number: src = ", src);
}
if (x != 0) {
return errors::Internal(
"The leading number should always be 0 for any valid key: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadString(&src, name)) {
return errors::Internal("Failed to parse the tensor name: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadNumIncreasing(&src, &x)) {
return errors::Internal("Failed to parse the tensor rank: src = ", src);
}
if (x == 0) {
return errors::Internal("Expecting positive rank of the tensor, got ", x,
", src = ", src);
}
if (x >= kint32max) {
return errors::Internal("Too many elements ", x);
}
slice->SetFullSlice(x);
for (int d = 0; d < static_cast<int32>(x); ++d) {
int64_t start, length;
if (!tensorflow::strings::OrderedCode::ReadSignedNumIncreasing(&src,
&start)) {
return errors::Internal("Failed to parse start: src = ", src);
}
if (!tensorflow::strings::OrderedCode::ReadSignedNumIncreasing(&src,
&length)) {
return errors::Internal("Failed to parse length: src = ", src);
}
if (length >= 0) {
slice->set_start(d, start);
slice->set_length(d, length);
}
}
return absl::OkStatus();
}
Status ParseShapeAndSlice(const string& shape_and_slice, TensorShape* shape,
TensorSlice* slice, TensorShape* shape_slice) {
CHECK(!shape_and_slice.empty());
std::vector<string> splits = str_util::Split(shape_and_slice, ' ');
if (splits.size() < 2) {
return errors::InvalidArgument(
"Need least two elements in shape_and_slice specification: ",
shape_and_slice);
}
slice->Clear();
auto status = slice->Parse(splits.back(), slice);
if (!status.ok()) return status;
splits.pop_back();
shape->Clear();
for (const auto& s : splits) {
int64_t dim;
if (!strings::safe_strto64(s, &dim)) {
return errors::InvalidArgument(
"Non numerical dimension in shape_and_slice: ", shape_and_slice);
}
shape->AddDim(dim);
}
return slice->SliceTensorShape(*shape, shape_slice);
}
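// Hedged usage sketch (illustrative values): parsing a "shape and slice"
// string of the form "dim0 dim1 ... slice_spec".
//
//   TensorShape shape, shape_slice;
//   TensorSlice slice;
//   TF_RETURN_IF_ERROR(
//       ParseShapeAndSlice("4 5 1,2:-", &shape, &slice, &shape_slice));
//   // shape == [4, 5]; the slice keeps rows 1..2 and every column, so
//   // shape_slice == [2, 5].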
}
} | #include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace checkpoint {
namespace {
TEST(TensorShapeUtilTest, TensorNameSliceToOrderedCode) {
{
TensorSlice s = TensorSlice::ParseOrDie("-:-:1,3:4,5");
string buffer = EncodeTensorNameSlice("foo", s);
string name;
s.Clear();
TF_CHECK_OK(DecodeTensorNameSlice(buffer, &name, &s));
EXPECT_EQ("foo", name);
EXPECT_EQ("-:-:1,3:4,5", s.DebugString());
}
}
}
}
} |
1,712 | cpp | tensorflow/tensorflow | bcast | tensorflow/core/util/bcast.cc | tensorflow/core/util/bcast_test.cc | #ifndef TENSORFLOW_CORE_UTIL_BCAST_H_
#define TENSORFLOW_CORE_UTIL_BCAST_H_
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
inline void ComputeBatchIndices(
const int64_t output_batch_size,
const absl::InlinedVector<int64_t, 4UL>& reshape,
const absl::InlinedVector<int64_t, 4UL>& bcast,
std::vector<int64_t>* out_indices) {
out_indices->resize(output_batch_size);
int64_t num_output_elements = 1;
int64_t num_input_elements = 1;
for (int64_t i = reshape.size() - 1; i >= 0; --i) {
const int64_t dim = std::max(reshape[i], bcast[i]);
const int64_t incr = bcast[i] > 1 ? 0 : num_input_elements;
for (int64_t k = 0; k < (dim - 1) * num_output_elements; ++k) {
(*out_indices)[num_output_elements + k] = (*out_indices)[k] + incr;
}
num_output_elements *= dim;
num_input_elements *= reshape[i];
}
}
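// Hedged worked example (illustrative): with reshape = {2, 1} and
// bcast = {1, 3}, the output batch has 2 * 3 = 6 elements. The inner pass
// (dim broadcast from 1 to 3) repeats each input index three times with
// increment 0, and the outer pass adds num_input_elements = 1 per row,
// producing *out_indices == {0, 0, 0, 1, 1, 1}.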
template <int N>
class BCastList {
public:
typedef absl::InlinedVector<int64_t, 4UL> Vec;
explicit BCastList(const Vec (&x)[N], bool fewer_dims_optimization = true,
bool return_flattened_batch_indices = false);
~BCastList() = default;
bool IsValid() const { return valid_; }
bool IsBroadcastingRequired() const { return broadcasting_required_; }
const Vec& reshape(int i) const { return reshape_[i]; }
const Vec& bcast(int i) const { return bcast_[i]; }
const Vec& result_shape() const { return result_; }
const Vec& output_shape() const { return output_; }
const Vec& grad_reduce_idx(int i) const { return grad_reduce_idx_[i]; }
int64_t output_batch_size() const { return output_batch_size_; }
const std::vector<int64_t>& batch_indices(int i) const {
return batch_indices_[i];
}
protected:
bool valid_ = true;
bool broadcasting_required_ = true;
Vec reshape_[N];
Vec bcast_[N];
Vec result_;
Vec output_;
Vec grad_reduce_idx_[N];
int64_t output_batch_size_;
std::vector<int64_t> batch_indices_[N];
static void Reverse(Vec* shape) {
std::reverse(shape->begin(), shape->end());
}
BCastList(const BCastList&) = delete;
void operator=(const BCastList&) = delete;
};
template <int N>
BCastList<N>::BCastList(const BCastList::Vec (&x)[N],
const bool fewer_dims_optimization,
const bool return_flattened_batch_indices) {
typedef BCastList::Vec Vec;
auto mul_dims = [](int64_t dim1, int64_t dim2) -> int64_t {
return dim1 != 0 && dim2 != 0 && (dim1 < 0 || dim2 < 0) ? -1 : dim1 * dim2;
};
bool all_equal = true;
size_t largest_rank = 0;
output_batch_size_ = 1;
for (int i = 0; i < N; ++i) {
if (x[i] != x[0]) {
all_equal = false;
}
if (x[i].size() > largest_rank) {
largest_rank = x[i].size();
}
}
if (all_equal) {
broadcasting_required_ = false;
}
if (all_equal && TF_PREDICT_TRUE(fewer_dims_optimization)) {
int64_t elements = 1;
const int rank = x[0].size();
output_.resize(rank);
for (int i = 0; i < rank; i++) {
const int64_t dim = x[0][i];
elements = mul_dims(elements, dim);
output_[i] = dim;
}
result_.push_back(elements);
output_batch_size_ = elements;
for (int i = 0; i < N; ++i) {
reshape_[i].push_back(elements);
bcast_[i].push_back(1);
}
return;
}
Vec copy[N];
for (int i = 0; i < N; ++i) {
copy[i] = x[i];
Reverse(©[i]);
}
for (int i = 0; i < N; ++i) {
if (copy[i].size() < largest_rank) {
copy[i].resize(largest_rank, 1);
}
}
bool prev_is_one[N];
bool current_is_one[N];
for (int i = 0; i < N; ++i) {
prev_is_one[i] = false;
current_is_one[i] = false;
}
Vec output;
bool output_dim_set = false;
int64_t output_dim = -1;
bool none_is_one = true;
bool set_one = false;
for (int j = 0; j < largest_rank; ++j) {
output_dim = -1;
output_dim_set = false;
none_is_one = true;
for (int i = 0; i < N; ++i) {
if (copy[i][j] == 1) {
current_is_one[i] = true;
none_is_one = false;
} else {
current_is_one[i] = false;
if (!output_dim_set || copy[i][j] == output_dim) {
output_dim = copy[i][j];
output_dim_set = true;
} else {
valid_ = false;
return;
}
}
}
output_.push_back(output_dim_set ? output_dim : 1);
output_batch_size_ = mul_dims(output_batch_size_, output_.back());
if (!output_dim_set) {
if (!TF_PREDICT_TRUE(fewer_dims_optimization)) {
for (int i = 0; i < N; ++i) {
bcast_[i].push_back(1);
reshape_[i].push_back(1);
}
result_.push_back(1);
}
for (int i = 0; i < N; ++i) {
grad_reduce_idx_[i].push_back(largest_rank - 1 - j);
}
continue;
} else if (TF_PREDICT_TRUE(fewer_dims_optimization) &&
std::equal(current_is_one, current_is_one + N, prev_is_one) &&
set_one) {
result_.back() = mul_dims(result_.back(), output_dim);
for (int i = 0; i < N; ++i) {
reshape_[i].back() = mul_dims(reshape_[i].back(), copy[i][j]);
bcast_[i].back() =
mul_dims(bcast_[i].back(), current_is_one[i] ? output_dim : 1);
if (current_is_one[i] && !none_is_one) {
grad_reduce_idx_[i].push_back(largest_rank - 1 - j);
}
}
} else {
result_.push_back(output_dim);
for (int i = 0; i < N; ++i) {
reshape_[i].push_back(copy[i][j]);
bcast_[i].push_back(current_is_one[i] ? output_dim : 1);
if (current_is_one[i] && !none_is_one) {
grad_reduce_idx_[i].push_back(largest_rank - 1 - j);
}
}
}
set_one = true;
for (int i = 0; i < N; ++i) {
prev_is_one[i] = current_is_one[i];
}
}
if (result_.empty()) {
result_.push_back(1);
for (int i = 0; i < N; ++i) {
reshape_[i].push_back(1);
bcast_[i].push_back(1);
}
}
for (int i = 0; i < N; ++i) {
Reverse(&reshape_[i]);
Reverse(&bcast_[i]);
Reverse(&grad_reduce_idx_[i]);
}
Reverse(&result_);
Reverse(&output_);
if (return_flattened_batch_indices && broadcasting_required_ &&
output_batch_size_ > 0) {
for (int i = 0; i < N; ++i) {
ComputeBatchIndices(output_batch_size_, reshape_[i], bcast_[i],
&batch_indices_[i]);
}
}
}
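// Hedged worked example (illustrative shapes): BCast({2, 3, 4}, {1}). Every
// dimension broadcasts against the scalar, so with fewer_dims_optimization
// the three dimensions collapse into one: x_reshape == {24}, x_bcast == {1},
// y_reshape == {1}, y_bcast == {24}, result_shape == {24},
// output_shape == {2, 3, 4}, and grad_y_reduce_idx == {0, 1, 2}.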
class BCast : public BCastList<2> {
public:
typedef absl::InlinedVector<int64_t, 4UL> Vec;
BCast(const Vec& x, const Vec& y, const bool fewer_dims_optimization = true,
const bool return_flattened_batch_indices = false)
: BCastList<2>({x, y}, fewer_dims_optimization,
return_flattened_batch_indices) {}
~BCast() = default;
const Vec& x_reshape() const { return reshape_[0]; }
const Vec& x_bcast() const { return bcast_[0]; }
const Vec& y_reshape() const { return reshape_[1]; }
const Vec& y_bcast() const { return bcast_[1]; }
const Vec& result_shape() const { return result_; }
const Vec& output_shape() const { return output_; }
const Vec& grad_x_reduce_idx() const { return grad_reduce_idx_[0]; }
const Vec& grad_y_reduce_idx() const { return grad_reduce_idx_[1]; }
const std::vector<int64_t>& x_batch_indices() const {
return batch_indices_[0];
}
const std::vector<int64_t>& y_batch_indices() const {
return batch_indices_[1];
}
template <typename IndexType, int NDIMS>
static Eigen::array<IndexType, NDIMS> ToIndexArrayType(
const BCast::Vec& vec) {
CHECK_EQ(vec.size(), NDIMS);
Eigen::array<IndexType, NDIMS> ret;
for (int i = 0; i < NDIMS; ++i) ret[i] = vec[i];
return ret;
}
template <int NDIMS>
static Eigen::array<Eigen::DenseIndex, NDIMS> ToIndexArray(
const BCast::Vec& vec) {
return ToIndexArrayType<Eigen::DenseIndex, NDIMS>(vec);
}
static Vec FromShape(const TensorShape& shape);
static TensorShape ToShape(const Vec& vec);
private:
BCast(const BCast&) = delete;
void operator=(const BCast&) = delete;
};
}
#endif
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
BCast::Vec BCast::FromShape(const TensorShape& shape) {
const int N = shape.dims();
BCastList::Vec ret(N);
for (int i = 0; i < N; ++i) {
ret[i] = shape.dim_size(i);
}
return ret;
}
TensorShape BCast::ToShape(const BCastList::Vec& vec) {
TensorShape shape(vec);
return shape;
}
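// Hedged usage sketch: FromShape()/ToShape() convert between TensorShape
// and the InlinedVector form the broadcaster operates on, e.g.
//   BCast::Vec v = BCast::FromShape(TensorShape({2, 3}));  // {2, 3}
//   TensorShape s = BCast::ToShape(v);                     // shape [2, 3]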
} | #include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
string BCast(const tensorflow::BCast::Vec& x, const tensorflow::BCast::Vec& y,
const bool fewer_dims_optimization = true) {
tensorflow::BCast b(x, y, fewer_dims_optimization);
if (!b.IsValid()) {
return "invalid";
}
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_reshape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_bcast(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_reshape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_bcast(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.result_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.output_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_x_reduce_idx(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_y_reduce_idx(), ","), "]");
return ret;
}
string BCastBatchIndices(const tensorflow::BCast::Vec& x,
const tensorflow::BCast::Vec& y,
const bool fewer_dims_optimization = true) {
tensorflow::BCast b(x, y, fewer_dims_optimization,
true);
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_batch_indices(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_batch_indices(), ","), "]");
return ret;
}
string BCastList3(const tensorflow::BCast::Vec& x,
const tensorflow::BCast::Vec& y,
const tensorflow::BCast::Vec& z,
const bool fewer_dims_optimization = true) {
tensorflow::BCastList<3> b({x, y, z}, fewer_dims_optimization);
if (!b.IsValid()) {
return "invalid";
}
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(2), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(2), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.result_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.output_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(2), ","), "]");
return ret;
}
TEST(BCastTest, Invalid) {
for (const bool use_optimization : {true, false}) {
EXPECT_EQ("invalid", BCast({5, 3, 2}, {3}, use_optimization));
EXPECT_EQ("invalid", BCast({5, 3, 2}, {2, 2}, use_optimization));
EXPECT_EQ("invalid", BCast({5, 3, 2}, {10, 1, 1}, use_optimization));
EXPECT_EQ("invalid",
BCast({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, use_optimization));
}
}
TEST(BCastListTest, Invalid) {
for (const bool use_optimization : {true, false}) {
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {3}, {1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {2, 2}, {1}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({5, 3, 2}, {10, 1, 1}, {1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, {1},
use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {1}, {3}, use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {1}, {2, 2}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({5, 3, 2}, {1}, {10, 1, 1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1}, {5, 3, 2}, {3}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1}, {5, 3, 2}, {2, 2}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({1}, {5, 3, 2}, {10, 1, 1}, use_optimization));
}
}
TEST(BCastTest, Basic_SameShape) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}),
"[2310][1][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[][]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][]");
}
TEST(BCastListTest, Basic_SameShape) {
EXPECT_EQ(BCastList3({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}),
"[2310][1][2310][1][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[][][]");
EXPECT_EQ(
BCastList3({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][][]");
}
TEST(BCastTest, Basic_SameShapeWithZeroDim) {
EXPECT_EQ(BCast({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}),
"[0][1][0][1]"
"[0]"
"[11,7,0,3,2]"
"[][]");
EXPECT_EQ(BCast({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, false),
"[11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1]"
"[11,7,0,3,2]"
"[11,7,0,3,2]"
"[][]");
}
TEST(BCastListTest, Basic_SameShapeWithZeroDim) {
EXPECT_EQ(BCastList3({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}),
"[0][1][0][1][0][1]"
"[0]"
"[11,7,0,3,2]"
"[][][]");
EXPECT_EQ(
BCastList3({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, false),
"[11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1]"
"[11,7,0,3,2]"
"[11,7,0,3,2]"
"[][][]");
}
TEST(BCastTest, Basic_Scalar_Scalar) {
EXPECT_EQ(BCast({1, 1}, {}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {1, 1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {1, 1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
}
TEST(BCastTest, Basic_TrueScalar_Scalar) {
EXPECT_EQ(BCast({}, {}),
"[1][1][1][1]"
"[1]"
"[]"
"[][]");
EXPECT_EQ(BCast({}, {1}),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({}, {1}, false),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({}, {1, 1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({}, {1, 1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {}),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({1}, {}, false),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({1, 1}, {}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
}
TEST(BCastListTest, Basic_Scalar_Scalar_Scalar) {
EXPECT_EQ(BCastList3({1, 1}, {1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1, 1}, {1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1, 1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1}, {1, 1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1}, {1, 1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
}
TEST(BCastListTest, Basic_TrueScalar_Scalar_Scalar) {
EXPECT_EQ(BCastList3({1, 1}, {1}, {}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1, 1}, {1}, {}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({}, {1, 1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({}, {1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {}, {1, 1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {}, {1, 1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
}
TEST(BCastTest, Basic_Tensor_Scalar) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,3,2]"
"[][0,1,2,3,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,1,1][11,7,5,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,3,4]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[0,1,2,3,4][]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2}, false),
"[1,1,1,1,1][11,7,5,3,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,3,4][]");
EXPECT_EQ(BCast({1, 2147483648}, {1}),
"[2147483648][1][1][2147483648]"
"[2147483648]"
"[1,2147483648]"
"[0][0,1]");
}
TEST(BCastTest, Basic_Tensor_With_DimSize_1_Scalar) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2, 1}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,3,2,1]"
"[5][0,1,2,3,4,5]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2, 1}, {1}, false),
"[11,7,5,3,2,1][1,1,1,1,1,1][1,1,1,1,1,1][11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[5][0,1,2,3,4,5]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2, 1}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,3,2,1]"
"[0,1,2,3,4,5][5]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2, 1}, false),
"[1,1,1,1,1,1][11,7,5,3,2,1][11,7,5,3,2,1][1,1,1,1,1,1]"
"[11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[0,1,2,3,4,5][5]");
EXPECT_EQ(BCast({11, 7, 5, 1, 1, 3, 2, 1, 1}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,1,1,3,2,1,1]"
"[3,4,7,8][0,1,2,3,4,5,6,7,8]");
EXPECT_EQ(BCast({11, 7, 5, 1, 1, 3, 2, 1, 1}, {1}, false),
"[11,7,5,1,1,3,2,1,1][1,1,1,1,1,1,1,1,1]"
"[1,1,1,1,1,1,1,1,1][11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[3,4,7,8][0,1,2,3,4,5,6,7,8]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 1, 1, 3, 2, 1, 1}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,1,1,3,2,1,1]"
"[0,1,2,3,4,5,6,7,8][3,4,7,8]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 1, 1, 3, 2, 1, 1}, false),
"[1,1,1,1,1,1,1,1,1][11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1][1,1,1,1,1,1,1,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[0,1,2,3,4,5,6,7,8][3,4,7,8]");
}
TEST(BCastTest, Basic_Tensor_Vector) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {2}),
"[1155,2][1,1][1,2][1155,1]"
"[1155,2]"
"[11,7,5,3,2]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {2}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,1,2][11,7,5,3,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({2}, {11, 7, 5, 3, 2}),
"[1,2][1155,1][1155,2][1,1]"
"[1155,2]"
"[11,7,5,3,2]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2}, {11, 7, 5, 3, 2}, false),
"[1,1,1,1,2][11,7,5,3,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,3][]");
}
TEST(BCastTest, Basic_Tensor_Matrix) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 2}),
"[385,6][1,1][1,6][385,1]"
"[385,6]"
"[11,7,5,3,2]"
"[][0,1,2]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,3,2][11,7,5,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2]");
EXPECT_EQ(BCast({3, 2}, {11, 7, 5, 3, 2}),
"[1,6][385,1][385,6][1,1]"
"[385,6]"
"[11,7,5,3,2]"
"[0,1,2][]");
EXPECT_EQ(BCast({3, 2}, {11, 7, 5, 3, 2}, false),
"[1,1,1,3,2][11,7,5,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2][]");
}
TEST(BCastTest, Basic_Tensor_Matrix_Column) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 1}),
"[385,3,2][1,1,1][1,3,1][385,1,2]"
"[385,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,3,1][11,7,5,1,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,4]");
EXPECT_EQ(BCast({3, 1}, {11, 7, 5, 3, 2}),
"[1,3,1][385,1,2][385,3,2][1,1,1]"
"[385,3,2]"
"[11,7,5,3,2]"
"[0,1,2,4][]");
EXPECT_EQ(BCast({3, 1}, {11, 7, 5, 3, 2}, false),
"[1,1,1,3,1][11,7,5,1,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,4][]");
}
TEST(BCastTest, Basic_Tensor_Matrix_As_Tensor) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {7, 5, 1, 1}),
"[11,35,6][1,1,1][1,35,1][11,1,6]"
"[11,35,6]"
"[11,7,5,3,2]"
"[][0,3,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {7, 5, 1, 1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,7,5,1,1][11,1,1,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,3,4]");
EXPECT_EQ(BCast({7, 5, 1, 1}, {11, 7, 5, 3, 2}),
"[1,35,1][11,1,6][11,35,6][1,1,1]"
"[11,35,6]"
"[11,7,5,3,2]"
"[0,3,4][]");
EXPECT_EQ(BCast({7, 5, 1, 1}, {11, 7, 5, 3, 2}, false),
"[1,7,5,1,1][11,1,1,3,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2][11,7,5,3,2]"
"[0,3,4][]");
}
TEST(BCastTest, Basic_SymbolicShape) {
constexpr int64_t kSymDim1 = -10'000'000'000;
constexpr int64_t kSymDim2 = -10'000'000'001;
const tensorflow::BCast bcast({10, kSymDim1, kSymDim2}, {10, 1, 1}, false);
EXPECT_TRUE(bcast.IsValid());
EXPECT_EQ(bcast.output_batch_size(), -1);
}
TEST(BCastTest, Complex_BCast_To_Each_Other) {
string truth =
"[11,1,5,1,2][1,7,1,3,1][1,7,1,3,1][11,1,5,1,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[1,3][0,2,4]";
EXPECT_EQ(BCast({11, 1, 5, 1, 2}, {7, 1, 3, 1}), truth);
EXPECT_EQ(BCast({11, 1, 5, 1, 2}, {7, 1, 3, 1}, false), truth);
}
TEST(BCastListTest, Complex_BCast_To_Each_Other) {
string truth =
"[11,1,1,1,2][1,7,5,3,1]"
"[1,7,1,3,1][11,1,5,1,2]"
"[1,1,5,1,1][11,7,1,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[1,2,3][0,2,4][0,1,3,4]";
EXPECT_EQ(BCastList3({11, 1, 1, 1, 2}, {7, 1, 3, 1}, {5, 1, 1}), truth);
EXPECT_EQ(BCastList3({11, 1, 1, 1, 2}, {7, 1, 3, 1}, {5, 1, 1}, false),
truth);
}
TEST(BCastTest, TestZeroDimensionShape) {
EXPECT_EQ(BCast({2, 0, 5}, {5}),
"[0,5][1,1][1,5][0,1]"
"[0,5]"
"[2,0,5]"
"[][0,1]");
EXPECT_EQ(BCast({5}, {2, 0, 5}),
"[1,5][0,1][0,5][1,1]"
"[0,5]"
"[2,0,5]"
"[0,1][]");
EXPECT_EQ(BCast({2, 0, 5}, {5}, false),
"[2,0,5][1,1,1][1,1,5][2,0,1]"
"[2,0,5]"
"[2,0,5]"
"[][0,1]");
EXPECT_EQ(BCast({5}, {2, 0, 5}, false),
"[1,1,5][2,0,1][2,0,5][1,1,1]"
"[2,0,5]"
"[2,0,5]"
"[0,1][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {5}),
"[0,5][1,1][1,5][0,1]"
"[0,5]"
"[2,0,3,0,5]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({5}, {2, 0, 3, 0, 5}),
"[1,5][0,1][0,5][1,1]"
"[0,5]"
"[2,0,3,0,5]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {5}, false),
"[2,0,3,0,5][1,1,1,1,1][1,1,1,1,5][2,0,3,0,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({5}, {2, 0, 3, 0, 5}, false),
"[1,1,1,1,5][2,0,3,0,1][2,0,3,0,5][1,1,1,1,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {3, 1, 5}),
"[0,3,0,5][1,1,1,1][1,3,1,5][0,1,0,1]"
"[0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,3]");
EXPECT_EQ(BCast({3, 1, 5}, {2, 0, 3, 0, 5}),
"[1,3,1,5][0,1,0,1][0,3,0,5][1,1,1,1]"
"[0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {3, 1, 5}, false),
"[2,0,3,0,5][1,1,1,1,1][1,1,3,1,5][2,0,1,0,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,3]");
EXPECT_EQ(BCast({3, 1, 5}, {2, 0, 3, 0, 5}, false),
"[1,1,3,1,5][2,0,1,0,1][2,0,3,0,5][1,1,1,1,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,3][]");
}
TEST(BCastTest, BatchIndices) {
EXPECT_EQ("[0,0,0,0][0,1,2,3]", BCastBatchIndices({1}, {4}));
EXPECT_EQ("[][]", BCastBatchIndices({5}, {7}));
EXPECT_EQ("[][]", BCastBatchIndices({2, 4, 6}, {2, 4, 6}));
EXPECT_EQ("[0,0,0,0,1,1,1,1,2,2,2,2][0,1,2,3,0,1,2,3,0,1,2,3]",
BCastBatchIndices({3, 1}, {1, 4}));
EXPECT_EQ("[0,0,1,1,2,2,0,0,1,1,2,2][0,1,0,1,0,1,2,3,2,3,2,3]",
BCastBatchIndices({3, 1}, {2, 1, 2}));
}
void BM_BCastSetup(::testing::benchmark::State& state) {
const int same_shape = state.range(0);
if (same_shape) {
state.SetLabel("same_shapes");
for (auto s : state) {
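      // The elaborated type specifier `class BCast` (here and below) refers
      // to the BCast class rather than the local BCast() helper function
      // defined earlier in this file.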
class BCast b({1000, 100}, {1000, 100});
}
} else {
state.SetLabel("different_shapes");
for (auto s : state) {
class BCast b({3, 1, 5}, {2, 0, 3, 0, 5});
}
}
}
BENCHMARK(BM_BCastSetup)->Arg(0)->Arg(1);
}
} |
1,713 | cpp | tensorflow/tensorflow | batch_util | tensorflow/core/util/batch_util.cc | tensorflow/core/framework/batch_util_test.cc | #ifndef TENSORFLOW_CORE_UTIL_BATCH_UTIL_H_
#define TENSORFLOW_CORE_UTIL_BATCH_UTIL_H_
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace batch_util {
Status CopyElementToSlice(Tensor element, Tensor* parent, int64_t index);
Status CopySliceToElement(const Tensor& parent, Tensor* element, int64_t index);
Status CopyContiguousSlices(const Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst);
Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64_t index);
Status MaybeMoveContiguousSlices(Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst);
Status SetElementZero(Tensor* element, const Tensor& padding);
Status CopyElementToLargerSlice(const Tensor& element, Tensor* parent,
int index);
}
}
#endif
#include "tensorflow/core/util/batch_util.h"
#include <algorithm>
#include <utility>
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#define TF_CALL_DATASET_TYPES(m) TF_CALL_ALL_TYPES(m) TF_CALL_QUANTIZED_TYPES(m)
namespace tensorflow {
namespace batch_util {
namespace {
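// Verifies that `element` holds exactly as many values as one slice (chip) of
// `parent` along dimension 0.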
Status ValidateInput(const Tensor& parent, const Tensor& element,
int64_t index) {
DCHECK_NE(parent.dim_size(0), 0);
DCHECK_GE(index, 0);
if (element.NumElements() != (parent.NumElements() / parent.dim_size(0))) {
TensorShape chip_shape = parent.shape();
chip_shape.RemoveDim(0);
return errors::Internal(
"ValidateInput Cannot perform copy: number of elements does not match. "
" Shapes are: [element]: ",
element.shape().DebugString(),
", [parent slice]: ", chip_shape.DebugString());
}
return absl::OkStatus();
}
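// Copies `num_values` values of a memcpy-able type from `element` into a
// batch slice. The specializations below instead move strings and variants
// when `element` holds the only reference to its buffer.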
template <typename T>
Status HandleElementToSlice(const Tensor& /*element*/, T* src, T* dest,
int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<tstring>(const Tensor& element, tstring* src,
tstring* dest, int64_t num_values) {
if (element.RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
*dest++ = std::move(*src++);
}
} else {
std::copy_n(src, num_values, dest);
}
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<Variant>(const Tensor& element, Variant* src,
Variant* dest, int64_t num_values) {
if (element.RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
*dest++ = std::move(*src++);
}
} else {
std::copy_n(src, num_values, dest);
}
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<ResourceHandle>(const Tensor& /*element*/,
ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
return absl::OkStatus();
}
template <>
Status HandleElementToSlice<Eigen::half>(const Tensor& /*element*/,
Eigen::half* src, Eigen::half* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
return absl::OkStatus();
}
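// Copies `num_values` values out of a batch slice into an element tensor. The
// overloads below that take a `Tensor* parent` may move string and variant
// values when `parent` holds the only reference to its buffer.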
template <typename T>
void HandleSliceToElement(const T* src, T* dest, int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
}
template <>
void HandleSliceToElement<tstring>(const tstring* src, tstring* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Variant>(const Variant* src, Variant* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<ResourceHandle>(const ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Eigen::half>(const Eigen::half* src,
Eigen::half* dest, int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <typename T>
void HandleSliceToElement(Tensor* parent, T* src, T* dest, int64_t num_values) {
static_assert(tsl::is_simple_type<T>::value,
"Memcpy requires a simple type.");
memcpy(dest, src, num_values * sizeof(T));
}
template <>
void HandleSliceToElement<tstring>(Tensor* parent, tstring* src, tstring* dest,
int64_t num_values) {
if (parent->RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
dest[i] = std::move(src[i]);
}
} else {
std::copy_n(src, num_values, dest);
}
}
template <>
void HandleSliceToElement<Variant>(Tensor* parent, Variant* src, Variant* dest,
int64_t num_values) {
if (parent->RefCountIsOne()) {
for (int64_t i = 0; i < num_values; ++i) {
dest[i] = std::move(src[i]);
}
} else {
std::copy_n(src, num_values, dest);
}
}
template <>
void HandleSliceToElement<ResourceHandle>(Tensor* parent, ResourceHandle* src,
ResourceHandle* dest,
int64_t num_values) {
std::copy_n(src, num_values, dest);
}
template <>
void HandleSliceToElement<Eigen::half>(Tensor* parent, Eigen::half* src,
Eigen::half* dest, int64_t num_values) {
std::copy_n(src, num_values, dest);
}
}
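// Copies `element` into slice `index` of `parent`. `element` is taken by
// value so that uniquely owned string/variant data can be moved rather than
// copied.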
Status CopyElementToSlice(Tensor element, Tensor* parent, int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(*parent, element, index));
const int64_t num_values = element.NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src = element.base<T>(); \
T* dest = parent->base<T>() + (num_values * index); \
return HandleElementToSlice<T>(element, src, dest, num_values); \
}
switch (element.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopyElementToSlice Unhandled data type: ",
element.dtype());
}
}
Status CopySliceToElement(const Tensor& parent, Tensor* element,
int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(parent, *element, index));
const int64_t num_values = element->NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
const T* src = parent.base<T>() + (num_values * index); \
T* dest = element->base<T>(); \
HandleSliceToElement<T>(src, dest, num_values); \
return OkStatus(); \
}
switch (parent.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopySliceToElement Unhandled data type: ",
element->dtype());
}
}
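// Moves (for uniquely owned string/variant tensors) or copies `num_slices`
// contiguous slices of `src`, starting at `src_offset`, into `dst` starting
// at `dst_offset`.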
Status MaybeMoveContiguousSlices(Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst) {
if (src.dtype() != dst->dtype()) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: src and dst have "
"different "
"dtypes. Source dtype: ",
src.dtype(), " dstination dtype: ", dst->dtype(), "."));
}
if (src.dims() < 1) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: src has to be a tensor "
"with "
"rank >= 1. Source shape: ",
src.shape().DebugString()));
}
if (dst->dims() < 1) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: dst has to be a tensor "
"with rank >= 1. Dest shape: ",
dst->shape().DebugString()));
}
const int64_t src_dim0 = src.dim_size(0);
const int64_t dst_dim0 = dst->dim_size(0);
int64_t src_chip_size = 1;
int64_t dst_chip_size = 1;
for (int i = 1; i < src.dims(); ++i) {
src_chip_size *= src.dim_size(i);
}
for (int i = 1; i < dst->dims(); ++i) {
dst_chip_size *= dst->dim_size(i);
}
if (src_chip_size != dst_chip_size) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: source and dst shapes "
"are"
"not compatible. Source shape: ",
src.shape().DebugString(),
", dst shape: ", dst->shape().DebugString()));
}
if (src_chip_size == 0 && dst_chip_size == 0) {
return absl::OkStatus();
}
if (src_offset < 0 || src_offset + num_slices > src_dim0 || dst_offset < 0 ||
dst_offset + num_slices > dst_dim0) {
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices cannot perform copy: index out of range. "
"src_offset: ",
src_offset, ", num_slices: ", num_slices, ", src_dim0: ", src_dim0,
", dst_offset: ", dst_offset, ", dst_dim0: ", dst_dim0, "."));
}
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src_p = src.base<T>() + (src_chip_size * src_offset); \
T* dst_p = dst->base<T>() + (dst_chip_size * dst_offset); \
HandleSliceToElement<T>(&src, src_p, dst_p, src_chip_size * num_slices); \
return OkStatus(); \
}
switch (src.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return absl::FailedPreconditionError(absl::StrCat(
"MaybeMoveContiguousSlices unhandled data type: ", src.dtype()));
}
}
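// Copy-only counterpart of MaybeMoveContiguousSlices: always copies, never
// moves, so `src` is left intact.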
Status CopyContiguousSlices(const Tensor& src, int64_t src_offset,
int64_t dst_offset, int64_t num_slices,
Tensor* dst) {
if (src.dtype() != dst->dtype()) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: src and dst have different "
"dtypes. Source dtype: ",
src.dtype(), " dstination dtype: ", dst->dtype(), ".");
}
if (src.dims() < 1) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: src has to be a tensor with "
"rank >= 1. Source shape: ",
src.shape().DebugString());
}
if (dst->dims() < 1) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: dst has to be a tensor "
"with rank >= 1. Dest shape: ",
dst->shape().DebugString());
}
const int64_t src_dim0 = src.dim_size(0);
const int64_t dst_dim0 = dst->dim_size(0);
int64_t src_chip_size = 1;
int64_t dst_chip_size = 1;
for (int i = 1; i < src.dims(); ++i) {
src_chip_size *= src.dim_size(i);
}
for (int i = 1; i < dst->dims(); ++i) {
dst_chip_size *= dst->dim_size(i);
}
if (src_chip_size != dst_chip_size) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: source and dst shapes are"
"not compatible. Source shape: ",
src.shape().DebugString(), ", dst shape: ", dst->shape().DebugString());
}
if (src_chip_size == 0 && dst_chip_size == 0) {
return absl::OkStatus();
}
if (src_offset < 0 || src_offset + num_slices > src_dim0 || dst_offset < 0 ||
dst_offset + num_slices > dst_dim0) {
return errors::FailedPrecondition(
"CopyContiguousSlices cannot perform copy: index out of range. "
"src_offset: ",
src_offset, ", num_slices: ", num_slices, ", src_dim0: ", src_dim0,
", dst_offset: ", dst_offset, ", dst_dim0: ", dst_dim0, ".");
}
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
const T* src_p = src.base<T>() + (src_chip_size * src_offset); \
T* dst_p = dst->base<T>() + (dst_chip_size * dst_offset); \
HandleSliceToElement<T>(src_p, dst_p, src_chip_size * num_slices); \
return OkStatus(); \
}
switch (src.dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented("CopyContiguousSlices unhandled data type: ",
src.dtype());
}
}
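// Like CopySliceToElement, but may move string/variant values out of `parent`
// when `parent` holds the only reference to its buffer.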
Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64_t index) {
TF_RETURN_IF_ERROR(ValidateInput(*parent, *element, index));
const int64_t num_values = element->NumElements();
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
T* src = parent->base<T>() + (num_values * index); \
T* dest = element->base<T>(); \
HandleSliceToElement<T>(parent, src, dest, num_values); \
return OkStatus(); \
}
switch (parent->dtype()) {
TF_CALL_ALL_TYPES(HANDLE_TYPE);
TF_CALL_QUANTIZED_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented(
"MaybeMoveSliceToElement Unhandled data type: ", element->dtype());
}
}
Status ValidateElementToLargerSlice(const Tensor& element, Tensor* parent) {
DCHECK_NE(parent->dim_size(0), 0);
if (element.NumElements() > (parent->NumElements() / parent->dim_size(0))) {
TensorShape chip_shape = parent->shape();
chip_shape.RemoveDim(0);
return errors::Internal(
"HandleElementToLargerSlice Cannot copy slice: number of entries in "
"element is greater than number of elements in parent slice. ",
"Shapes are: [element]: ", element.shape().DebugString(),
", [parent slice]: ", chip_shape.DebugString());
}
return absl::OkStatus();
}
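// Copies `element` into row `index` of `parent`. The element may be smaller
// than a full slice; callers can pre-fill `parent` with SetElementZero so the
// remainder keeps the padding value.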
template <typename T, int NDIMS>
Status HandleElementToLargerSlice(const Tensor& element, Tensor* parent,
int index) {
TF_RETURN_IF_ERROR(ValidateElementToLargerSlice(element, parent));
if (element.NumElements() == 0) {
return absl::OkStatus();
}
auto element_t = element.tensor<T, NDIMS>();
auto parent_t = parent->tensor<T, NDIMS + 1>();
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_indices;
slice_indices[0] = index;
Eigen::DSizes<Eigen::DenseIndex, NDIMS + 1> slice_size;
slice_size[0] = 1;
for (size_t i = 1; i < slice_size.size(); ++i) {
slice_size[i] = element_t.dimension(i - 1);
}
parent_t.slice(slice_indices, slice_size) = element_t.reshape(slice_size);
return absl::OkStatus();
}
template <int NDIMS>
Status HandleElementToLargerSliceWithRank(const Tensor& element, Tensor* parent,
int index) {
#define HANDLE_TYPE(T) \
case DataTypeToEnum<T>::value: { \
return HandleElementToLargerSlice<T, NDIMS>(element, parent, index); \
}
switch (element.dtype()) {
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
default:
return errors::Unimplemented(
"HandleElementToLargerSliceWithRank Unhandled data type: ",
element.dtype());
}
}
Status CopyElementToLargerSlice(const Tensor& element, Tensor* parent,
int index) {
if (parent->dims() != element.dims() + 1) {
return errors::Internal(
"Mismatched ranks. Element's rank is: ", element.dims(),
" but element is meant to be a slice in output Tensor having rank: ",
parent->dims(), " (should be: ", element.dims() + 1, ")");
}
#define HANDLE_DIMS(NDIMS) \
case NDIMS: { \
TF_RETURN_IF_ERROR( \
HandleElementToLargerSliceWithRank<NDIMS>(element, parent, index)); \
return OkStatus(); \
}
switch (element.dims()) {
HANDLE_DIMS(0);
HANDLE_DIMS(1);
HANDLE_DIMS(2);
HANDLE_DIMS(3);
HANDLE_DIMS(4);
HANDLE_DIMS(5);
#undef HANDLE_DIMS
default:
return errors::Unimplemented("CopyElementToLargerSlice Unhandled rank: ",
element.dims());
}
}
Status SetElementZero(Tensor* element, const Tensor& padding) {
#define HANDLE_TYPE(T) \
if (element->dtype() == DataTypeToEnum<T>::value) { \
element->flat<T>().setConstant(padding.scalar<T>()()); \
return OkStatus(); \
}
TF_CALL_DATASET_TYPES(HANDLE_TYPE);
#undef HANDLE_TYPE
return errors::Unimplemented("SetElementZero Unhandled data type: ",
element->dtype());
}
}
} | #include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CopyContiguousSlicesTest, CompatibleShape) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 2, 0, 5, &dst);
ASSERT_EQ(error::OK, s.code());
}
TEST(CopyContiguousSlicesTest, SourceOffsetOutOfRange) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 7, 0, 5, &dst);
ASSERT_EQ(error::FAILED_PRECONDITION, s.code());
}
TEST(CopyContiguousSlicesTest, DstOffsetOutOfRange) {
Tensor src(DT_FLOAT, {7, 1, 2});
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 0, 0, 8, &dst);
ASSERT_EQ(error::FAILED_PRECONDITION, s.code());
}
TEST(CopyContiguousSlicesTest, CheckDstWithExpectedValues) {
auto src = test::AsTensor<float>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
TensorShape({5, 2}));
Tensor dst(DT_FLOAT, {9, 2, 1});
auto s = batch_util::CopyContiguousSlices(
src, 1, 5, 3, &dst);
ASSERT_EQ(error::OK, s.code());
test::ExpectTensorEqual<float>(
test::AsTensor<float>({2, 3, 4, 5, 6, 7}, TensorShape({3, 2, 1})),
dst.Slice(5, 8));
}
}
} |
1,714 | cpp | tensorflow/tensorflow | port | third_party/xla/third_party/tsl/tsl/platform/windows/port.cc | third_party/xla/third_party/tsl/tsl/platform/port_test.cc | #ifndef XLA_STREAM_EXECUTOR_PLATFORM_PORT_H_
#define XLA_STREAM_EXECUTOR_PLATFORM_PORT_H_
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace stream_executor {
using tsl::int16;
using tsl::int32;
using tsl::int8;
using tsl::uint16;
using tsl::uint32;
using tsl::uint64;
using tsl::uint8;
#if !defined(PLATFORM_GOOGLE)
using std::string;
#endif
#define SE_FALLTHROUGH_INTENDED TF_FALLTHROUGH_INTENDED
}
#define SE_DISALLOW_COPY_AND_ASSIGN TF_DISALLOW_COPY_AND_ASSIGN
#define SE_MUST_USE_RESULT TF_MUST_USE_RESULT
#define SE_PREDICT_TRUE TF_PREDICT_TRUE
#define SE_PREDICT_FALSE TF_PREDICT_FALSE
#endif
#include "absl/base/internal/sysinfo.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/host_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/profile_utils/cpu_utils.h"
#include "tsl/platform/snappy.h"
#include "tsl/platform/types.h"
#if defined(__linux__)
#include <sched.h>
#include <sys/sysinfo.h>
#else
#include <sys/syscall.h>
#endif
#if (__x86_64__ || __i386__)
#include <cpuid.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef TF_USE_SNAPPY
#include "snappy.h"
#endif
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || \
defined(__HAIKU__)
#include <thread>
#endif
#if TENSORFLOW_USE_NUMA
#include "hwloc.h"
#endif
#if defined(__ANDROID__) && (defined(__i386__) || defined(__x86_64__))
#define TENSORFLOW_HAS_CXA_DEMANGLE 0
#elif (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && \
!defined(__mips__)
#define TENSORFLOW_HAS_CXA_DEMANGLE 1
#elif defined(__clang__) && !defined(_MSC_VER)
#define TENSORFLOW_HAS_CXA_DEMANGLE 1
#else
#define TENSORFLOW_HAS_CXA_DEMANGLE 0
#endif
#if TENSORFLOW_HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace tsl {
namespace port {
void InitMain(const char* usage, int* argc, char*** argv) {}
string Hostname() {
char hostname[1024];
gethostname(hostname, sizeof hostname);
hostname[sizeof hostname - 1] = 0;
return string(hostname);
}
string JobName() {
const char* job_name_cs = std::getenv("TF_JOB_NAME");
if (job_name_cs != nullptr) {
return string(job_name_cs);
}
return "";
}
int64_t JobUid() { return -1; }
int64_t TaskId() { return -1; }
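// On Linux, sched_getaffinity fails with EINVAL when the kernel's CPU mask is
// wider than the buffer supplied, so the probe below doubles the set size
// until the call succeeds.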
int NumSchedulableCPUs() {
#if defined(__linux__)
for (int ncpus = 1024; ncpus < std::numeric_limits<int>::max() / 2;
ncpus *= 2) {
size_t setsize = CPU_ALLOC_SIZE(ncpus);
cpu_set_t* mask = CPU_ALLOC(ncpus);
if (!mask) break;
if (sched_getaffinity(0, setsize, mask) == 0) {
int result = CPU_COUNT_S(setsize, mask);
CPU_FREE(mask);
return result;
}
CPU_FREE(mask);
if (errno != EINVAL) break;
}
perror("sched_getaffinity");
#endif
#if (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || \
defined(__HAIKU__)
unsigned int count = std::thread::hardware_concurrency();
if (count > 0) return static_cast<int>(count);
#endif
const int kDefaultCores = 4;
fprintf(stderr, "can't determine number of CPU cores: assuming %d\n",
kDefaultCores);
return kDefaultCores;
}
int MaxParallelism() { return NumSchedulableCPUs(); }
int MaxParallelism(int numa_node) {
if (numa_node != port::kNUMANoAffinity) {
return NumSchedulableCPUs() / port::NUMANumNodes();
}
return NumSchedulableCPUs();
}
int NumTotalCPUs() {
int count = absl::base_internal::NumCPUs();
return (count <= 0) ? kUnknownCPU : count;
}
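// Best-effort query of the CPU the calling thread is currently running on;
// returns kUnknownCPU when the platform provides no answer.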
int GetCurrentCPU() {
#if defined(__EMSCRIPTEN__)
return sched_getcpu();
#elif defined(__linux__)
return sched_getcpu();
#elif defined(__cpuid) && !defined(__APPLE__)
uint32_t eax = 0;
uint32_t ebx = 0;
uint32_t ecx = 0;
uint32_t edx = 0;
__cpuid(1, eax, ebx, ecx, edx);
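  // CPUID leaf 1: EDX bit 9 reports an on-chip APIC. When present, EBX bits
  // 24-31 hold the initial APIC ID, which identifies the current CPU.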
if ((edx & (1 << 9)) != 0) {
    return (ebx & 0xFF000000) >> 24;
}
#elif defined(__NR_getcpu)
unsigned int cpu;
if (syscall(__NR_getcpu, &cpu, NULL, NULL) < 0) {
return kUnknownCPU;
} else {
return static_cast<int>(cpu);
}
#endif
return kUnknownCPU;
}
int NumHyperthreadsPerCore() {
static const int ht_per_core = tsl::port::CPUIDNumSMT();
return (ht_per_core > 0) ? ht_per_core : 1;
}
#ifdef TENSORFLOW_USE_NUMA
namespace {
static hwloc_topology_t hwloc_topology_handle;
bool HaveHWLocTopology() {
static bool init = []() {
if (hwloc_topology_init(&hwloc_topology_handle)) {
LOG(ERROR) << "Call to hwloc_topology_init() failed";
return false;
}
if (hwloc_topology_load(hwloc_topology_handle)) {
LOG(ERROR) << "Call to hwloc_topology_load() failed";
return false;
}
return true;
}();
return init;
}
hwloc_obj_t GetHWLocTypeIndex(hwloc_obj_type_t tp, int index) {
hwloc_obj_t obj = nullptr;
if (index >= 0) {
while ((obj = hwloc_get_next_obj_by_type(hwloc_topology_handle, tp, obj)) !=
nullptr) {
if (obj->os_index == index) break;
}
}
return obj;
}
}
#endif
bool NUMAEnabled() { return (NUMANumNodes() > 1); }
int NUMANumNodes() {
#ifdef TENSORFLOW_USE_NUMA
if (HaveHWLocTopology()) {
int num_numanodes =
hwloc_get_nbobjs_by_type(hwloc_topology_handle, HWLOC_OBJ_NUMANODE);
return std::max(1, num_numanodes);
} else {
return 1;
}
#else
return 1;
#endif
}
void NUMASetThreadNodeAffinity(int node) {
#ifdef TENSORFLOW_USE_NUMA
if (HaveHWLocTopology()) {
hwloc_obj_t obj = GetHWLocTypeIndex(HWLOC_OBJ_NUMANODE, node);
if (obj) {
hwloc_set_cpubind(hwloc_topology_handle, obj->cpuset,
HWLOC_CPUBIND_THREAD | HWLOC_CPUBIND_STRICT);
} else {
LOG(ERROR) << "Could not find hwloc NUMA node " << node;
}
}
#endif
}
int NUMAGetThreadNodeAffinity() {
int node_index = kNUMANoAffinity;
#ifdef TENSORFLOW_USE_NUMA
if (HaveHWLocTopology()) {
hwloc_cpuset_t thread_cpuset = hwloc_bitmap_alloc();
hwloc_get_cpubind(hwloc_topology_handle, thread_cpuset,
HWLOC_CPUBIND_THREAD);
hwloc_obj_t obj = nullptr;
while ((obj = hwloc_get_next_obj_by_type(
hwloc_topology_handle, HWLOC_OBJ_NUMANODE, obj)) != nullptr) {
if (hwloc_bitmap_isincluded(thread_cpuset, obj->cpuset)) {
node_index = obj->os_index;
break;
}
}
hwloc_bitmap_free(thread_cpuset);
}
#endif
return node_index;
}
void* NUMAMalloc(int node, size_t size, int minimum_alignment) {
#ifdef TENSORFLOW_USE_NUMA
if (HaveHWLocTopology()) {
hwloc_obj_t numa_node = GetHWLocTypeIndex(HWLOC_OBJ_NUMANODE, node);
if (numa_node) {
return hwloc_alloc_membind(hwloc_topology_handle, size,
numa_node->nodeset, HWLOC_MEMBIND_BIND,
HWLOC_MEMBIND_BYNODESET);
} else {
LOG(ERROR) << "Failed to find hwloc NUMA node " << node;
}
}
#endif
return tsl::port::AlignedMalloc(size, minimum_alignment);
}
void NUMAFree(void* ptr, size_t size) {
#ifdef TENSORFLOW_USE_NUMA
if (HaveHWLocTopology()) {
hwloc_free(hwloc_topology_handle, ptr, size);
return;
}
#endif
tsl::port::Free(ptr);
}
int NUMAGetMemAffinity(const void* addr) {
int node = kNUMANoAffinity;
#ifdef TENSORFLOW_USE_NUMA
if (HaveHWLocTopology() && addr) {
hwloc_nodeset_t nodeset = hwloc_bitmap_alloc();
if (!hwloc_get_area_memlocation(hwloc_topology_handle, addr, 4, nodeset,
HWLOC_MEMBIND_BYNODESET)) {
hwloc_obj_t obj = nullptr;
while ((obj = hwloc_get_next_obj_by_type(
hwloc_topology_handle, HWLOC_OBJ_NUMANODE, obj)) != nullptr) {
if (hwloc_bitmap_isincluded(nodeset, obj->nodeset)) {
node = obj->os_index;
break;
}
}
hwloc_bitmap_free(nodeset);
} else {
LOG(ERROR) << "Failed call to hwloc_get_area_memlocation.";
}
}
#endif
return node;
}
bool Snappy_Compress(const char* input, size_t length, string* output) {
#ifdef TF_USE_SNAPPY
output->resize(snappy::MaxCompressedLength(length));
size_t outlen;
snappy::RawCompress(input, length, &(*output)[0], &outlen);
output->resize(outlen);
return true;
#else
return false;
#endif
}
bool Snappy_CompressFromIOVec(const struct iovec* iov,
size_t uncompressed_length, string* output) {
#ifdef TF_USE_SNAPPY
output->resize(snappy::MaxCompressedLength(uncompressed_length));
size_t outlen;
snappy::RawCompressFromIOVec(iov, uncompressed_length, &(*output)[0],
&outlen);
output->resize(outlen);
return true;
#else
return false;
#endif
}
bool Snappy_GetUncompressedLength(const char* input, size_t length,
size_t* result) {
#ifdef TF_USE_SNAPPY
return snappy::GetUncompressedLength(input, length, result);
#else
return false;
#endif
}
bool Snappy_Uncompress(const char* input, size_t length, char* output) {
#ifdef TF_USE_SNAPPY
return snappy::RawUncompress(input, length, output);
#else
return false;
#endif
}
bool Snappy_UncompressToIOVec(const char* compressed, size_t compressed_length,
const struct iovec* iov, size_t iov_cnt) {
#ifdef TF_USE_SNAPPY
return snappy::RawUncompressToIOVec(compressed, compressed_length, iov,
iov_cnt);
#else
return false;
#endif
}
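// Demangles a C++ symbol via abi::__cxa_demangle when it is available,
// falling back to the mangled name unchanged.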
static void DemangleToString(const char* mangled, string* out) {
int status = 0;
char* demangled = nullptr;
#if TENSORFLOW_HAS_CXA_DEMANGLE
demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
#endif
if (status == 0 && demangled != nullptr) {
out->append(demangled);
free(demangled);
} else {
out->append(mangled);
}
}
string Demangle(const char* mangled) {
string demangled;
DemangleToString(mangled, &demangled);
return demangled;
}
double NominalCPUFrequency() {
return tsl::profile_utils::CpuUtils::GetCycleCounterFrequency();
}
}
}
namespace tsl {
namespace port {
void* AlignedMalloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__)
return memalign(minimum_alignment, size);
#else
void* ptr = nullptr;
const int required_alignment = sizeof(void*);
if (minimum_alignment < required_alignment) return Malloc(size);
int err = posix_memalign(&ptr, minimum_alignment, size);
if (err != 0) {
return nullptr;
} else {
return ptr;
}
#endif
}
void AlignedFree(void* aligned_memory) { Free(aligned_memory); }
void* Malloc(size_t size) { return malloc(size); }
void* Realloc(void* ptr, size_t size) { return realloc(ptr, size); }
void Free(void* ptr) { free(ptr); }
void MallocExtension_ReleaseToSystem(std::size_t num_bytes) {
}
std::size_t MallocExtension_GetAllocatedSize(const void* p) {
#if !defined(__ANDROID__)
return 0;
#else
return malloc_usable_size(p);
#endif
}
MemoryInfo GetMemoryInfo() {
MemoryInfo mem_info = {INT64_MAX, INT64_MAX};
#if defined(__linux__)
struct sysinfo info;
int err = sysinfo(&info);
if (err == 0) {
mem_info.free = info.freeram;
mem_info.total = info.totalram;
}
#endif
return mem_info;
}
MemoryBandwidthInfo GetMemoryBandwidthInfo() {
MemoryBandwidthInfo membw_info = {INT64_MAX};
return membw_info;
}
IOStatistics GetIOStatistics() { return IOStatistics(); }
}
} | #include <condition_variable>
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace port {
TEST(Port, AlignedMalloc) {
for (size_t alignment = 1; alignment <= 1 << 20; alignment <<= 1) {
void* p = AlignedMalloc(1, alignment);
ASSERT_TRUE(p != nullptr) << "AlignedMalloc(1, " << alignment << ")";
uintptr_t pval = reinterpret_cast<uintptr_t>(p);
EXPECT_EQ(pval % alignment, 0);
AlignedFree(p);
}
}
TEST(Port, GetCurrentCPU) {
const int cpu = GetCurrentCPU();
#if !defined(__APPLE__)
EXPECT_GE(cpu, 0);
EXPECT_LT(cpu, NumTotalCPUs());
#endif
}
TEST(ConditionVariable, WaitForMilliseconds_Timeout) {
mutex m;
mutex_lock l(m);
condition_variable cv;
ConditionResult result = tsl::kCond_MaybeNotified;
time_t start = time(nullptr);
while (result == tsl::kCond_MaybeNotified) {
result = WaitForMilliseconds(&l, &cv, 3000);
}
EXPECT_EQ(result, tsl::kCond_Timeout);
time_t finish = time(nullptr);
EXPECT_GE(finish - start, 3);
}
TEST(ConditionVariable, WaitForMilliseconds_Signalled) {
thread::ThreadPool pool(Env::Default(), "test", 1);
mutex m;
mutex_lock l(m);
condition_variable cv;
time_t start = time(nullptr);
pool.Schedule([&m, &cv]() {
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
mutex_lock l(m);
cv.notify_all();
});
EXPECT_EQ(WaitForMilliseconds(&l, &cv, 3000), tsl::kCond_MaybeNotified);
time_t finish = time(nullptr);
EXPECT_LT(finish - start, 3);
}
TEST(ConditionalCriticalSections, AwaitWithDeadline_Timeout) {
bool always_false = false;
mutex m;
m.lock();
time_t start = time(nullptr);
bool result =
m.AwaitWithDeadline(Condition(&always_false),
EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos);
time_t finish = time(nullptr);
m.unlock();
EXPECT_EQ(result, false);
EXPECT_GE(finish - start, 3);
}
TEST(ConditionalCriticalSections, AwaitWithDeadline_Woken) {
thread::ThreadPool pool(Env::Default(), "test", 1);
bool woken = false;
mutex m;
m.lock();
time_t start = time(nullptr);
pool.Schedule([&m, &woken]() {
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
m.lock();
woken = true;
m.unlock();
});
bool result = m.AwaitWithDeadline(
Condition(&woken), EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos);
time_t finish = time(nullptr);
m.unlock();
EXPECT_EQ(result, true);
EXPECT_LT(finish - start, 3);
}
static bool Invert(bool* b) { return !*b; }
class InvertClass {
public:
explicit InvertClass(bool* value) : value_(value) {}
bool Value() { return !*this->value_; }
private:
InvertClass();
bool* value_;
};
TEST(ConditionalCriticalSections, Await_PingPong) {
thread::ThreadPool pool(Env::Default(), "test", 1);
bool ping_pong = false;
bool done = false;
mutex m;
pool.Schedule([&m, &ping_pong, &done]() {
m.lock();
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&ping_pong));
ping_pong = false;
}
done = true;
m.unlock();
});
m.lock();
InvertClass invert(&ping_pong);
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&Invert, &ping_pong));
ping_pong = true;
}
m.Await(Condition(&done));
m.unlock();
}
TEST(ConditionalCriticalSections, Await_PingPongMethod) {
thread::ThreadPool pool(Env::Default(), "test", 1);
bool ping_pong = false;
bool done = false;
mutex m;
pool.Schedule([&m, &ping_pong, &done]() {
m.lock();
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&ping_pong));
ping_pong = false;
}
done = true;
m.unlock();
});
m.lock();
InvertClass invert(&ping_pong);
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&invert, &InvertClass::Value));
ping_pong = true;
}
m.Await(Condition(&done));
m.unlock();
}
TEST(TestCPUFeature, TestFeature) {
const bool has_avx = TestCPUFeature(CPUFeature::AVX);
LOG(INFO) << "has_avx = " << has_avx;
const bool has_avx2 = TestCPUFeature(CPUFeature::AVX2);
LOG(INFO) << "has_avx2 = " << has_avx2;
}
}
} |
1,715 | cpp | tensorflow/tensorflow | incremental_barrier | tensorflow/core/util/incremental_barrier.cc | tensorflow/core/util/incremental_barrier_test.cc | #ifndef TENSORFLOW_CORE_UTIL_INCREMENTAL_BARRIER_H_
#define TENSORFLOW_CORE_UTIL_INCREMENTAL_BARRIER_H_
#include <atomic>
#include <functional>
namespace tensorflow {
class InternalIncrementalBarrier;
class IncrementalBarrier {
public:
typedef std::function<void()> DoneCallback;
typedef std::function<void()> BarrierCallback;
explicit IncrementalBarrier(DoneCallback callback);
~IncrementalBarrier();
BarrierCallback Inc();
private:
InternalIncrementalBarrier* internal_barrier_;
};
}
#endif
#include "tensorflow/core/util/incremental_barrier.h"
#include <atomic>
#include <functional>
#include <utility>
#include "absl/functional/bind_front.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
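// Reference-counted implementation behind IncrementalBarrier. The count
// starts at one (held by the IncrementalBarrier itself); Inc() adds one and
// each returned callback invocation subtracts one. When the count reaches
// zero the object deletes itself and then runs the done callback.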
class InternalIncrementalBarrier {
public:
explicit InternalIncrementalBarrier(IncrementalBarrier::DoneCallback callback)
: left_(1), done_callback_(std::move(callback)) {}
void operator()() {
DCHECK_GE(left_.load(std::memory_order_relaxed), 0);
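    // fetch_sub returns the previous value, so `previous - 1 == 0` means this
    // call released the last outstanding reference.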
if (left_.fetch_sub(1, std::memory_order_acq_rel) - 1 == 0) {
IncrementalBarrier::DoneCallback done_callback =
std::move(done_callback_);
delete this;
done_callback();
}
}
IncrementalBarrier::BarrierCallback Inc() {
left_.fetch_add(1, std::memory_order_acq_rel);
return absl::bind_front(&InternalIncrementalBarrier::operator(), this);
}
private:
std::atomic<int> left_;
IncrementalBarrier::DoneCallback done_callback_;
};
IncrementalBarrier::IncrementalBarrier(DoneCallback done_callback)
: internal_barrier_(
new InternalIncrementalBarrier(std::move(done_callback))) {}
IncrementalBarrier::~IncrementalBarrier() { (*internal_barrier_)(); }
IncrementalBarrier::BarrierCallback IncrementalBarrier::Inc() {
return internal_barrier_->Inc();
}
} | #include "tensorflow/core/util/incremental_barrier.h"
#include <atomic>
#include "absl/functional/bind_front.h"
#include "absl/time/time.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace {
class Counter {
public:
void Increment() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
++count_;
}
int GetCount() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return count_;
}
private:
mutex mu_;
int count_ = 0;
};
TEST(IncrementalBarrierTest, RunInstantlyWhenZeroClosure) {
Counter counter;
EXPECT_EQ(counter.GetCount(), 0);
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
EXPECT_EQ(counter.GetCount(), 0);
}
EXPECT_EQ(counter.GetCount(), 1);
}
TEST(IncrementalBarrierTest, RunAfterNumClosuresOneNowTwoLater) {
Counter counter;
IncrementalBarrier::BarrierCallback bc1, bc2;
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
CHECK_EQ(counter.GetCount(), 0);
bc1 = barrier.Inc();
bc2 = barrier.Inc();
IncrementalBarrier::BarrierCallback bc3 = barrier.Inc();
bc3();
CHECK_EQ(counter.GetCount(), 0);
}
CHECK_EQ(counter.GetCount(), 0);
bc1();
CHECK_EQ(counter.GetCount(), 0);
bc2();
CHECK_EQ(counter.GetCount(), 1);
}
TEST(IncrementalBarrierTest, RunAfterNumClosuresConcurrency) {
const int num_closure = 100, num_thread = 2;
std::atomic<int> schedule_count{0};
Counter counter;
{
IncrementalBarrier::DoneCallback done_callback =
absl::bind_front(&Counter::Increment, &counter);
IncrementalBarrier barrier(done_callback);
CHECK_EQ(counter.GetCount(), 0);
tensorflow::thread::ThreadPool pool(tensorflow::Env::Default(),
"BarrierClosure", num_thread);
for (int i = 0; i < num_closure; ++i) {
pool.Schedule([&barrier, &schedule_count]() {
schedule_count.fetch_add(1);
IncrementalBarrier::BarrierCallback bc = barrier.Inc();
Env::Default()->SleepForMicroseconds(100);
bc();
});
}
CHECK_EQ(counter.GetCount(), 0);
}
CHECK_EQ(schedule_count.load(std::memory_order_relaxed), 100);
CHECK_EQ(counter.GetCount(), 1);
}
#if defined(PLATFORM_GOOGLE)
void BM_FunctionInc(benchmark::State& state) {
IncrementalBarrier barrier([] {});
for (auto _ : state) {
barrier.Inc()();
}
}
BENCHMARK(BM_FunctionInc);
#endif
}
} |
1,716 | cpp | tensorflow/tensorflow | ragged_to_dense_util | tensorflow/core/util/ragged_to_dense_util.cc | tensorflow/core/util/ragged_to_dense_util_test.cc | #ifndef TENSORFLOW_CORE_UTIL_RAGGED_TO_DENSE_UTIL_H_
#define TENSORFLOW_CORE_UTIL_RAGGED_TO_DENSE_UTIL_H_
#include <vector>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/util/ragged_to_dense_util_common.h"
namespace tensorflow {
string RowPartitionTypeToString(RowPartitionType row_partition_type);
Status GetRowPartitionTypesHelper(
const std::vector<string>& row_partition_type_strings,
std::vector<RowPartitionType>* row_partition_types);
template <typename ContextType>
Status GetRowPartitionTypes(
ContextType* context, std::vector<RowPartitionType>* row_partition_types) {
std::vector<string> row_partition_type_strings;
TF_RETURN_IF_ERROR(
context->GetAttr("row_partition_types", &row_partition_type_strings));
return GetRowPartitionTypesHelper(row_partition_type_strings,
row_partition_types);
}
Status CombineRaggedTensorToTensorShapes(int ragged_rank,
const TensorShapeProto& shape,
const TensorShapeProto& value_shape,
TensorShapeProto* output_shape);
int GetRaggedRank(const std::vector<RowPartitionType>& row_partition_types);
Status ValidateDefaultValueShape(const TensorShapeProto& default_value_shape,
const TensorShapeProto& value_shape);
}
#endif
#include "tensorflow/core/util/ragged_to_dense_util.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
namespace tensorflow {
using errors::InvalidArgument;
tensorflow::Status GetRowPartitionTypesHelper(
const std::vector<string>& row_partition_type_strings,
std::vector<RowPartitionType>* row_partition_types) {
*row_partition_types = GetRowPartitionTypesHelper(row_partition_type_strings);
if (row_partition_types->size() != row_partition_type_strings.size()) {
return InvalidArgument(
"Unknown string for partition info type: ",
row_partition_type_strings.at(row_partition_types->size()));
}
return absl::OkStatus();
}
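// Merges the caller-provided `shape` with the shape implied by the ragged
// rank and the flat values, filling unknown dimensions from whichever side
// knows them and rejecting genuine mismatches.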
tensorflow::Status CombineRaggedTensorToTensorShapes(
int ragged_rank, const TensorShapeProto& shape,
const TensorShapeProto& value_shape, TensorShapeProto* output_shape) {
if (value_shape.unknown_rank() && shape.unknown_rank()) {
output_shape->Clear();
output_shape->set_unknown_rank(true);
return absl::OkStatus();
}
if (shape.unknown_rank()) {
while (output_shape->dim_size() < ragged_rank + value_shape.dim_size()) {
output_shape->add_dim()->set_size(-1);
}
} else {
*output_shape = shape;
}
if (value_shape.unknown_rank()) {
return absl::OkStatus();
}
if (ragged_rank + value_shape.dim_size() != output_shape->dim_size()) {
return InvalidArgument(
"rt_input.shape and shape=", TensorShape::DebugString(shape),
" are incompatible: rt_input.rank = ",
ragged_rank + value_shape.dim_size(),
" but shape.rank = ", output_shape->dim_size());
}
for (int i = 1; i < value_shape.dim_size(); ++i) {
const TensorShapeProto::Dim& value_dim = value_shape.dim(i);
TensorShapeProto::Dim* output_shape_dim = output_shape->mutable_dim(
output_shape->dim_size() - value_shape.dim_size() + i);
if (value_dim.size() >= 0) {
if (output_shape_dim->size() >= 0) {
if (output_shape_dim->size() != value_dim.size()) {
return InvalidArgument(
"rt_input.shape and shape=", TensorShape::DebugString(shape),
" are incompatible: rt_input.shape[", i + ragged_rank,
"] = ", value_dim.size(), " but shape[", i + ragged_rank,
"] = ", output_shape_dim->size());
}
} else {
output_shape_dim->set_size(value_dim.size());
}
}
}
return absl::OkStatus();
}
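// Validates that `default_value_shape` is broadcastable to a single row of
// the flat values: its rank must be strictly smaller, and each known
// dimension must be 1 or match the corresponding values dimension.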
tensorflow::Status ValidateDefaultValueShape(
const TensorShapeProto& default_value_shape,
const TensorShapeProto& value_shape) {
if (default_value_shape.unknown_rank() || value_shape.unknown_rank()) {
return absl::OkStatus();
}
int default_ndims = default_value_shape.dim_size();
int values_ndims = value_shape.dim_size();
if (default_ndims >= values_ndims) {
return InvalidArgument(
"default_value.shape=", TensorShape::DebugString(default_value_shape),
" and rt_input.flat_values.shape=",
TensorShape::DebugString(value_shape),
" are incompatible: default_value.rank = ", default_ndims,
" must be less than rt_input.flat_values.rank = ", values_ndims);
}
for (int i = 0; i < std::min(default_ndims, values_ndims - 1); ++i) {
int default_dim = default_value_shape.dim(i).size();
int value_dim = value_shape.dim(i + 1).size();
if (default_dim >= 0 && value_dim >= 0 && default_dim != 1 &&
default_dim != value_dim) {
return InvalidArgument(
"default_value.shape=", TensorShape::DebugString(default_value_shape),
" and rt_input.flat_values.shape=",
TensorShape::DebugString(value_shape),
" are incompatible: default_value.shape[",
i - default_value_shape.dim_size(), "] = ", default_dim,
" but rt_input.flat_values.shape[",
i - default_value_shape.dim_size(), "] = ", value_dim);
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/ragged_to_dense_util.h"
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CombineRaggedTensorToTensorShapes, UnknownShapeUnknownValue) {
TensorShapeProto shape_proto;
shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.set_unknown_rank(true);
int ragged_rank = 1;
TensorShapeProto actual_output_shape_proto;
TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
EXPECT_EQ(true, actual_output_shape_proto.unknown_rank());
}
TEST(CombineRaggedTensorToTensorShapes, UnknownShape) {
TensorShapeProto shape_proto;
shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(6);
int ragged_rank = 1;
TensorShapeProto actual_output_shape_proto;
TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
ASSERT_EQ(actual_output_shape_proto.dim_size(), 2);
EXPECT_EQ(actual_output_shape_proto.dim(0).size(), -1);
EXPECT_EQ(actual_output_shape_proto.dim(1).size(), -1);
}
TEST(CombineRaggedTensorToTensorShapes, UnknownShapeDenseValue) {
TensorShapeProto shape_proto;
shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(6);
value_shape_proto.add_dim()->set_size(3);
int ragged_rank = 1;
TensorShapeProto actual_output_shape_proto;
TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
ASSERT_EQ(actual_output_shape_proto.dim_size(), 3);
EXPECT_EQ(actual_output_shape_proto.dim(0).size(), -1);
EXPECT_EQ(actual_output_shape_proto.dim(1).size(), -1);
EXPECT_EQ(actual_output_shape_proto.dim(2).size(), 3);
}
TEST(GetRowPartitionTypesHelper, BasicTest) {
const std::vector<string> row_partition_type_strings = {
"FIRST_DIM_SIZE", "VALUE_ROWIDS", "ROW_SPLITS"};
std::vector<RowPartitionType> row_partition_types;
TF_ASSERT_OK(GetRowPartitionTypesHelper(row_partition_type_strings,
&row_partition_types));
EXPECT_THAT(row_partition_types,
::testing::ElementsAre(RowPartitionType::FIRST_DIM_SIZE,
RowPartitionType::VALUE_ROWIDS,
RowPartitionType::ROW_SPLITS));
}
TEST(RowPartitionTypeToString, BasicTest) {
EXPECT_EQ("FIRST_DIM_SIZE",
RowPartitionTypeToString(RowPartitionType::FIRST_DIM_SIZE));
EXPECT_EQ("VALUE_ROWIDS",
RowPartitionTypeToString(RowPartitionType::VALUE_ROWIDS));
EXPECT_EQ("ROW_SPLITS",
RowPartitionTypeToString(RowPartitionType::ROW_SPLITS));
}
TEST(ValidateDefaultValueShape, UnknownDefaultValueShape) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.set_unknown_rank(true);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(6);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, UnknownValueShape) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(5);
TensorShapeProto value_shape_proto;
value_shape_proto.set_unknown_rank(true);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, ScalarShape) {
TensorShapeProto default_value_shape_proto;
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorShapeEqual) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(2);
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(2);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorDimensionUnknown) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(-1);
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(2);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorDimensionUnknownForValue) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(2);
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(-1);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, TensorDimensionFewDims) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(3);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(-1);
value_shape_proto.add_dim()->set_size(3);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
TEST(ValidateDefaultValueShape, WrongNumberOfDimensions) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(-1);
default_value_shape_proto.add_dim()->set_size(-1);
default_value_shape_proto.add_dim()->set_size(-1);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(-1);
value_shape_proto.add_dim()->set_size(-1);
EXPECT_FALSE(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)
.ok());
}
TEST(ValidateDefaultValueShape, WrongDimensionSize) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(3);
default_value_shape_proto.add_dim()->set_size(-1);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(6);
value_shape_proto.add_dim()->set_size(-1);
EXPECT_FALSE(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)
.ok());
}
TEST(ValidateDefaultValueShape, WrongDimensionSizeBut1) {
TensorShapeProto default_value_shape_proto;
default_value_shape_proto.add_dim()->set_size(3);
default_value_shape_proto.add_dim()->set_size(1);
TensorShapeProto value_shape_proto;
value_shape_proto.add_dim()->set_size(5);
value_shape_proto.add_dim()->set_size(3);
value_shape_proto.add_dim()->set_size(7);
TF_EXPECT_OK(
ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
}
} |
1,717 | cpp | tensorflow/tensorflow | autotune_serialize | tensorflow/core/util/autotune_maps/autotune_serialize.cc | tensorflow/core/util/autotune_maps/autotune_serialize_test.cc | #ifndef TENSORFLOW_CORE_UTIL_AUTOTUNE_MAPS_AUTOTUNE_SERIALIZE_H_
#define TENSORFLOW_CORE_UTIL_AUTOTUNE_MAPS_AUTOTUNE_SERIALIZE_H_
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
Status LoadSerializedAutotuneMaps(absl::string_view s);
Status SerializeAutotuneMaps(std::string* output);
void ResetAutotuneMaps();
}
#endif
#include "tensorflow/core/util/autotune_maps/autotune_serialize.h"
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "xla/status_macros.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/util/activation_mode.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_autotune_maps.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
#include "tsl/lib/strings/proto_serialization.h"
#include "tsl/protobuf/dnn.pb.h"
namespace tensorflow {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace {
using stream_executor::dnn::AlgorithmConfig;
using stream_executor::dnn::AlgorithmConfigProto;
using stream_executor::dnn::AlgorithmDesc;
using stream_executor::dnn::AlgorithmProto;
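// Serializes an autotune map into a ConvMapProto. Entries are keyed by the
// deterministic serialization of their ConvParametersProto and emitted in
// sorted order, so identical maps always serialize to identical bytes.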
template <typename Op>
StatusOr<ConvMapProto> ConvMapToProto(
const AutotuneMap<ConvParameters, AutotuneEntry<Op>> &autotune_map) {
ConvMapProto proto;
std::map<string, ConvMapProto::Entry> sorted_map;
for (auto const &p : autotune_map.GetMap()) {
const ConvParameters ¶ms = p.first;
const ConvParametersProto ¶ms_proto = params.proto();
VLOG(1) << "Reading: " << params.ToString();
ConvMapProto::Entry kv;
*kv.mutable_key() = params_proto;
if (p.second.is_algorithm_config()) {
*kv.mutable_value() = p.second.GetAlgorithmConfig().ToProto();
} else {
const auto &runners = p.second.GetOpRunners();
*kv.mutable_value()->mutable_algorithm() =
runners.primary->ToAlgorithmDesc().ToProto();
if (runners.no_scratch_fallback) {
*kv.mutable_value()->mutable_algorithm_no_scratch() =
runners.no_scratch_fallback->ToAlgorithmDesc().ToProto();
}
}
std::string serialized_params;
TF_RET_CHECK(
tsl::SerializeToStringDeterministic(params_proto, &serialized_params));
sorted_map.insert(std::make_pair(std::move(serialized_params), kv));
}
for (auto const &p : sorted_map) {
ConvMapProto::Entry *kv = proto.add_kv_pairs();
*kv = p.second;
}
return proto;
}
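// Restores autotune results from `m` into `autotune_map`. A mismatch between
// the recorded version and the runtime's ConvParameters::kVersion aborts the
// load; entries recorded for devices not visible at runtime are skipped and
// reported once via a warning.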
template <typename Op>
Status PopulateConvMap(
const ConvMapProto &m,
AutotuneMap<ConvParameters, AutotuneEntry<Op>> *autotune_map) {
  if (m.kv_pairs().empty()) {
    return OkStatus();
  }
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithName(se::GpuPlatformName()));
std::vector<std::string> device_descs;
for (int i = 0; i < platform->VisibleDeviceCount(); i++) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::DeviceDescription> device_desc,
platform->DescriptionForDevice(i));
device_descs.push_back(device_desc->model_str());
}
std::set<std::string> unmatched_device_descs;
for (const ConvMapProto::Entry &kv : m.kv_pairs()) {
const ConvParametersProto ¶ms_proto = kv.key();
if (params_proto.version() != ConvParameters::kVersion) {
VLOG(1) << "ConvParametersProto with the incompatible version:"
<< params_proto.DebugString();
return errors::Aborted(
"Aborted because the loaded autotune results for convolution "
"operations have a version different "
"from runtime's version. Expected version: ",
ConvParameters::kVersion,
". Actual version: ", params_proto.version());
}
const AlgorithmConfigProto &algorithm_config_proto = kv.value();
const AlgorithmDesc primary(algorithm_config_proto.algorithm());
const absl::optional<AlgorithmDesc> fallback =
algorithm_config_proto.has_algorithm_no_scratch()
? absl::optional<AlgorithmDesc>(
AlgorithmDesc(algorithm_config_proto.algorithm_no_scratch()))
: absl::nullopt;
bool devices_matched = false;
for (int ordinal = 0; ordinal < device_descs.size(); ordinal++) {
const std::string &desc_str = device_descs[ordinal];
if (desc_str != params_proto.device_identifier()) {
continue;
}
devices_matched = true;
AutotuneEntry<Op> entry;
#if TENSORFLOW_USE_ROCM
entry = AutotuneEntry<Op>(AlgorithmConfig(algorithm_config_proto));
#else
entry = AutotuneEntry<Op>(primary, fallback);
#endif
autotune_map->Insert(ConvParameters(ordinal, params_proto), entry);
}
if (!devices_matched) {
unmatched_device_descs.insert(params_proto.device_identifier());
}
}
if (!unmatched_device_descs.empty()) {
LOG(WARNING) << "Unmatched device id's from AoT autotuning data: "
<< str_util::Join(unmatched_device_descs, ", ")
<< "; existing devices: "
<< str_util::Join(device_descs, ", ");
}
return OkStatus();
}
}
#endif
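// Serializes the convolution and fused-convolution autotune maps into a
// single deterministic AutotuneMapsProto string. On builds without GPU
// support this yields an empty (but still parseable) proto.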
Status SerializeAutotuneMaps(std::string *output) {
AutotuneMapsProto proto;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_ASSIGN_OR_RETURN(*proto.mutable_conv_map(),
ConvMapToProto(*ConvAutotuneMap::GetInstance()));
TF_ASSIGN_OR_RETURN(*proto.mutable_fused_conv_map(),
ConvMapToProto(*FusedConvAutotuneMap::GetInstance()));
#endif
TF_RET_CHECK(tsl::SerializeToStringDeterministic(proto, output));
return absl::OkStatus();
}
Status LoadSerializedAutotuneMaps(absl::string_view s) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
AutotuneMapsProto proto;
if (!proto.ParseFromString(string(s))) {
return errors::InvalidArgument(
"Failed to parse the autotune maps from string.");
}
TF_RETURN_IF_ERROR(
PopulateConvMap(proto.conv_map(), ConvAutotuneMap::GetInstance()));
TF_RETURN_IF_ERROR(PopulateConvMap(proto.fused_conv_map(),
FusedConvAutotuneMap::GetInstance()));
#endif
return absl::OkStatus();
}
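// Drops all in-memory autotune results, e.g. so tests can start from a
// known-empty state.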
void ResetAutotuneMaps() {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
ConvAutotuneMap::GetInstance()->ClearMap();
FusedConvAutotuneMap::GetInstance()->ClearMap();
#endif
}
} | #include "xla/stream_executor/platform_manager.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/util/autotune_maps/autotune_serialize.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/autotune_maps/conv_autotune_maps.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
using stream_executor::dnn::AlgorithmConfig;
using stream_executor::dnn::AlgorithmDesc;
using stream_executor::gpu::GpuDriver;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
se::StreamExecutor* GetStreamExec() {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
CHECK_GT(platform->VisibleDeviceCount(), 0);
return platform->ExecutorForDevice(0).value();
}
TEST(AutotuneSerializeTest, Empty) {
TF_CHECK_OK(GpuDriver::Init());
ResetAutotuneMaps();
std::string output;
TF_CHECK_OK(SerializeAutotuneMaps(&output));
TF_CHECK_OK(LoadSerializedAutotuneMaps(output));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 0);
}
TEST(AutotuneSerializeTest, Consistency) {
TF_CHECK_OK(GpuDriver::Init());
ResetAutotuneMaps();
ConvParameters conv_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1};
ConvParameters fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kNone,
false},
};
ConvParameters contrib_fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kRelu,
true}};
AlgorithmDesc algorithm(1, true);
AlgorithmDesc algorithm_no_scratch(1, true);
AutotuneEntry<se::dnn::ConvOp> example_a(algorithm, algorithm_no_scratch);
ConvAutotuneMap::GetInstance()->Insert(conv_params_example_a, example_a);
ConvAutotuneMap::GetInstance()->Insert(fused_params_example_a, example_a);
ConvAutotuneMap::GetInstance()->Insert(contrib_fused_params_example_a,
example_a);
std::string serialized_string;
TF_CHECK_OK(SerializeAutotuneMaps(&serialized_string));
ResetAutotuneMaps();
TF_CHECK_OK(LoadSerializedAutotuneMaps(serialized_string));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 3);
AutotuneEntry<se::dnn::ConvOp> entry;
EXPECT_TRUE(
ConvAutotuneMap::GetInstance()->Find(conv_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
EXPECT_TRUE(
ConvAutotuneMap::GetInstance()->Find(fused_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
EXPECT_TRUE(ConvAutotuneMap::GetInstance()->Find(
contrib_fused_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
}
TEST(AutotuneSerializeTest, VersionControl) {
TF_CHECK_OK(GpuDriver::Init());
ResetAutotuneMaps();
ConvParameters fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kNone,
false},
ConvParameters::kVersion - 1};
AlgorithmDesc algorithm(1, true);
AlgorithmDesc algorithm_no_scratch(1, true);
AlgorithmConfig algorithm_config_example_a(algorithm, 1,
algorithm_no_scratch);
ConvAutotuneMap::GetInstance()->Insert(
fused_params_example_a,
AutotuneEntry<se::dnn::ConvOp>(algorithm_config_example_a));
std::string serialized_string;
TF_CHECK_OK(SerializeAutotuneMaps(&serialized_string));
ResetAutotuneMaps();
EXPECT_THAT(
LoadSerializedAutotuneMaps(serialized_string),
StatusIs(error::ABORTED,
HasSubstr("Aborted because the loaded autotune results")));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 0);
}
}
}
#endif |
1,718 | cpp | tensorflow/tensorflow | descriptor_pool_registry | tensorflow/core/util/proto/descriptor_pool_registry.cc | tensorflow/core/util/proto/descriptor_pool_registry_test.cc | #ifndef TENSORFLOW_CORE_UTIL_PROTO_DESCRIPTOR_POOL_REGISTRY_H_
#define TENSORFLOW_CORE_UTIL_PROTO_DESCRIPTOR_POOL_REGISTRY_H_
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
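// Process-wide registry mapping a "source" string to a factory that yields a
// protobuf DescriptorPool (either a borrowed pointer or an owned instance).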
class DescriptorPoolRegistry {
public:
typedef std::function<Status(
tensorflow::protobuf::DescriptorPool const** desc_pool,
std::unique_ptr<tensorflow::protobuf::DescriptorPool>* owned_desc_pool)>
DescriptorPoolFn;
static DescriptorPoolRegistry* Global();
DescriptorPoolFn* Get(const string& source);
void Register(const string& source, const DescriptorPoolFn& pool_fn);
private:
std::map<string, DescriptorPoolFn> fns_;
};
namespace descriptor_pool_registration {
class DescriptorPoolRegistration {
public:
DescriptorPoolRegistration(
const string& source,
const DescriptorPoolRegistry::DescriptorPoolFn& pool_fn) {
DescriptorPoolRegistry::Global()->Register(source, pool_fn);
}
};
}
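// Registers `pool_fn` under `source` at static-initialization time; the
// __COUNTER__ indirection gives each registration object a unique name so
// multiple registrations can live in one translation unit.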
#define REGISTER_DESCRIPTOR_POOL(source, pool_fn) \
REGISTER_DESCRIPTOR_POOL_UNIQ_HELPER(__COUNTER__, source, pool_fn)
#define REGISTER_DESCRIPTOR_POOL_UNIQ_HELPER(ctr, source, pool_fn) \
REGISTER_DESCRIPTOR_POOL_UNIQ(ctr, source, pool_fn)
#define REGISTER_DESCRIPTOR_POOL_UNIQ(ctr, source, pool_fn) \
static descriptor_pool_registration::DescriptorPoolRegistration \
descriptor_pool_registration_fn_##ctr(source, pool_fn)
}
#endif
#include <string>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/proto/descriptor_pool_registry.h"
namespace tensorflow {
DescriptorPoolRegistry* DescriptorPoolRegistry::Global() {
static DescriptorPoolRegistry* registry = new DescriptorPoolRegistry;
return registry;
}
DescriptorPoolRegistry::DescriptorPoolFn* DescriptorPoolRegistry::Get(
const string& source) {
auto found = fns_.find(source);
if (found == fns_.end()) return nullptr;
return &found->second;
}
void DescriptorPoolRegistry::Register(
const string& source,
const DescriptorPoolRegistry::DescriptorPoolFn& pool_fn) {
auto existing = Get(source);
CHECK_EQ(existing, nullptr)
<< "descriptor pool for source: " << source << " already registered";
  fns_.insert({source, pool_fn});
}
} | #include "tensorflow/core/util/proto/descriptor_pool_registry.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
struct Value {
static Status Function(
tensorflow::protobuf::DescriptorPool const** desc_pool,
std::unique_ptr<tensorflow::protobuf::DescriptorPool>* owned_desc_pool) {
return absl::OkStatus();
}
};
REGISTER_DESCRIPTOR_POOL("TEST POOL 1", Value::Function);
REGISTER_DESCRIPTOR_POOL("TEST POOL 2", Value::Function);
}
TEST(DescriptorPoolRegistryTest, TestBasic) {
EXPECT_EQ(DescriptorPoolRegistry::Global()->Get("NON-EXISTENT"), nullptr);
auto pool1 = DescriptorPoolRegistry::Global()->Get("TEST POOL 1");
EXPECT_NE(pool1, nullptr);
auto pool2 = DescriptorPoolRegistry::Global()->Get("TEST POOL 2");
EXPECT_NE(pool2, nullptr);
}
} |
1,719 | cpp | tensorflow/tensorflow | proto_utils | tensorflow/core/util/proto/proto_utils.cc | tensorflow/core/util/proto/proto_utils_test.cc | #ifndef XLA_TSL_UTIL_PROTO_PROTO_UTILS_H_
#define XLA_TSL_UTIL_PROTO_PROTO_UTILS_H_
#include "google/protobuf/duration.pb.h"
#include "absl/time/time.h"
namespace tsl {
namespace proto_utils {
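// Splits a Duration into whole seconds plus leftover nanoseconds.
// absl::IDivDuration returns the integral quotient and stores the remainder
// back into `duration`, so the second call only sees the sub-second part.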
inline google::protobuf::Duration ToDurationProto(absl::Duration duration) {
google::protobuf::Duration proto;
proto.set_seconds(absl::IDivDuration(duration, absl::Seconds(1), &duration));
proto.set_nanos(
absl::IDivDuration(duration, absl::Nanoseconds(1), &duration));
return proto;
}
inline absl::Duration FromDurationProto(google::protobuf::Duration proto) {
return absl::Seconds(proto.seconds()) + absl::Nanoseconds(proto.nanos());
}
}
}
#endif
#include "tensorflow/core/util/proto/proto_utils.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace proto_utils {
using tensorflow::protobuf::FieldDescriptor;
using tensorflow::protobuf::internal::WireFormatLite;
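// Returns whether a proto field of `field_type` may be decoded into a tensor
// of `dtype`. Widening conversions (e.g. an int32 field into a DT_INT64
// tensor) are permitted; lossy ones are not.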
bool IsCompatibleType(FieldDescriptor::Type field_type, DataType dtype) {
switch (field_type) {
case WireFormatLite::TYPE_DOUBLE:
return dtype == tensorflow::DT_DOUBLE;
case WireFormatLite::TYPE_FLOAT:
return dtype == tensorflow::DT_FLOAT || dtype == tensorflow::DT_DOUBLE;
case WireFormatLite::TYPE_INT64:
return dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_UINT64:
return dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_INT32:
return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_FIXED64:
return dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_FIXED32:
return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_BOOL:
return dtype == tensorflow::DT_BOOL;
case WireFormatLite::TYPE_STRING:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_GROUP:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_MESSAGE:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_BYTES:
return dtype == tensorflow::DT_STRING;
case WireFormatLite::TYPE_UINT32:
return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
case WireFormatLite::TYPE_ENUM:
return dtype == tensorflow::DT_INT32;
case WireFormatLite::TYPE_SFIXED32:
return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_SFIXED64:
return dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_SINT32:
return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
case WireFormatLite::TYPE_SINT64:
return dtype == tensorflow::DT_INT64;
  }
  return false;
}
Status ParseTextFormatFromString(absl::string_view input,
protobuf::Message* output) {
DCHECK(output != nullptr) << "output must be non NULL";
if (output == nullptr) {
LOG(ERROR) << "output must be non NULL";
return Status(absl::StatusCode::kInvalidArgument,
"output must be non NULL");
}
string err;
StringErrorCollector err_collector(&err, true);
protobuf::TextFormat::Parser parser;
parser.RecordErrorsTo(&err_collector);
if (!parser.ParseFromString(string(input), output)) {
return Status(absl::StatusCode::kInvalidArgument, err);
}
return absl::OkStatus();
}
StringErrorCollector::StringErrorCollector(string* error_text)
: StringErrorCollector(error_text, false) {}
StringErrorCollector::StringErrorCollector(string* error_text,
bool one_indexing)
: error_text_(error_text), index_offset_(one_indexing ? 1 : 0) {
DCHECK(error_text_ != nullptr) << "error_text must be non NULL";
if (error_text_ == nullptr) {
LOG(ERROR) << "error_text must be non NULL";
}
}
void StringErrorCollector::AddError(int line, int column,
const string& message) {
if (error_text_ != nullptr) {
absl::SubstituteAndAppend(error_text_, "$0($1): $2\n", line + index_offset_,
column + index_offset_, message);
}
}
void StringErrorCollector::AddWarning(int line, int column,
const string& message) {
AddError(line, column, message);
}
}
} | #include "tensorflow/core/util/proto/proto_utils.h"
#include <gmock/gmock.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using proto_utils::ParseTextFormatFromString;
using proto_utils::StringErrorCollector;
using ::testing::ContainsRegex;
TEST(ParseTextFormatFromStringTest, Success) {
protobuf::DescriptorProto output;
TF_ASSERT_OK(ParseTextFormatFromString("name: \"foo\"", &output));
EXPECT_EQ(output.name(), "foo");
}
TEST(ParseTextFormatFromStringTest, ErrorOnInvalidSyntax) {
protobuf::DescriptorProto output;
Status status = ParseTextFormatFromString("name: foo", &output);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_THAT(status.message(), ContainsRegex("foo"));
EXPECT_FALSE(output.has_name());
}
TEST(ParseTextFormatFromStringTest, ErrorOnUnknownFieldName) {
protobuf::DescriptorProto output;
Status status = ParseTextFormatFromString("badname: \"foo\"", &output);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_THAT(status.message(), ContainsRegex("badname"));
EXPECT_FALSE(output.has_name());
}
TEST(ParseTextFormatFromStringTest, DiesOnNullOutputPointer) {
#ifndef NDEBUG
ASSERT_DEATH(ParseTextFormatFromString("foo", nullptr).IgnoreError(),
"output.*non NULL");
#else
Status status = ParseTextFormatFromString("foo", nullptr);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_THAT(status.message(), ContainsRegex("output.*non NULL"));
#endif
}
TEST(StringErrorCollectorTest, AppendsError) {
string err;
StringErrorCollector collector(&err);
collector.AddError(1, 2, "foo");
EXPECT_EQ("1(2): foo\n", err);
}
TEST(StringErrorCollectorTest, AppendsWarning) {
string err;
StringErrorCollector collector(&err);
collector.AddWarning(1, 2, "foo");
EXPECT_EQ("1(2): foo\n", err);
}
TEST(StringErrorCollectorTest, AppendsMultipleError) {
string err;
StringErrorCollector collector(&err);
collector.AddError(1, 2, "foo");
collector.AddError(3, 4, "bar");
EXPECT_EQ("1(2): foo\n3(4): bar\n", err);
}
TEST(StringErrorCollectorTest, AppendsMultipleWarning) {
string err;
StringErrorCollector collector(&err);
collector.AddWarning(1, 2, "foo");
collector.AddWarning(3, 4, "bar");
EXPECT_EQ("1(2): foo\n3(4): bar\n", err);
}
TEST(StringErrorCollectorTest, OffsetWorks) {
string err;
StringErrorCollector collector(&err, true);
collector.AddError(1, 2, "foo");
collector.AddWarning(3, 4, "bar");
EXPECT_EQ("2(3): foo\n4(5): bar\n", err);
}
TEST(StringErrorCollectorTest, DiesOnNullErrorText) {
#ifndef NDEBUG
ASSERT_DEATH(StringErrorCollector(nullptr), "error_text.*non NULL");
#else
StringErrorCollector collector(nullptr);
collector.AddError(1, 2, "foo");
collector.AddWarning(3, 4, "bar");
#endif
}
} |
1,720 | cpp | tensorflow/tensorflow | tensor_bundle | tensorflow/core/util/tensor_bundle/tensor_bundle.cc | tensorflow/core/util/tensor_bundle/tensor_bundle_test.cc | #ifndef TENSORFLOW_CORE_UTIL_TENSOR_BUNDLE_TENSOR_BUNDLE_H_
#define TENSORFLOW_CORE_UTIL_TENSOR_BUNDLE_TENSOR_BUNDLE_H_
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_slice.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/io/cache.h"
#include "tensorflow/core/lib/io/inputbuffer.h"
#include "tensorflow/core/lib/io/iterator.h"
#include "tensorflow/core/lib/io/table.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/tensor_bundle.pb.h"
#include "tensorflow/core/util/tensor_slice_set.h"
#include "tsl/lib/io/buffered_file.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
extern const int kTensorBundleMinProducer;
extern const int kTensorBundleMinConsumer;
extern const int kTensorBundleVersion;
extern const char* const kHeaderEntryKey;
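// Writes tensors (or slices of tensors) to a "bundle": a metadata file of
// BundleEntryProtos keyed by tensor name plus one or more data files holding
// the raw tensor bytes.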
class BundleWriter {
public:
struct Options {
Options() {}
int data_alignment{1};
};
BundleWriter(Env* env, absl::string_view prefix,
const Options& options = Options());
Status Add(absl::string_view key, const Tensor& val);
Status AddSlice(absl::string_view full_tensor_key,
const TensorShape& full_tensor_shape,
const TensorSlice& slice_spec, const Tensor& slice_tensor);
Status Finish() TF_MUST_USE_RESULT;
Status status() const { return status_; }
private:
Env* const env_;
const Options options_;
const std::string prefix_;
std::string metadata_path_;
std::string data_path_;
bool use_temp_file_;
std::unique_ptr<tsl::BufferedWritableFile> out_;
int64_t size_;
std::map<std::string, BundleEntryProto> entries_;
Status status_;
BundleWriter(const BundleWriter&) = delete;
void operator=(const BundleWriter&) = delete;
};
Status MergeBundles(Env* env, absl::Span<const tstring> prefixes,
absl::string_view merged_prefix,
bool allow_missing_files = false);
class BundleCache;
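// Reads tensors from a bundle produced by BundleWriter. Lookup() fetches by
// key, while the Seek()/Next()/Valid() interface iterates entries in key
// order; slices of partitioned tensors can be read back with LookupSlice().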
class BundleReader {
public:
BundleReader(Env* const env, absl::string_view prefix,
bool enable_multi_threading_for_testing = false);
struct Options {
BundleCache* cache = nullptr;
bool enable_multi_threading_for_testing = false;
};
BundleReader(Env* env, absl::string_view prefix, Options options);
~BundleReader();
Status status() const { return status_; }
bool Contains(absl::string_view key);
template <class T>
Status SortForSequentialAccess(
std::vector<T>& container,
absl::FunctionRef<std::string(const T&)> get_key);
Status LookupDtypeAndShape(absl::string_view key, DataType* dtype,
TensorShape* shape) TF_MUST_USE_RESULT;
Status LookupTensorShape(absl::string_view key,
TensorShape* shape) TF_MUST_USE_RESULT;
Status Lookup(absl::string_view key, Tensor* val) TF_MUST_USE_RESULT;
Status ReadCurrent(Tensor* val) TF_MUST_USE_RESULT;
Status LookupTensorSlices(absl::string_view key,
std::vector<TensorSlice>* slices)
TF_MUST_USE_RESULT;
Status LookupSlice(absl::string_view full_tensor_key,
const TensorSlice& slice_spec,
Tensor* val) TF_MUST_USE_RESULT;
  void Seek(absl::string_view key) { iter_->Seek(key); }
void Next() const { iter_->Next(); }
bool Valid() const { return iter_->Valid(); }
absl::string_view key() const { return iter_->key(); }
absl::string_view value() const { return iter_->value(); }
std::string DebugString();
private:
Status GetBundleEntryProto(absl::string_view key,
BundleEntryProto* entry) TF_MUST_USE_RESULT;
Status GetValue(const BundleEntryProto& entry,
Tensor* val) TF_MUST_USE_RESULT;
Status GetSliceValue(absl::string_view full_tensor_key,
const BundleEntryProto& full_tensor_entry,
const TensorSlice& slice_spec,
Tensor* val) TF_MUST_USE_RESULT;
Env* env_;
const std::string prefix_;
std::unique_ptr<BundleCache> owned_cache_;
BundleCache* cache_;
Status status_;
RandomAccessFile* metadata_;
table::Table* table_;
table::Cache* index_cache_;
table::Iterator* iter_;
std::unordered_map<int32_t, io::InputBuffer*> data_;
std::unordered_map<std::string, checkpoint::TensorSliceSet*> tensor_slices_;
int num_shards_;
bool need_to_swap_bytes_;
friend class TensorBundleAlignmentTest;
bool enable_multi_threading_for_testing_ = false;
BundleReader(const BundleReader&) = delete;
void operator=(const BundleReader&) = delete;
};
template <class T>
Status BundleReader::SortForSequentialAccess(
std::vector<T>& container,
absl::FunctionRef<std::string(const T&)> get_key) {
struct FileOffset {
int32_t shard_id;
int64_t offset;
};
absl::flat_hash_map<std::string, FileOffset> file_offsets;
for (const T& element : container) {
BundleEntryProto entry;
TF_RETURN_IF_ERROR(GetBundleEntryProto(get_key(element), &entry));
file_offsets[get_key(element)] = {entry.shard_id(), entry.offset()};
}
absl::c_sort(container, [&get_key, &file_offsets](const T& a, const T& b) {
const FileOffset& file_offset_a = file_offsets[get_key(a)];
const FileOffset& file_offset_b = file_offsets[get_key(b)];
if (file_offset_a.shard_id == file_offset_b.shard_id) {
return file_offset_a.offset < file_offset_b.offset;
} else {
return file_offset_a.shard_id < file_offset_b.shard_id;
}
});
return absl::OkStatus();
}
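// Caches opened RandomAccessFiles by name so readers sharing a cache open
// each shard at most once; EnsureOpened guards each open with an
// absl::once_flag, making concurrent GetFile() calls safe.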
class BundleCache {
public:
explicit BundleCache(Env* env);
Status GetFile(const std::string& fname, RandomAccessFile** file);
private:
struct FileState {
absl::once_flag once;
std::unique_ptr<RandomAccessFile> file;
Status open_status;
};
FileState* EnsureOpened(std::string name);
Env* const env_;
absl::Mutex mu_;
absl::flat_hash_map<std::string, std::unique_ptr<FileState>> opened_files_
TF_GUARDED_BY(mu_);
};
}
#endif
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/util/byte_swap_array.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/framework/versions.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/coding.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/crc32c.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/cord.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/core/util/tensor_slice_util.h"
#include "tsl/lib/io/buffered_file.h"
#ifdef PLATFORM_WINDOWS
#undef DeleteFile
#endif
namespace tensorflow {
const int kTensorBundleMinProducer = 0;
const int kTensorBundleMinConsumer = 0;
const int kTensorBundleVersion = 1;
static const int kBufferSize = 1024 * 1024;
const char* const kHeaderEntryKey = "";
const int64_t kLargeTensorThreshold = static_cast<int64_t>(1) << 32;
const int kMaxFileReadThreads = 8;
const int64_t kMinSectionSize = static_cast<int64_t>(1) << 31;
namespace {
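// Reads `num_elements` strings stored at [offset, offset + size) into
// `destination`, extending `actual_crc32c` over the varint lengths, the
// masked length checksum, and the string bytes, mirroring the layout that
// WriteStringTensor below produces.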
Status ReadStringTensor(io::InputBuffer* buffered_file, size_t num_elements,
size_t offset, size_t size, tstring* destination,
uint32* actual_crc32c, bool need_to_swap_bytes) {
if (size == 0) return absl::OkStatus();
CHECK_GT(size, 0);
TF_RETURN_IF_ERROR(buffered_file->Seek(offset));
TF_RETURN_IF_ERROR(buffered_file->Hint(size));
std::vector<uint64> string_lengths(num_elements);
for (size_t i = 0; i < num_elements; ++i) {
TF_RETURN_IF_ERROR(buffered_file->ReadVarint64(&string_lengths[i]));
if (string_lengths[i] <= UINT32_MAX) {
uint32 elem_size_uint32 = static_cast<uint32>(string_lengths[i]);
if (need_to_swap_bytes) {
elem_size_uint32 = BYTE_SWAP_32(elem_size_uint32);
}
*actual_crc32c = crc32c::Extend(
*actual_crc32c, reinterpret_cast<const char*>(&elem_size_uint32),
sizeof(uint32));
} else {
uint64 length = string_lengths[i];
if (need_to_swap_bytes) {
length = BYTE_SWAP_64(length);
}
*actual_crc32c =
crc32c::Extend(*actual_crc32c, reinterpret_cast<const char*>(&length),
sizeof(uint64));
}
}
if (offset + size < buffered_file->Tell()) {
return errors::DataLoss("String lengths longer than expected offset ",
offset + size);
}
uint32 raw_length_checksum = 0;
uint32 length_checksum = 0;
size_t unused_bytes_read = 0;
TF_RETURN_IF_ERROR(buffered_file->ReadNBytes(
sizeof(uint32), reinterpret_cast<char*>(&raw_length_checksum),
&unused_bytes_read));
length_checksum = need_to_swap_bytes ? BYTE_SWAP_32(raw_length_checksum)
: raw_length_checksum;
if (crc32c::Unmask(length_checksum) != *actual_crc32c) {
return errors::DataLoss(
"The length checksum does not match: expected ",
strings::Printf("%08u", crc32c::Unmask(length_checksum)),
" but actual is ", strings::Printf("%08u", *actual_crc32c));
}
*actual_crc32c = crc32c::Extend(*actual_crc32c,
reinterpret_cast<char*>(&raw_length_checksum),
sizeof(uint32));
for (size_t i = 0; i < num_elements; ++i) {
const uint64 string_length = string_lengths[i];
tstring* buffer = &destination[i];
buffer->resize(string_length);
size_t bytes_read = 0;
TF_RETURN_IF_ERROR(
buffered_file->ReadNBytes(string_length, &(*buffer)[0], &bytes_read));
*actual_crc32c = crc32c::Extend(*actual_crc32c, buffer->data(), bytes_read);
}
return absl::OkStatus();
}
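// Reads a DT_VARIANT tensor: each element is a length-prefixed serialized
// VariantTensorDataProto followed by a masked running checksum that is
// verified before the next element is decoded.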
Status ReadVariantTensor(io::InputBuffer* buffered_file, Tensor* ret,
size_t offset, size_t size, uint32* actual_crc32c) {
if (size == 0) return absl::OkStatus();
size_t num_elements = ret->NumElements();
TF_RETURN_IF_ERROR(buffered_file->Seek(offset));
TF_RETURN_IF_ERROR(buffered_file->Hint(size));
for (size_t i = 0; i < num_elements; ++i) {
uint64 string_length = 0;
TF_RETURN_IF_ERROR(buffered_file->ReadVarint64(&string_length));
*actual_crc32c = crc32c::Extend(
*actual_crc32c, reinterpret_cast<const char*>(&string_length),
sizeof(uint64));
string buffer;
buffer.resize(string_length);
size_t bytes_read = 0;
TF_RETURN_IF_ERROR(
buffered_file->ReadNBytes(string_length, &buffer[0], &bytes_read));
*actual_crc32c = crc32c::Extend(*actual_crc32c, buffer.data(), bytes_read);
VariantTensorDataProto proto;
if (!proto.ParseFromString(buffer)) {
return errors::DataLoss("Unable to parse VariantTensorDataProto from ",
"buffer of size ", string_length, ". ",
"Bundle entry offset: ", offset, " size: ", size);
}
Variant v = proto;
if (!DecodeUnaryVariant(&v)) {
return errors::Internal("Could not decode variant with type_name: \"",
v.TypeName(), "\". Perhaps you forgot to ",
"register a decoder via ",
"REGISTER_UNARY_VARIANT_DECODE_FUNCTION?");
}
uint32 checksum = 0;
size_t unused_bytes_read = 0;
TF_RETURN_IF_ERROR(buffered_file->ReadNBytes(
sizeof(uint32), reinterpret_cast<char*>(&checksum),
&unused_bytes_read));
if (crc32c::Unmask(checksum) != *actual_crc32c) {
return errors::DataLoss(
"The checksum after Variant ", i, " does not match.",
" Expected: ", strings::Printf("%08u", crc32c::Unmask(checksum)),
" Actual: ", strings::Printf("%08u", *actual_crc32c));
}
*actual_crc32c = crc32c::Extend(
*actual_crc32c, reinterpret_cast<char*>(&checksum), sizeof(uint32));
ret->flat<Variant>()(i) = std::move(v);
}
return absl::OkStatus();
}
char* GetBackingBuffer(const Tensor& val) {
CHECK(DataTypeCanUseMemcpy(val.dtype())) << val.dtype();
return const_cast<char*>(val.tensor_data().data());
}
tstring* GetStringBackingBuffer(const Tensor& val) {
CHECK_EQ(DT_STRING, val.dtype());
return const_cast<tstring*>(val.flat<tstring>().data());
}
Status ParseEntryProto(StringPiece key, StringPiece value,
protobuf::MessageLite* out) {
if (!out->ParseFromArray(value.data(), value.size())) {
return errors::DataLoss("Entry for key ", key, " not parseable.");
}
return absl::OkStatus();
}
Status WriteTensor(const Tensor& val, tsl::BufferedWritableFile* out,
size_t* bytes_written) {
DCHECK_NE(val.dtype(), DT_STRING);
DCHECK_NE(val.dtype(), DT_VARIANT);
*bytes_written = val.TotalBytes();
char* buf = GetBackingBuffer(val);
VLOG(1) << "Appending " << *bytes_written << " bytes to file";
return out->Append(StringPiece(buf, *bytes_written));
}
Status WriteStringTensor(const Tensor& val, tsl::BufferedWritableFile* out,
size_t* bytes_written, uint32* crc32c) {
DCHECK_EQ(val.dtype(), DT_STRING);
const tstring* strings = GetStringBackingBuffer(val);
string lengths;
lengths.reserve(val.NumElements());
*crc32c = 0;
for (int64_t i = 0; i < val.NumElements(); ++i) {
const tstring* elem = &strings[i];
DCHECK_EQ(elem->size(), static_cast<uint64>(elem->size()));
const uint64 elem_size = static_cast<uint64>(elem->size());
core::PutVarint64(&lengths, elem_size);
if (elem_size <= UINT32_MAX) {
const uint32 elem_size_uint32 = static_cast<uint32>(elem_size);
*crc32c = crc32c::Extend(*crc32c,
reinterpret_cast<const char*>(&elem_size_uint32),
sizeof(uint32));
} else {
*crc32c = crc32c::Extend(
*crc32c, reinterpret_cast<const char*>(&elem_size), sizeof(uint64));
}
}
TF_RETURN_IF_ERROR(out->Append(lengths));
*bytes_written = lengths.size();
const uint32 length_checksum = crc32c::Mask(*crc32c);
TF_RETURN_IF_ERROR(out->Append(StringPiece(
reinterpret_cast<const char*>(&length_checksum), sizeof(uint32))));
*crc32c = crc32c::Extend(
*crc32c, reinterpret_cast<const char*>(&length_checksum), sizeof(uint32));
*bytes_written += sizeof(uint32);
for (int64_t i = 0; i < val.NumElements(); ++i) {
const tstring* string = &strings[i];
TF_RETURN_IF_ERROR(out->Append(*string));
*bytes_written += string->size();
*crc32c = crc32c::Extend(*crc32c, string->data(), string->size());
}
return absl::OkStatus();
}
Status WriteVariantTensor(const Tensor& val, tsl::BufferedWritableFile* out,
size_t* bytes_written, uint32* crc32c) {
DCHECK_EQ(val.dtype(), DT_VARIANT);
*crc32c = 0;
*bytes_written = 0;
for (int64_t i = 0; i < val.NumElements(); ++i) {
VariantTensorData data;
val.flat<Variant>()(i).Encode(&data);
VariantTensorDataProto proto;
data.ToProto(&proto);
string elem;
if (!proto.SerializeToString(&elem)) {
return errors::Unknown(
"Failed to serialize tensor data of size ", proto.ByteSizeLong(),
". Tensor: ", val.flat<Variant>()(i).DebugString());
}
DCHECK_EQ(elem.size(), static_cast<uint64>(elem.size()));
const auto elem_size = static_cast<uint64>(elem.size());
string len;
core::PutVarint64(&len, elem_size);
TF_RETURN_IF_ERROR(out->Append(len));
*crc32c = crc32c::Extend(*crc32c, reinterpret_cast<const char*>(&elem_size),
sizeof(uint64));
*bytes_written += len.size();
TF_RETURN_IF_ERROR(out->Append(elem));
*crc32c = crc32c::Extend(*crc32c, elem.data(), elem.size());
*bytes_written += elem.size();
const uint32 length_checksum = crc32c::Mask(*crc32c);
TF_RETURN_IF_ERROR(out->Append(StringPiece(
reinterpret_cast<const char*>(&length_checksum), sizeof(uint32))));
*crc32c =
crc32c::Extend(*crc32c, reinterpret_cast<const char*>(&length_checksum),
sizeof(uint32));
*bytes_written += sizeof(uint32);
}
return absl::OkStatus();
}
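// A slice counts as "full" if it is explicitly full or if its extents cover
// the entire tensor shape.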
bool IsFullSlice(const TensorSlice& slice_spec,
const TensorShape& full_tensor_shape) {
if (slice_spec.IsFull()) {
return true;
} else {
TensorShape sliced_shape;
slice_spec.SliceTensorShape(full_tensor_shape, &sliced_shape).IgnoreError();
return sliced_shape == full_tensor_shape;
}
}
Status CorruptFileError(const Status& in_status, const string& filename,
const string& detail) {
if (in_status.ok()) {
return errors::Internal("Unable to read file (", filename,
"). Perhaps the file is corrupt or was produced by "
"a newer version of TensorFlow with format changes "
"(",
detail, ")");
}
return Status(
in_status.code(),
strings::StrCat("Unable to read file (", filename,
"). Perhaps the file is corrupt or was produced by a | #include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include <random>
#include <string>
#include <vector>
#if defined(_WIN32)
#include <windows.h>
#endif
#include "absl/status/status.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/io/table_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/tensor_bundle.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
using ::testing::ElementsAre;
namespace {
string Prefix(const string& prefix) {
return strings::StrCat(testing::TmpDir(), "/", prefix);
}
string TestdataPrefix(const string& prefix) {
return strings::StrCat(testing::TensorFlowSrcRoot(),
"/core/util/tensor_bundle/testdata/", prefix);
}
template <typename T>
Tensor Constant(T v, TensorShape shape) {
Tensor ret(DataTypeToEnum<T>::value, shape);
ret.flat<T>().setConstant(v);
return ret;
}
template <typename T>
Tensor Constant_2x3(T v) {
return Constant(v, TensorShape({2, 3}));
}
template <typename T>
Tensor Constant_100x100(T v) {
return Constant(v, TensorShape({100, 100}));
}
Tensor ByteSwap(Tensor t) {
Tensor ret = tensor::DeepCopy(t);
TF_EXPECT_OK(ByteSwapTensor(&ret));
return ret;
}
template <typename T>
void Expect(BundleReader* reader, const string& key,
const Tensor& expected_val) {
EXPECT_TRUE(reader->Contains(key));
DataType dtype;
TensorShape shape;
TF_ASSERT_OK(reader->LookupDtypeAndShape(key, &dtype, &shape));
EXPECT_EQ(expected_val.dtype(), dtype);
EXPECT_EQ(expected_val.shape(), shape);
Tensor val(expected_val.dtype(), shape);
TF_ASSERT_OK(reader->Lookup(key, &val));
test::ExpectTensorEqual<T>(val, expected_val);
}
template <class T>
void ExpectVariant(BundleReader* reader, const string& key,
const Tensor& expected_t) {
EXPECT_TRUE(reader->Contains(key));
DataType dtype;
TensorShape shape;
TF_ASSERT_OK(reader->LookupDtypeAndShape(key, &dtype, &shape));
EXPECT_EQ(expected_t.dtype(), dtype);
EXPECT_EQ(expected_t.shape(), shape);
Tensor actual_t(dtype, shape);
TF_ASSERT_OK(reader->Lookup(key, &actual_t));
for (int i = 0; i < expected_t.NumElements(); i++) {
Variant actual_var = actual_t.flat<Variant>()(i);
Variant expected_var = expected_t.flat<Variant>()(i);
EXPECT_EQ(actual_var.TypeName(), expected_var.TypeName());
auto* actual_val = actual_var.get<T>();
auto* expected_val = expected_var.get<T>();
EXPECT_EQ(*expected_val, *actual_val);
}
}
template <typename T>
void ExpectNext(BundleReader* reader, const Tensor& expected_val) {
EXPECT_TRUE(reader->Valid());
reader->Next();
TF_ASSERT_OK(reader->status());
Tensor val;
TF_ASSERT_OK(reader->ReadCurrent(&val));
test::ExpectTensorEqual<T>(val, expected_val);
}
std::vector<string> AllTensorKeys(BundleReader* reader) {
std::vector<string> ret;
reader->Seek(kHeaderEntryKey);
reader->Next();
for (; reader->Valid(); reader->Next()) {
ret.emplace_back(reader->key());
}
return ret;
}
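// Rewrites the metadata table of `prefix` with the endianness flag in its
// header flipped, simulating a checkpoint written on a machine of the
// opposite byte order.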
Status FlipEndiannessBit(const string& prefix) {
Env* env = Env::Default();
const string metadata_tmp_path = Prefix("some_tmp_path");
std::unique_ptr<WritableFile> metadata_file;
TF_RETURN_IF_ERROR(env->NewWritableFile(metadata_tmp_path, &metadata_file));
std::unique_ptr<table::TableBuilder> builder;
{
const string filename = MetaFilename(prefix);
uint64 file_size;
TF_RETURN_IF_ERROR(env->GetFileSize(filename, &file_size));
std::unique_ptr<RandomAccessFile> file;
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(filename, &file));
table::Table* table = nullptr;
TF_RETURN_IF_ERROR(
table::Table::Open(table::Options(), file.get(), file_size, &table));
std::unique_ptr<table::Table> table_deleter(table);
std::unique_ptr<table::Iterator> iter(table->NewIterator());
iter->Seek(kHeaderEntryKey);
CHECK(iter->Valid());
BundleHeaderProto header;
CHECK(header.ParseFromArray(iter->value().data(), iter->value().size()));
if (header.endianness() == BundleHeaderProto::LITTLE) {
header.set_endianness(BundleHeaderProto::BIG);
} else {
header.set_endianness(BundleHeaderProto::LITTLE);
}
builder.reset(
new table::TableBuilder(table::Options(), metadata_file.get()));
builder->Add(iter->key(), header.SerializeAsString());
iter->Next();
for (; iter->Valid(); iter->Next())
builder->Add(iter->key(), iter->value());
}
TF_RETURN_IF_ERROR(builder->Finish());
TF_RETURN_IF_ERROR(env->RenameFile(metadata_tmp_path, MetaFilename(prefix)));
return metadata_file->Close();
}
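// Round-trips four 2x3 constants through two bundles, checks keyed lookup
// and sequential iteration, then merges the bundles and verifies the
// combined, lexicographically ordered contents.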
template <typename T>
void TestBasic() {
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("foo_003", Constant_2x3(T(3))));
TF_EXPECT_OK(writer.Add("foo_000", Constant_2x3(T(0))));
TF_EXPECT_OK(writer.Add("foo_002", Constant_2x3(T(2))));
TF_EXPECT_OK(writer.Add("foo_001", Constant_2x3(T(1))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "foo_000", Constant_2x3(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
{
BundleWriter writer(Env::Default(), Prefix("bar"));
TF_EXPECT_OK(writer.Add("bar_003", Constant_2x3(T(3))));
TF_EXPECT_OK(writer.Add("bar_000", Constant_2x3(T(0))));
TF_EXPECT_OK(writer.Add("bar_002", Constant_2x3(T(2))));
TF_EXPECT_OK(writer.Add("bar_001", Constant_2x3(T(1))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003"}));
Expect<T>(&reader, "bar_003", Constant_2x3(T(3)));
Expect<T>(&reader, "bar_002", Constant_2x3(T(2)));
Expect<T>(&reader, "bar_001", Constant_2x3(T(1)));
Expect<T>(&reader, "bar_000", Constant_2x3(T(0)));
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
TF_ASSERT_OK(MergeBundles(Env::Default(), {Prefix("foo"), Prefix("bar")},
Prefix("merged")));
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003",
"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "bar_000", Constant_2x3(T(0)));
Expect<T>(&reader, "bar_001", Constant_2x3(T(1)));
Expect<T>(&reader, "bar_002", Constant_2x3(T(2)));
Expect<T>(&reader, "bar_003", Constant_2x3(T(3)));
Expect<T>(&reader, "foo_000", Constant_2x3(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
ExpectNext<T>(&reader, Constant_2x3(T(0)));
ExpectNext<T>(&reader, Constant_2x3(T(1)));
ExpectNext<T>(&reader, Constant_2x3(T(2)));
ExpectNext<T>(&reader, Constant_2x3(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
}
template <typename T>
void TestByteSwap(const T* forward, const T* swapped, int array_len) {
auto bytes_per_elem = sizeof(T);
std::unique_ptr<T[]> forward_copy(new T[array_len]);
std::memcpy(forward_copy.get(), forward, array_len * bytes_per_elem);
TF_EXPECT_OK(ByteSwapArray(reinterpret_cast<char*>(forward_copy.get()),
bytes_per_elem, array_len));
for (int i = 0; i < array_len; i++) {
EXPECT_EQ(forward_copy.get()[i], swapped[i]);
}
auto shape = TensorShape({array_len});
auto dtype = DataTypeToEnum<T>::value;
Tensor forward_tensor(dtype, shape);
Tensor swapped_tensor(dtype, shape);
std::memcpy(const_cast<char*>(forward_tensor.tensor_data().data()), forward,
array_len * bytes_per_elem);
std::memcpy(const_cast<char*>(swapped_tensor.tensor_data().data()), swapped,
array_len * bytes_per_elem);
TF_EXPECT_OK(ByteSwapTensor(&forward_tensor));
test::ExpectTensorEqual<T>(forward_tensor, swapped_tensor);
}
TEST(TensorBundleTest, SwapBytes) {
ByteSwap(Constant_2x3<int>(42));
EXPECT_NE(absl::OkStatus(), FlipEndiannessBit(Prefix("not_a_valid_prefix")));
const int arr_len_16 = 4;
const uint16_t forward_16[] = {0x1de5, 0xd017, 0xf1ea, 0xc0a1};
const uint16_t swapped_16[] = {0xe51d, 0x17d0, 0xeaf1, 0xa1c0};
const int arr_len_32 = 2;
const uint32_t forward_32[] = {0x0ddba115, 0xf01dab1e};
const uint32_t swapped_32[] = {0x15a1db0d, 0x1eab1df0};
const int arr_len_64 = 2;
const uint64_t forward_64[] = {0xf005ba11caba1000, 0x5ca1ab1ecab005e5};
const uint64_t swapped_64[] = {0x0010baca11ba05f0, 0xe505b0ca1eaba15c};
TestByteSwap(forward_16, swapped_16, arr_len_16);
TestByteSwap(reinterpret_cast<const int16_t*>(forward_16),
reinterpret_cast<const int16_t*>(swapped_16), arr_len_16);
TestByteSwap(reinterpret_cast<const bfloat16*>(forward_16),
reinterpret_cast<const bfloat16*>(swapped_16), arr_len_16);
TestByteSwap(forward_32, swapped_32, arr_len_32);
TestByteSwap(reinterpret_cast<const int32_t*>(forward_32),
reinterpret_cast<const int32_t*>(swapped_32), arr_len_32);
TestByteSwap(reinterpret_cast<const float*>(forward_32),
reinterpret_cast<const float*>(swapped_32), arr_len_32);
TestByteSwap(reinterpret_cast<const uint64*>(forward_64),
reinterpret_cast<const uint64*>(swapped_64), arr_len_64);
TestByteSwap(reinterpret_cast<const int64_t*>(forward_64),
reinterpret_cast<const int64_t*>(swapped_64), arr_len_64);
TestByteSwap(reinterpret_cast<const double*>(forward_64),
reinterpret_cast<const double*>(swapped_64), arr_len_64);
const float* forward_float = reinterpret_cast<const float*>(forward_32);
const float* swapped_float = reinterpret_cast<const float*>(swapped_32);
const double* forward_double = reinterpret_cast<const double*>(forward_64);
const double* swapped_double = reinterpret_cast<const double*>(swapped_64);
Tensor forward_complex64 = Constant_2x3<complex64>(
std::complex<float>(forward_float[0], forward_float[1]));
Tensor swapped_complex64 = Constant_2x3<complex64>(
std::complex<float>(swapped_float[0], swapped_float[1]));
Tensor forward_complex128 = Constant_2x3<complex128>(
std::complex<double>(forward_double[0], forward_double[1]));
Tensor swapped_complex128 = Constant_2x3<complex128>(
std::complex<double>(swapped_double[0], swapped_double[1]));
TF_EXPECT_OK(ByteSwapTensor(&forward_complex64));
test::ExpectTensorEqual<complex64>(forward_complex64, swapped_complex64);
TF_EXPECT_OK(ByteSwapTensor(&forward_complex128));
test::ExpectTensorEqual<complex128>(forward_complex128, swapped_complex128);
}
template <typename T>
void TestEndianness() {
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("foo_003", ByteSwap(Constant_2x3<T>(T(3)))));
TF_EXPECT_OK(writer.Add("foo_000", ByteSwap(Constant_2x3<T>(T(0)))));
TF_EXPECT_OK(writer.Add("foo_002", ByteSwap(Constant_2x3<T>(T(2)))));
TF_EXPECT_OK(writer.Add("foo_001", ByteSwap(Constant_2x3<T>(T(1)))));
TF_ASSERT_OK(writer.Finish());
TF_ASSERT_OK(FlipEndiannessBit(Prefix("foo")));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "foo_000", Constant_2x3<T>(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3<T>(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
{
BundleWriter writer(Env::Default(), Prefix("bar"));
TF_EXPECT_OK(writer.Add("bar_003", ByteSwap(Constant_2x3<T>(T(3)))));
TF_EXPECT_OK(writer.Add("bar_000", ByteSwap(Constant_2x3<T>(T(0)))));
TF_EXPECT_OK(writer.Add("bar_002", ByteSwap(Constant_2x3<T>(T(2)))));
TF_EXPECT_OK(writer.Add("bar_001", ByteSwap(Constant_2x3<T>(T(1)))));
TF_ASSERT_OK(writer.Finish());
TF_ASSERT_OK(FlipEndiannessBit(Prefix("bar")));
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003"}));
Expect<T>(&reader, "bar_003", Constant_2x3<T>(T(3)));
Expect<T>(&reader, "bar_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "bar_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "bar_000", Constant_2x3<T>(T(0)));
}
{
BundleReader reader(Env::Default(), Prefix("bar"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
TF_ASSERT_OK(MergeBundles(Env::Default(), {Prefix("foo"), Prefix("bar")},
Prefix("merged")));
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(
AllTensorKeys(&reader),
std::vector<string>({"bar_000", "bar_001", "bar_002", "bar_003",
"foo_000", "foo_001", "foo_002", "foo_003"}));
Expect<T>(&reader, "bar_000", Constant_2x3<T>(T(0)));
Expect<T>(&reader, "bar_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "bar_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "bar_003", Constant_2x3<T>(T(3)));
Expect<T>(&reader, "foo_000", Constant_2x3<T>(T(0)));
Expect<T>(&reader, "foo_001", Constant_2x3<T>(T(1)));
Expect<T>(&reader, "foo_002", Constant_2x3<T>(T(2)));
Expect<T>(&reader, "foo_003", Constant_2x3<T>(T(3)));
}
{
BundleReader reader(Env::Default(), Prefix("merged"));
TF_ASSERT_OK(reader.status());
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(0)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(1)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(2)));
ExpectNext<T>(&reader, Constant_2x3<T>(T(3)));
EXPECT_TRUE(reader.Valid());
reader.Next();
EXPECT_FALSE(reader.Valid());
}
}
template <typename T>
void TestNonStandardShapes() {
{
BundleWriter writer(Env::Default(), Prefix("nonstandard"));
TF_EXPECT_OK(writer.Add("scalar", Constant(T(0), TensorShape())));
TF_EXPECT_OK(
writer.Add("non_standard0", Constant(T(0), TensorShape({0, 1618}))));
TF_EXPECT_OK(
writer.Add("non_standard1", Constant(T(0), TensorShape({16, 0, 18}))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("nonstandard"));
TF_ASSERT_OK(reader.status());
Expect<T>(&reader, "scalar", Constant(T(0), TensorShape()));
Expect<T>(&reader, "non_standard0", Constant(T(0), TensorShape({0, 1618})));
Expect<T>(&reader, "non_standard1",
Constant(T(0), TensorShape({16, 0, 18})));
}
}
void VersionTest(const VersionDef& version, StringPiece expected_error) {
const string path = Prefix("version_test");
{
BundleHeaderProto header;
*header.mutable_version() = version;
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewWritableFile(MetaFilename(path), &file));
table::TableBuilder builder(table::Options(), file.get());
builder.Add(kHeaderEntryKey, header.SerializeAsString());
TF_ASSERT_OK(builder.Finish());
}
BundleReader reader(Env::Default(), path);
EXPECT_TRUE(errors::IsInvalidArgument(reader.status()));
EXPECT_TRUE(absl::StartsWith(reader.status().message(), expected_error));
}
}
TEST(TensorBundleTest, Basic) {
TestBasic<float>();
TestBasic<double>();
TestBasic<int32>();
TestBasic<uint8>();
TestBasic<int16>();
TestBasic<int8>();
TestBasic<complex64>();
TestBasic<complex128>();
TestBasic<int64_t>();
TestBasic<bool>();
TestBasic<qint32>();
TestBasic<quint8>();
TestBasic<qint8>();
TestBasic<bfloat16>();
}
TEST(TensorBundleTest, Endianness) {
TestEndianness<float>();
TestEndianness<double>();
TestEndianness<int32>();
TestEndianness<uint8>();
TestEndianness<int16>();
TestEndianness<int8>();
TestEndianness<complex64>();
TestEndianness<complex128>();
TestEndianness<int64_t>();
TestEndianness<bool>();
TestEndianness<qint32>();
TestEndianness<quint8>();
TestEndianness<qint8>();
TestEndianness<bfloat16>();
}
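// Saves "foo" as two column slices (-:0,1 and -:1,9) of a 5x10 tensor, then
// checks that the reader can reassemble the full tensor, enumerate the saved
// slices, and serve sub-slices that cross or fall inside a saved slice.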
TEST(TensorBundleTest, PartitionedVariables) {
const TensorShape kFullShape({5, 10});
TensorSlice slice1 = TensorSlice::ParseOrDie("-:0,1");
TensorSlice slice2 = TensorSlice::ParseOrDie("-:1,9");
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(writer.AddSlice("foo", kFullShape, slice1,
Constant<float>(0., TensorShape({5, 1}))));
TF_ASSERT_OK(writer.AddSlice("foo", kFullShape, slice2,
Constant<float>(1., TensorShape({5, 9}))));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
Tensor expected_val(DT_FLOAT, kFullShape);
test::FillFn<float>(&expected_val, [](int offset) -> float {
if (offset % 10 == 0) {
return 0;
}
return 1;
});
Tensor val(DT_FLOAT, kFullShape);
TF_ASSERT_OK(reader.Lookup("foo", &val));
test::ExpectTensorEqual<float>(val, expected_val);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
std::vector<TensorSlice> slices;
TF_ASSERT_OK(reader.LookupTensorSlices("foo", &slices));
EXPECT_EQ(2, slices.size());
EXPECT_EQ(slice1.DebugString(), slices[0].DebugString());
EXPECT_EQ(slice2.DebugString(), slices[1].DebugString());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice distinct_slice = TensorSlice::ParseOrDie("-:0,2");
Tensor expected_val(DT_FLOAT, TensorShape({5, 2}));
test::FillFn<float>(&expected_val, [](int offset) -> float {
if (offset % 2 == 0) {
return 0;
}
return 1;
});
Tensor val(DT_FLOAT, TensorShape({5, 2}));
TF_ASSERT_OK(reader.LookupSlice("foo", distinct_slice, &val));
test::ExpectTensorEqual<float>(val, expected_val);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice distinct_slice = TensorSlice::ParseOrDie("-:2,2");
Tensor val(DT_FLOAT, TensorShape({5, 2}));
TF_ASSERT_OK(reader.LookupSlice("foo", distinct_slice, &val));
test::ExpectTensorEqual<float>(val,
Constant<float>(1., TensorShape({5, 2})));
}
}
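// "-:-" and "0,5:0,10" describe the same extent of a 5x10 tensor, so a
// lookup written with either spelling must match a slice saved under the
// other.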
TEST(TensorBundleTest, EquivalentSliceTest) {
const TensorShape kFullShape({5, 10});
const Tensor kExpected(Constant<float>(1., kFullShape));
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(writer.AddSlice("no_extents", kFullShape,
TensorSlice::ParseOrDie("-:-"), kExpected));
TF_ASSERT_OK(writer.AddSlice("both_extents", kFullShape,
TensorSlice::ParseOrDie("0,5:0,10"),
kExpected));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("-:-");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("no_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("0,5:0,10");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("both_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("0,5:0,10");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("no_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
const TensorSlice slice = TensorSlice::ParseOrDie("-:-");
Tensor val(DT_FLOAT, TensorShape(kFullShape));
TF_ASSERT_OK(reader.LookupSlice("both_extents", slice, &val));
test::ExpectTensorEqual<float>(val, kExpected);
}
}
TEST(TensorBundleTest, NonStandardShapes) {
TestNonStandardShapes<float>();
TestNonStandardShapes<double>();
TestNonStandardShapes<int32>();
TestNonStandardShapes<uint8>();
TestNonStandardShapes<int16>();
TestNonStandardShapes<int8>();
TestNonStandardShapes<complex64>();
TestNonStandardShapes<complex128>();
TestNonStandardShapes<int64_t>();
TestNonStandardShapes<bool>();
TestNonStandardShapes<qint32>();
TestNonStandardShapes<quint8>();
TestNonStandardShapes<qint8>();
TestNonStandardShapes<bfloat16>();
}
TEST(TensorBundleTest, StringTensorsOldFormat) {
BundleReader reader(Env::Default(), TestdataPrefix("old_string_tensors/foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(AllTensorKeys(&reader),
std::vector<string>({"floats", "scalar", "string_tensor", "strs"}));
Expect<tstring>(&reader, "string_tensor",
Tensor(DT_STRING, TensorShape({1})));
Expect<tstring>(&reader, "scalar", test::AsTensor<tstring>({"hello"}));
Expect<tstring>(
&reader, "strs",
test::AsTensor<tstring>({"hello", "", "x01", string(1 << 10, 'c')}));
Expect<float>(&reader, "floats", Constant_2x3<float>(16.18));
}
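// Returns the VM page size so the long-string check below can verify the
// backing buffer one page at a time rather than byte by byte.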
size_t GetPageSize() {
#ifdef _WIN32
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#elif defined(__wasm__) || defined(__asmjs__)
return getpagesize();
#else
return sysconf(_SC_PAGESIZE);
#endif
}
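// kLongLength is one past UINT32_MAX, so this test also covers strings whose
// length does not fit in 32 bits.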
TEST(TensorBundleTest, StringTensors) {
constexpr size_t kLongLength = static_cast<size_t>(UINT32_MAX) + 1;
Tensor long_string_tensor(DT_STRING, TensorShape({1}));
{
BundleWriter writer(Env::Default(), Prefix("foo"));
TF_EXPECT_OK(writer.Add("string_tensor",
Tensor(DT_STRING, TensorShape({1}))));
TF_EXPECT_OK(writer.Add("scalar", test::AsTensor<tstring>({"hello"})));
TF_EXPECT_OK(writer.Add(
"strs",
test::AsTensor<tstring>({"hello", "", "x01", string(1 << 25, 'c')})));
tstring* backing_string = long_string_tensor.flat<tstring>().data();
backing_string->resize_uninitialized(kLongLength);
std::char_traits<char>::assign(backing_string->data(), kLongLength, 'd');
TF_EXPECT_OK(writer.Add("long_scalar", long_string_tensor));
TF_EXPECT_OK(writer.Add("floats", Constant_2x3<float>(16.18)));
TF_ASSERT_OK(writer.Finish());
}
{
BundleReader reader(Env::Default(), Prefix("foo"));
TF_ASSERT_OK(reader.status());
EXPECT_EQ(AllTensorKeys(&reader),
std::vector<string>({"floats", "long_scalar", "scalar",
"string_tensor", "strs"}));
Expect<tstring>(&reader, "string_tensor",
Tensor(DT_STRING, TensorShape({1})));
Expect<tstring>(&reader, "scalar", test::AsTensor<tstring>({"hello"}));
Expect<tstring>(
&reader, "strs",
test::AsTensor<tstring>({"hello", "", "x01", string(1 << 25, 'c')}));
Expect<float>(&reader, "floats", Constant_2x3<float>(16.18));
EXPECT_TRUE(reader.Contains("long_scalar"));
DataType dtype;
TensorShape shape;
TF_ASSERT_OK(reader.LookupDtypeAndShape("long_scalar", &dtype, &shape));
EXPECT_EQ(DT_STRING, dtype);
EXPECT_EQ(TensorShape({1}), shape);
tstring* backing_string = long_string_tensor.flat<tstring>().data();
std::char_traits<char>::assign(backing_string->data(), kLongLength, 'e');
TF_ASSERT_OK(reader.Lookup("long_scalar", &long_string_tensor));
ASSERT_EQ(backing_string, long_string_tensor.flat<tstring>().data());
EXPECT_EQ(kLongLength, backing_string->length());
const size_t kPageSize = GetPageSize();
char* testblock = new char[kPageSize];
memset(testblock, 'd', sizeof(char) * kPageSize);
for (size_t i = 0; i < kLongLength; i += kPageSize) {
if (memcmp(testblock, backing_string->data() + i, kPageSize) != 0) {
FAIL() << "long_scalar is not full of 'd's as expected.";
break;
}
}
delete[] testblock;
}
}
class VariantObject {
public:
VariantObject() {}
VariantObject(const string& metadata, int64_t value)
: metadata_(metadata), value_(value) {}
string TypeName() const { return "TEST VariantObject"; }
void Encode(VariantTensorData* data) const {
data->set_type_name(TypeName());
data->set_metadata(metadata_);
Tensor val_t = Tensor(DT_INT64, TensorShape({}));
val_t.scalar<int64_t>()() = value_;
*(data->add_tensors()) = val_t;
}
bool Decode(const VariantTensorData& data) {
EXPECT_EQ(data.type_name(), TypeName());
data.get_metadata(&metadata_);
EXPECT_EQ(data.te |
1,721 | cpp | tensorflow/tensorflow | uniform_quant_ops_params | tensorflow/core/util/quantization/uniform_quant_ops_params.cc | tensorflow/core/util/quantization/uniform_quant_ops_params_test.cc | #ifndef TENSORFLOW_CORE_UTIL_QUANTIZATION_UNIFORM_QUANT_OPS_PARAMS_H_
#define TENSORFLOW_CORE_UTIL_QUANTIZATION_UNIFORM_QUANT_OPS_PARAMS_H_
#include <string>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace tensorflow {
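// Holds the attributes of a uniform quantized convolution (strides,
// dilations, dimension numbers, group counts, and padding), fills in defaults
// for attributes left empty, and validates them against concrete lhs/rhs
// shapes.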
class UniformQuantizedConvolutionParams {
public:
UniformQuantizedConvolutionParams() = default;
UniformQuantizedConvolutionParams(
const std::vector<int>& window_strides,
const std::vector<int>& lhs_dilation,
const std::vector<int>& rhs_dilation,
const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers,
int feature_group_count, int batch_group_count,
const std::string& padding, const std::vector<int>& padding_list = {})
: window_strides_(window_strides),
lhs_dilation_(lhs_dilation),
rhs_dilation_(rhs_dilation),
dimension_numbers_(dimension_numbers),
feature_group_count_(feature_group_count),
batch_group_count_(batch_group_count),
padding_(padding),
padding_list_(padding_list) {}
const std::vector<int>& window_strides() const { return window_strides_; }
const std::vector<int>& lhs_dilation() const { return lhs_dilation_; }
const std::vector<int>& rhs_dilation() const { return rhs_dilation_; }
const UniformQuantizedConvolutionDimensionNumbersAttr& dimension_numbers()
const {
return dimension_numbers_;
}
int batch_group_count() const { return batch_group_count_; }
const std::vector<int>& padding_list() const { return padding_list_; }
int feature_group_count() const { return feature_group_count_; }
Status LoadFromAttrs(const OpKernelConstruction& context);
Status LoadFromAttrs(const shape_inference::InferenceContext& context);
Status ValidateOrFillParamsAndValidateShape(const TensorShape& lhs_shape,
const TensorShape& rhs_shape);
absl::StatusOr<TensorShape> CalculateOutputShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) const;
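  // Size of a dimension after inserting (dilation - 1) gaps between adjacent
  // elements, e.g. DilatedSize(10, 3) == 10 + 2 * 9 == 28.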
inline static int64_t DilatedSize(int64_t size, int dilation) {
return size == 0 ? 0 : size + (dilation - 1) * (size - 1);
}
private:
template <typename ContextT>
Status LoadFromAttrsInternal(const ContextT& context);
Status ValidateShape(const TensorShape& lhs_shape,
const TensorShape& rhs_shape);
Status ValidateOrFillPaddingList(const TensorShape& lhs_shape,
const TensorShape& rhs_shape);
std::vector<int> window_strides_;
std::vector<int> lhs_dilation_;
std::vector<int> rhs_dilation_;
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers_;
int feature_group_count_;
int batch_group_count_;
std::string padding_;
std::vector<int> padding_list_;
};
}
#endif
#include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
Status ValidDim(int64_t dims, int64_t dim) {
if (dim < 0 || dim >= dims) {
return InvalidArgument(
"Each dimension number must be in region [0, rank). Given rank ", dims,
" and dimension number value ", dim);
}
return absl::OkStatus();
}
Status ValidSpatialDimensions(
int64_t dims, const protobuf::RepeatedField<int64_t>& spatial_dimensions) {
if (spatial_dimensions.size() != dims - 2) {
return InvalidArgument(
"Spatial dimensions size must be rank - 2. Given rank ", dims,
" and spatial dimensions size ", spatial_dimensions.size());
}
for (int i = 0; i < spatial_dimensions.size(); ++i) {
TF_RETURN_IF_ERROR(ValidDim(dims, spatial_dimensions.Get(i)));
}
return absl::OkStatus();
}
}
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
const OpKernelConstruction& context) {
return LoadFromAttrsInternal(context);
}
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
const shape_inference::InferenceContext& context) {
return LoadFromAttrsInternal(context);
}
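// Fills empty window_strides/lhs_dilation/rhs_dilation and default dimension
// numbers, then checks that every dimension number is in range and that
// feature_group_count and batch_group_count divide the corresponding lhs/rhs
// dimension sizes.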
Status UniformQuantizedConvolutionParams::ValidateOrFillParamsAndValidateShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
if (lhs_shape.dims() != rhs_shape.dims()) {
return InvalidArgument(
"lhs and rhs must have same dims. Given lhs and rhs of shapes: ",
lhs_shape.DebugString(), rhs_shape.DebugString());
}
const int64_t dims = lhs_shape.dims();
if (dims <= 2) {
return InvalidArgument("lhs and rhs shape dims must be at least 3. Given: ",
dims);
}
const int64_t num_spatial_dims = dims - 2;
if (window_strides_.empty()) {
window_strides_.resize(num_spatial_dims, 1);
} else if (window_strides_.size() != num_spatial_dims) {
return InvalidArgument("Size of window_strides Attr must be dims - 2.");
} else if (!absl::c_all_of(window_strides_,
[](int stride) { return stride >= 1; })) {
return InvalidArgument(
"All elements of window_strides must be >= 1. Given ",
absl::StrJoin(window_strides_, ", "));
}
if (lhs_dilation_.empty()) {
lhs_dilation_.resize(num_spatial_dims, 1);
} else if (lhs_dilation_.size() != num_spatial_dims) {
return InvalidArgument("Size of lhs_dilation Attr must be dims - 2.");
} else if (!absl::c_all_of(lhs_dilation_, [](const int dilation) {
return dilation >= 1;
})) {
return InvalidArgument("All elements of lhs_dilation must be >= 1. Given ",
absl::StrJoin(lhs_dilation_, ", "));
}
if (rhs_dilation_.empty()) {
rhs_dilation_.resize(num_spatial_dims, 1);
} else if (rhs_dilation_.size() != num_spatial_dims) {
return InvalidArgument("Size of rhs_dilation Attr must be dims - 2.");
} else if (!absl::c_all_of(rhs_dilation_, [](const int dilation) {
return dilation >= 1;
})) {
return InvalidArgument("All elements of rhs_dilation must be >= 1. Given ",
absl::StrJoin(rhs_dilation_, ", "));
}
if (dimension_numbers_.input_spatial_dimensions_size() == 0) {
dimension_numbers_.set_input_batch_dimension(0);
dimension_numbers_.set_input_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_input_spatial_dimensions(2 + i);
}
dimension_numbers_.set_kernel_output_feature_dimension(0);
dimension_numbers_.set_kernel_input_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_kernel_spatial_dimensions(2 + i);
}
dimension_numbers_.set_output_batch_dimension(0);
dimension_numbers_.set_output_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_output_spatial_dimensions(2 + i);
}
} else {
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.input_batch_dimension()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.input_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.input_spatial_dimensions()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.kernel_input_feature_dimension()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.kernel_output_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.kernel_spatial_dimensions()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.output_batch_dimension()));
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.output_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.output_spatial_dimensions()));
}
if (feature_group_count_ <= 0) {
return InvalidArgument(
"feature_group_count must be a positive integer, given: ",
feature_group_count_);
}
const int64_t lhs_feature_count =
lhs_shape.dim_size(dimension_numbers_.input_feature_dimension());
if (lhs_feature_count % feature_group_count_) {
return InvalidArgument(
"feature_group_count must divide lhs feature dimension size, but ",
feature_group_count_, " does not divide ", lhs_feature_count);
}
const int64_t rhs_input_feature_count =
rhs_shape.dim_size(dimension_numbers_.kernel_input_feature_dimension());
if (lhs_feature_count % rhs_input_feature_count) {
return InvalidArgument(
"rhs input feature dimension must divide lhs feature dimension "
"size, but ",
rhs_input_feature_count, " does not divide ", lhs_feature_count);
}
if (lhs_feature_count / feature_group_count_ != rhs_input_feature_count) {
return InvalidArgument(
"lhs feature dimension size divided by feature_group_count must equal "
"the rhs input feature dimension size, but ",
lhs_feature_count, " / ", feature_group_count_,
" != ", rhs_input_feature_count);
}
const int64_t rhs_output_feature_count =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
if (rhs_output_feature_count % feature_group_count_) {
return InvalidArgument(
"rhs output dimension size must be a multiple of feature_group_count, "
"but ",
rhs_output_feature_count, " is not a multiple of ",
feature_group_count_);
}
if (batch_group_count_ <= 0) {
return InvalidArgument(
"batch_group_count Attr must be a positive integer. Given: ",
batch_group_count_);
}
const int64_t lhs_batch_count =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension());
if (lhs_batch_count % batch_group_count_) {
return InvalidArgument(
"batch_group_count must divide lhs batch dimension size, but ",
batch_group_count_, " does not divide ", lhs_batch_count);
}
if (rhs_output_feature_count % batch_group_count_) {
return InvalidArgument(
"rhs output dimension size must be a multiple of batch_group_count, "
"but ",
rhs_output_feature_count, " is not a multiple of ", batch_group_count_);
}
return ValidateOrFillPaddingList(lhs_shape, rhs_shape);
}
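// The batch output size is lhs_batch / batch_group_count, the feature output
// size is the kernel output-feature size, and each spatial output size is
//   ceil((lhs_dilated + pad_low + pad_high - rhs_dilated + 1) / stride).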
absl::StatusOr<TensorShape>
UniformQuantizedConvolutionParams::CalculateOutputShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) const {
std::vector<int64_t> output_shape_buf(lhs_shape.dims());
output_shape_buf[dimension_numbers_.output_batch_dimension()] =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()) /
batch_group_count_;
output_shape_buf[dimension_numbers_.output_feature_dimension()] =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) {
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size_numerator =
lhs_size_dilated + padding_list_[2 * i] + padding_list_[2 * i + 1] -
rhs_size_dilated + 1;
const int64_t output_size_denominator = window_strides_[i];
output_shape_buf[dimension_numbers_.output_spatial_dimensions(i)] =
(output_size_numerator + output_size_denominator - 1) /
output_size_denominator;
}
TensorShape output_shape;
TF_RETURN_IF_ERROR(
TensorShape::BuildTensorShape(output_shape_buf, &output_shape));
return output_shape;
}
template <typename ContextT>
Status UniformQuantizedConvolutionParams::LoadFromAttrsInternal(
const ContextT& context) {
TF_RETURN_IF_ERROR(context.GetAttr("window_strides", &window_strides_));
TF_RETURN_IF_ERROR(context.GetAttr("lhs_dilation", &lhs_dilation_));
TF_RETURN_IF_ERROR(context.GetAttr("rhs_dilation", &rhs_dilation_));
TF_RETURN_IF_ERROR(context.GetAttr("batch_group_count", &batch_group_count_));
TF_RETURN_IF_ERROR(
context.GetAttr("feature_group_count", &feature_group_count_));
TF_RETURN_IF_ERROR(context.GetAttr("padding", &padding_));
TF_RETURN_IF_ERROR(context.GetAttr("explicit_padding", &padding_list_));
if (padding_ != "EXPLICIT" && padding_ != "SAME" && padding_ != "VALID") {
return InvalidArgument(
"padding Attr must be one of [EXPLICIT | SAME | VALID], but given: ",
padding_);
} else if (padding_ != "EXPLICIT" && !padding_list_.empty()) {
return InvalidArgument(
"If padding Attr is not 'EXPLICIT', explicit_padding Attr must be "
"empty. Given padding ",
padding_, " and explicit_padding of size ", padding_list_.size());
}
std::string dimension_numbers_str;
TF_RETURN_IF_ERROR(
context.GetAttr("dimension_numbers", &dimension_numbers_str));
if (dimension_numbers_str.empty()) {
dimension_numbers_.Clear();
} else if (!dimension_numbers_.ParseFromString(dimension_numbers_str)) {
return InvalidArgument("Error parsing convolution dimension numbers.");
}
return absl::OkStatus();
}
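// EXPLICIT padding is validated as given and VALID padding is all zeros.
// SAME padding is computed per spatial dimension as
//   total = max((ceil(in_dilated / stride) - 1) * stride
//               + kernel_dilated - in_dilated, 0)
// with the low side getting total / 2 and the high side the remainder.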
Status UniformQuantizedConvolutionParams::ValidateOrFillPaddingList(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
const int64_t dims = lhs_shape.dims();
const int64_t padding_list_size = 2 * (dims - 2);
if (padding_ == "EXPLICIT") {
if (padding_list_.size() != padding_list_size) {
return InvalidArgument(
"Size of explicit_padding Attr must be 2 * (rank - 2). Given rank ",
dims, " and explicit_padding of size ", padding_list_.size());
} else if (!absl::c_all_of(padding_list_,
[](int elem) { return elem >= 0; })) {
return InvalidArgument("All explicit_padding elems must be >= 0, Given ",
absl::StrJoin(padding_list_, ", "));
}
} else if (padding_ == "VALID") {
padding_list_.resize(padding_list_size, 0);
} else {
padding_list_.resize(padding_list_size);
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size();
++i) {
const int64_t stride = window_strides_[i];
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size = (lhs_size_dilated + stride - 1) / stride;
const int64_t total_padding = std::max(
(output_size - 1) * stride + rhs_size_dilated - lhs_size_dilated,
static_cast<int64_t>(0));
const int64_t padding_begin = total_padding / 2;
const int64_t padding_end = total_padding - padding_begin;
padding_list_[2 * i] = padding_begin;
padding_list_[2 * i + 1] = padding_end;
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using protobuf::TextFormat;
using ::testing::ElementsAreArray;
TEST(UniformQuantizedConvolutionParamsTest, DilatedSize) {
EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(0, 2), 0);
EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(10, 3), 28);
}
TEST(UniformQuantizedConvolutionParamsTest,
ValidateOrFillParamsAndValidateShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape({2, 2, 3, 4},
{3, 2, 2, 3}));
EXPECT_THAT(params.window_strides(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.lhs_dilation(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.rhs_dilation(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.padding_list(), ElementsAreArray({0, 0, 0, 0}));
EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
ElementsAreArray({2, 3}));
EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
ElementsAreArray({2, 3}));
EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
ElementsAreArray({2, 3}));
}
TEST(UniformQuantizedConvolutionParamsTest,
ValidateOrFillParamsAndValidateShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape({2, 3, 4, 2},
{2, 3, 1, 2}));
EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 3);
EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
ElementsAreArray({1, 2}));
EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 3);
EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 2);
EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
ElementsAreArray({0, 1}));
EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 3);
EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
ElementsAreArray({1, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 2, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
const TensorShape lhs_shape({2, 3, 4, 2});
const TensorShape rhs_shape({2, 3, 1, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
}
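// With input spatial sizes {3, 4}, kernel spatial sizes {4, 3}, and stride 1,
// SAME padding needs totals of 3 and 2 per dimension, split low/high as
// {1, 2} and {1, 1}, giving the expected padding list {1, 2, 1, 1}.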
TEST(UniformQuantizedConvolutionParamsTest, CalculateSameOptionPadding) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"SAME");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 4, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 2, 1, 1}));
}
}
} |
1,722 | cpp | tensorflow/tensorflow | sparse_tensor | tensorflow/core/util/sparse/sparse_tensor.cc | tensorflow/core/util/sparse/sparse_tensor_test.cc | #ifndef TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
#define TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
#include <limits>
#include <numeric>
#include <vector>
#include "absl/base/macros.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/sparse/dim_comparator.h"
#include "tensorflow/core/util/sparse/group_iterator.h"
namespace tensorflow {
namespace sparse {
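// Represents a sparse tensor as an [N, rank] int64 matrix of indices plus a
// length-N vector of values, together with a dense shape and the dimension
// order (if any) in which the indices are sorted.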
class SparseTensor {
public:
typedef absl::Span<const int64_t> VarDimArray;
typedef absl::InlinedVector<int64_t, 8UL> ShapeArray;
static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order, SparseTensor* result);
static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
SparseTensor* result);
static Status Create(Tensor ix, Tensor vals, const VarDimArray shape,
SparseTensor* result);
static Status Create(Tensor ix, Tensor vals, const TensorShape& shape,
const VarDimArray order, SparseTensor* result);
SparseTensor() : dims_(0) {}
ABSL_DEPRECATED("Use Create() functions instead of constructors directly.")
SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape)
: SparseTensor(std::move(ix), std::move(vals), TensorShapeToVector(shape),
UndefinedOrder(TensorShapeToVector(shape))) {}
ABSL_DEPRECATED("Use Create() functions instead of constructors directly.")
SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape)
: SparseTensor(std::move(ix), std::move(vals), shape,
UndefinedOrder(shape)) {}
ABSL_DEPRECATED("use Create() functions instead of constructors directly.")
SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape,
const VarDimArray order)
: SparseTensor(std::move(ix), std::move(vals), TensorShapeToVector(shape),
order) {}
ABSL_DEPRECATED("Use Create() functions instead of constructors directly.")
SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order);
SparseTensor(const SparseTensor& other)
: SparseTensor(other.ix_, other.vals_, other.shape_, other.order_) {}
SparseTensor(SparseTensor&& other)
: SparseTensor(std::move(other.ix_), std::move(other.vals_),
std::move(other.shape_), std::move(other.order_)) {}
SparseTensor& operator=(const SparseTensor& other) {
ix_ = other.ix_;
vals_ = other.vals_;
shape_ = other.shape_;
order_ = other.order_;
dims_ = other.dims_;
return *this;
}
SparseTensor& operator=(SparseTensor&& other) {
ix_ = std::move(other.ix_);
vals_ = std::move(other.vals_);
shape_ = std::move(other.shape_);
order_ = std::move(other.order_);
dims_ = std::move(other.dims_);
return *this;
}
std::size_t num_entries() const { return ix_.dim_size(0); }
int dims() const { return shape_.size(); }
const Tensor& indices() const { return ix_; }
const Tensor& values() const { return vals_; }
DataType dtype() const { return vals_.dtype(); }
Status IndicesValid() const;
VarDimArray shape() const { return shape_; }
VarDimArray order() const { return order_; }
template <typename T>
void Reorder(const VarDimArray& order);
GroupIterable group(const VarDimArray& group_ix) const {
DCHECK_LE(group_ix.size(), dims_);
for (std::size_t di = 0; di < group_ix.size(); ++di) {
DCHECK_GE(group_ix[di], 0) << "Group dimension out of range";
DCHECK_LT(group_ix[di], dims_) << "Group dimension out of range";
DCHECK_EQ(group_ix[di], order_[di])
<< "Group dimension does not match sorted order";
}
return GroupIterable(ix_, vals_, dims_, group_ix);
}
template <typename T>
bool ToDense(Tensor* out, bool initialize = true);
template <typename T>
static SparseTensor Concat(const absl::Span<const SparseTensor>& tensors);
template <typename T>
static Status Split(const SparseTensor& tensor, const int split_dim,
const int num_split, std::vector<SparseTensor>* result);
template <typename T>
static absl::StatusOr<SparseTensor> Slice(
const SparseTensor& tensor, const absl::Span<const int64_t> start,
const absl::Span<const int64_t> size);
std::vector<int64_t> PickDims(absl::Span<const int64_t> dim_indices) const {
std::vector<int64_t> res(dim_indices.size());
for (size_t i = 0; i < dim_indices.size(); ++i) {
res[i] = shape_[dim_indices[i]];
}
return res;
}
private:
static inline ShapeArray UndefinedOrder(const VarDimArray shape) {
return ShapeArray(shape.size(), -1);
}
static inline ShapeArray TensorShapeToVector(const TensorShape& shape) {
ShapeArray vec(shape.dims());
for (int i = 0; i < shape.dims(); ++i) vec[i] = shape.dim_size(i);
return vec;
}
bool IndicesValidVectorFastPath() const;
bool IndicesValidMatrix32BitFastPath() const;
template <bool standard_order>
Status IndicesValidHelper() const;
template <typename T>
bool ValidateAndInitializeToDense(Tensor* out, bool initialize);
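  // Helpers for Split(). With residual = split_dim_size % num_split, the
  // first `residual` slices get split_size + 1 entries and the rest get
  // split_size; e.g. splitting 7 three ways yields slice sizes {3, 2, 2}.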
static inline int GetSliceIndex(const int dim, const int split_size,
const int residual) {
DCHECK_GT(split_size, 0);
DCHECK_GE(dim, 0);
if (residual == 0) return dim / split_size;
const int offset = residual * (split_size + 1);
if (dim < offset) {
return dim / (split_size + 1);
} else {
return residual + ((dim - offset) / split_size);
}
}
static inline int GetDimensionInSlice(const int dim, const int split_size,
const int residual) {
DCHECK_GT(split_size, 0);
DCHECK_GE(dim, 0);
if (residual == 0) return dim % split_size;
const int offset = residual * (split_size + 1);
if (dim < offset) {
return dim % (split_size + 1);
} else {
return (dim - offset) % split_size;
}
}
static inline int GetSliceShape(const int slice_index, const int split_size,
const int residual) {
DCHECK_GT(split_size, 0);
DCHECK_GE(slice_index, 0);
if (residual == 0) return split_size;
if (slice_index < residual) {
return split_size + 1;
} else {
return split_size;
}
}
Tensor ix_;
Tensor vals_;
ShapeArray shape_;
ShapeArray order_;
int dims_;
};
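// Sorts the indices and values in place into `order`: it computes the sorted
// permutation first, then applies it with cycle-following swaps so no
// temporary copy of the index or value data is needed.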
template <typename T>
inline void SparseTensor::Reorder(const VarDimArray& order) {
DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
<< "Reorder requested with the wrong datatype";
DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank";
auto ix_t = ix_.matrix<int64_t>();
auto vals_t = vals_.vec<T>();
std::vector<int64_t> reorder(num_entries());
std::iota(reorder.begin(), reorder.end(), 0);
switch (order.size()) {
#define CASE_SORT(ORDER_SIZE) \
case ORDER_SIZE: { \
FixedDimComparator<ORDER_SIZE> sorter(ix_t, order, shape()); \
std::sort(reorder.begin(), reorder.end(), sorter); \
break; \
}
CASE_SORT(0);
CASE_SORT(1);
CASE_SORT(2);
CASE_SORT(3);
CASE_SORT(4);
CASE_SORT(5);
#undef CASE_SORT
default: {
DimComparator sorter(ix_t, order, shape());
std::sort(reorder.begin(), reorder.end(), sorter);
}
}
std::vector<size_t> permutation(reorder.size());
for (std::size_t n = 0; n < reorder.size(); ++n) {
permutation[reorder[n]] = n;
}
for (std::size_t n = 0; n + 1 < permutation.size(); ++n) {
while (n != permutation[n]) {
std::size_t r = permutation[n];
std::swap_ranges(&(ix_t(n, 0)), &(ix_t(n + 1, 0)), &(ix_t(r, 0)));
std::swap(vals_t(n), vals_t(r));
std::swap(permutation[n], permutation[r]);
}
}
order_ = ShapeArray(order.begin(), order.end());
}
template <typename T>
inline bool SparseTensor::ValidateAndInitializeToDense(Tensor* out,
bool initialize) {
DCHECK_EQ(DataTypeToEnum<T>::v(), dtype())
<< "ToDense requested with the wrong datatype";
DCHECK_EQ(out->shape().dims(), dims_)
<< "Incompatible dimensions between SparseTensor and output";
DCHECK_EQ(out->dtype(), DataTypeToEnum<T>::v())
<< "Output must be type: " << DataTypeToEnum<T>::v()
<< " but got: " << out->dtype();
const auto& out_shape = out->shape();
if (shape_.size() != out_shape.dims()) return false;
for (int d = 0; d < shape_.size(); ++d) {
if (shape_[d] > out_shape.dim_size(d)) return false;
}
if (initialize) {
auto out_t = out->flat<T>();
out_t.setConstant(T());
}
return true;
}
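// Scatters the stored values into `out` using row-major strides. Returns
// false (with `out` possibly partially written) if any index falls outside
// the output shape.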
template <typename T>
inline bool SparseTensor::ToDense(Tensor* out, bool initialize) {
if (!ValidateAndInitializeToDense<T>(out, initialize)) return false;
auto out_t = out->flat<T>();
auto vals_t = vals_.vec<T>();
auto ix_t = ix_.matrix<int64_t>();
const int64_t* const ix_ptr = ix_t.data();
if (dims_ == 1) {
const int64_t out_length = out->shape().dim_size(0);
for (int n = 0; n < vals_t.dimension(0); ++n) {
const int64_t index = internal::SubtleMustCopy(ix_ptr[n]);
if (!FastBoundsCheck(index, out_length)) return false;
out_t(index) = vals_t(n);
}
return true;
} else if (dims_ == 2) {
const auto& out_shape = out->shape();
const int64_t out_rows = out_shape.dim_size(0);
const int64_t out_cols = out_shape.dim_size(1);
for (int n = 0; n < vals_t.dimension(0); ++n) {
const int64_t row_index = internal::SubtleMustCopy(ix_ptr[n * 2]);
const int64_t col_index = internal::SubtleMustCopy(ix_ptr[n * 2 + 1]);
if (!(FastBoundsCheck(row_index, out_rows) &&
FastBoundsCheck(col_index, out_cols))) {
return false;
}
out_t(row_index * out_cols + col_index) = vals_t(n);
}
return true;
} else {
absl::InlinedVector<int64_t, 4UL> strides(dims_);
const auto& out_shape = out->shape().dim_sizes();
if (dims_ > 0) {
strides[dims_ - 1] = 1;
}
for (int d = dims_ - 2; d >= 0; --d) {
strides[d] = strides[d + 1] * out_shape[d + 1];
}
for (int n = 0; n < vals_t.dimension(0); ++n) {
bool invalid_dims = false;
int64_t ix = 0;
for (int d = 0; d < dims_; ++d) {
const int64_t ix_n_d = internal::SubtleMustCopy(ix_ptr[n * dims_ + d]);
if (!FastBoundsCheck(ix_n_d, out_shape[d])) {
invalid_dims = true;
}
ix += strides[d] * ix_n_d;
}
if (invalid_dims) return false;
out_t(ix) = vals_t(n);
}
return true;
}
}
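// Concatenates along the primary (first-ordered) dimension. All inputs must
// share rank, dtype, order[0], and every non-concat dimension size; indices
// along the concat dimension are shifted by the running shape offset.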
template <typename T>
inline SparseTensor SparseTensor::Concat(
const absl::Span<const SparseTensor>& tensors) {
DCHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors";
const int dims = tensors[0].dims_;
DCHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors";
auto order_0 = tensors[0].order();
const int primary_dim = order_0[0];
ShapeArray final_order(order_0.begin(), order_0.end());
ShapeArray final_shape(tensors[0].shape().begin(), tensors[0].shape().end());
final_shape[primary_dim] = 0;
int num_entries = 0;
bool fully_ordered = true;
for (const SparseTensor& st : tensors) {
DCHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank.";
DCHECK_EQ(DataTypeToEnum<T>::v(), st.dtype())
<< "Concat requested with the wrong data type";
DCHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered";
DCHECK_EQ(st.order()[0], primary_dim)
<< "All SparseTensors' order[0] must match. This is the concat dim.";
if (st.order() != final_order) fully_ordered = false;
const VarDimArray& st_shape = st.shape();
for (int d = 0; d < dims - 1; ++d) {
const int cdim = (d < primary_dim) ? d : d + 1;
DCHECK_EQ(final_shape[cdim], st_shape[cdim])
<< "All SparseTensors' shapes must match except on the concat dim. "
<< "Concat dim: " << primary_dim
<< ", mismatched shape at dim: " << cdim
<< ". Expecting shape like: [" << str_util::Join(final_shape, ",")
<< "] but saw shape: [" << str_util::Join(st_shape, ",") << "]";
}
final_shape[primary_dim] =
(final_shape[primary_dim] + st_shape[primary_dim]);
num_entries += st.num_entries();
}
if (!fully_ordered) {
final_order = UndefinedOrder(final_shape);
}
Tensor output_ix(DT_INT64, TensorShape({num_entries, dims}));
Tensor output_vals(DataTypeToEnum<T>::v(), TensorShape({num_entries}));
TTypes<int64_t>::Matrix ix_t = output_ix.matrix<int64_t>();
typename TTypes<T>::Vec vals_t = output_vals.vec<T>();
Eigen::DenseIndex offset = 0;
int64_t shape_offset = 0;
for (const SparseTensor& st : tensors) {
const int st_num_entries = st.num_entries();
if (st_num_entries > 0) {
std::copy_n(&st.vals_.vec<T>()(0), st_num_entries, &vals_t(offset));
const auto* st_ix = &st.ix_.matrix<int64_t>()(0, 0);
auto* ix_out = &ix_t(offset, 0);
for (std::size_t i = 0; i < st_num_entries * dims; ++i) {
*ix_out++ = *st_ix++ + ((i % dims == primary_dim) ? shape_offset : 0);
}
}
offset += st_num_entries;
shape_offset += st.shape()[primary_dim];
}
return SparseTensor(output_ix, output_vals, final_shape, final_order);
}
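// Splits along `split_dim` into `num_split` tensors: a first pass counts how
// many entries land in each slice, a second pass copies the values and
// rebases each entry's split-dim index into its slice's coordinate system.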
template <typename T>
inline Status SparseTensor::Split(const SparseTensor& input_tensor,
const int split_dim, const int num_split,
std::vector<SparseTensor>* result) {
std::vector<Tensor> output_indices;
std::vector<Tensor> output_values;
std::vector<TensorShape> output_shapes;
output_indices.reserve(num_split);
output_values.reserve(num_split);
output_shapes.reserve(num_split);
std::vector<typename TTypes<int64_t>::Matrix> output_indices_t;
std::vector<typename TTypes<T>::Vec> output_values_t;
output_indices_t.reserve(num_split);
output_values_t.reserve(num_split);
auto input_values_t = input_tensor.values().vec<T>();
auto input_indices_t = input_tensor.indices().matrix<int64_t>();
std::vector<int> num_values(num_split, 0);
const int num_dim = input_tensor.shape().size();
const int split_dim_size = input_tensor.shape()[split_dim];
const int split_size = split_dim_size / num_split;
if (!(num_split > 0 && num_split <= split_dim_size)) {
return errors::InvalidArgument("num_split must be in the interval (0, ",
split_dim_size, "]");
}
if (!(split_dim >= 0 && split_dim < num_dim)) {
return errors::InvalidArgument("num_dim must be in the interval [0, ",
num_dim, ")");
}
const int residual = split_dim_size % num_split;
for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) {
const int dim = input_tensor.indices().matrix<int64_t>()(i, split_dim);
int slice_index = GetSliceIndex(dim, split_size, residual);
if (slice_index >= num_values.size()) {
return errors::InvalidArgument("Slice index ", slice_index,
" is larger than num_split.");
}
num_values[slice_index]++;
}
for (int i = 0; i < num_split; ++i) {
output_indices.emplace_back(DT_INT64,
TensorShape({num_values[i], num_dim}));
output_values.emplace_back(DataTypeToEnum<T>::v(),
TensorShape({num_values[i]}));
output_shapes.emplace_back(input_tensor.shape());
output_indices_t.emplace_back(output_indices[i].matrix<int64_t>());
output_values_t.emplace_back(output_values[i].vec<T>());
const int size = GetSliceShape(i, split_size, residual);
output_shapes[i].set_dim(split_dim, size);
}
std::vector<int> values_inserted_in_slice(num_split, 0);
for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) {
const int dim = input_indices_t(i, split_dim);
const int slice_index = GetSliceIndex(dim, split_size, residual);
const int slice_dim = values_inserted_in_slice[slice_index]++;
output_values_t[slice_index](slice_dim) = input_values_t(i);
for (int j = 0; j < num_dim; ++j) {
const int64_t original_dim = input_indices_t(i, j);
output_indices_t[slice_index](slice_dim, j) =
(j == split_dim)
? GetDimensionInSlice(original_dim, split_size, residual)
: original_dim;
}
}
result->clear();
result->reserve(num_split);
for (int i = 0; i < num_split; ++i) {
SparseTensor tensor;
Status create_status =
Create(output_indices[i], output_values[i], output_shapes[i], &tensor);
if (!create_status.ok()) {
return create_status;
}
result->push_back(std::move(tensor));
}
return absl::OkStatus();
}
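// Returns the sub-tensor covering [start, start + size) in every dimension.
// Entries outside the window are dropped, surviving indices are shifted by
// -start, and the output shape is clipped where the window overruns the
// input.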
template <typename T>
inline absl::StatusOr<SparseTensor> SparseTensor::Slice(
const SparseTensor& input_tensor, const absl::Span<const int64_t> start,
const absl::Span<const int64_t> size) {
TensorShape output_shape(input_tensor.shape());
const int dims = input_tensor.dims();
for (int dim = 0; dim < dims; dim++) {
const int64_t input_size = output_shape.dim_size(dim);
const int64_t start_index = start[dim];
const int64_t slice_size = size[dim];
if (start_index < input_size - slice_size) {
TF_RETURN_IF_ERROR(output_shape.SetDimWithStatus(dim, slice_size));
} else if (start_index < input_size) {
TF_RETURN_IF_ERROR(
output_shape.SetDimWithStatus(dim, input_size - start_index));
} else {
TF_RETURN_IF_ERROR(output_shape.SetDimWithStatus(dim, 0));
}
}
auto input_indices_t = input_tensor.indices().matrix<int64_t>();
auto input_values_t = input_tensor.values().vec<T>();
int count = 0;
for (int i = 0; i < input_tensor.indices().dim_size(0); i++) {
bool hit = true;
for (int dim = 0; dim < dims; dim++) {
if (!(start[dim] <= input_indices_t(i, dim) &&
input_indices_t(i, dim) < start[dim] + size[dim])) {
hit = false;
break;
}
}
if (!hit) {
continue;
}
count++;
}
Tensor output_values(DataTypeToEnum<T>::v(), TensorShape({count}));
Tensor output_indices(DT_INT64, TensorShape({count, dims}));
auto output_values_t = output_values.vec<T>();
auto output_indices_t = output_indices.matrix<int64_t>();
int index = 0;
for (int i = 0; i < input_tensor.indices().dim_size(0) && index < count;
i++) {
bool hit = true;
for (int dim = 0; dim < dims; dim++) {
if (!(start[dim] <= input_indices_t(i, dim) &&
input_indices_t(i, dim) < start[dim] + size[dim])) {
hit = false;
break;
}
}
if (!hit) {
continue;
}
output_values_t(index) = input_values_t(i);
for (int dim = 0; dim < dims; dim++) {
output_indices_t(index, dim) = input_indices_t(i, dim) - start[dim];
}
index++;
}
return SparseTensor(output_indices, output_values, output_shape);
}
}
}
#endif
#include "tensorflow/core/util/sparse/sparse_tensor.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace sparse {
namespace {
int UnsafeGetDimsFromIx(const Tensor& ix) {
DCHECK(TensorShapeUtils::IsMatrix(ix.shape()));
return ix.dim_size(1);
}
Status GetDimsFromIx(const Tensor& ix, int* result) {
if (!TensorShapeUtils::IsMatrix(ix.shape())) {
return errors::InvalidArgument("indices must be a matrix, but got: ",
ix.shape().DebugString());
}
*result = UnsafeGetDimsFromIx(ix);
  return absl::OkStatus();
}
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const VarDimArray shape,
const VarDimArray order,
SparseTensor* result) {
if (ix.dtype() != DT_INT64) {
return errors::InvalidArgument("indices must be type int64 but got: ",
ix.dtype());
}
if (!TensorShapeUtils::IsVector(vals.shape())) {
return errors::InvalidArgument("vals must be a vec, but got: ",
vals.shape().DebugString());
}
if (ix.shape().dim_size(0) != vals.shape().dim_size(0)) {
return errors::InvalidArgument(
"indices and values rows (indexing "
"dimension) must match. (indices = ",
ix.shape().dim_size(0), ", values = ", vals.shape().dim_size(0), ")");
}
int dims = 0;
TF_RETURN_IF_ERROR(GetDimsFromIx(ix, &dims));
if (order.size() != dims) {
return errors::InvalidArgument("Order length must be SparseTensor rank.");
}
if (shape.size() != dims) {
return errors::InvalidArgument("Shape rank must be SparseTensor rank.");
}
result->ix_ = std::move(ix);
result->vals_ = std::move(vals);
result->shape_.assign(shape.begin(), shape.end());
result->order_.assign(order.begin(), order.end());
result->dims_ = dims;
return absl::OkStatus();
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const TensorShape& shape,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), TensorShapeToVector(shape),
UndefinedOrder(TensorShapeToVector(shape)), result);
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const VarDimArray shape,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), shape, UndefinedOrder(shape),
result);
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const TensorShape& shape,
const VarDimArray order,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), TensorShapeToVector(shape),
order, result);
}
SparseTensor::SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order)
: ix_(std::move(ix)),
vals_(std::move(vals)),
shape_(shape.begin(), shape.end()),
order_(order.begin(), order.end()),
dims_(UnsafeGetDimsFromIx(ix_)) {
DCHECK_EQ(ix_.dtype(), DT_INT64)
<< "indices must be type int64 but got: " << ix_.dtype();
DCHECK(TensorShapeUtils::IsVector(vals_.shape()))
<< "vals must be a vec, but got: " << vals_.shape().DebugString();
DCHECK_EQ(ix_.shape().dim_size(0), vals_.shape().dim_size(0))
<< "indices and values rows (indexing dimension) must match.";
DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
DCHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
}
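// Fast path for rank-1 tensors in standard order: the strict
// `index > prev_index` check enforces sortedness and uniqueness at once, and
// since prev_index starts at -1 it also rejects negative indices. Bitwise &
// (rather than &&) keeps the loop branch-free.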
bool SparseTensor::IndicesValidVectorFastPath() const {
DCHECK_EQ(shape_.size(), 1);
DCHECK_EQ(order_[0], 0);
const int64_t max_index = shape_[0];
bool index_in_range_valid = true;
bool order_valid = true;
int64_t prev_index = -1;
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const index_base_ptr = ix_t.data();
for (std::size_t n = 0; n < ix_t.dimension(0); ++n) {
const int64_t index = index_base_ptr[n];
index_in_range_valid = index_in_range_valid & (index < max_index);
order_valid = order_valid & (index > prev_index);
prev_index = index;
}
return index_in_range_valid & order_valid;
}
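// Fast path for rank-2, standard-order tensors whose shape fits in int32:
// each int64 index is reinterpreted as two int32 halves, and requiring the
// high half to be zero rules out both negative indices and values >= 2^32
// without 64-bit comparisons.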
bool SparseTensor::IndicesValidMatrix32BitFastPath() const {
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const shape_ptr = shape_.data();
DCHECK_EQ(shape_.size(), 2);
DCHECK_EQ(order_[0], 0);
DCHECK_EQ(order_[1], 1);
DCHECK_LE(shape_ptr[0], std::numeric_limits<int32>::max());
DCHECK_LE(shape_ptr[1], std::numeric_limits<int32>::max());
const int32_t max_rows = static_cast<int32>(shape_ptr[0]);
const int32_t max_cols = static_cast<int32>(shape_ptr[1]);
bool row_zeros_valid = true;
bool row_in_range_valid = true;
bool col_zeros_valid = true;
bool col_in_range_valid = true;
bool order_valid = true;
int64_t prev_index = -1;
const int32* const index_base_ptr =
reinterpret_cast<const int32*>(ix_t.data());
const size_t kInt32ElementsPerRow = 4;
for (std::size_t n = 0; n < ix_t.dimension(0); ++n) {
const int32* const index_ptr = index_base_ptr + n * kInt32ElementsPerRow;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const int32 row_zeros = index_ptr[0];
const int32 row_32 = index_ptr[1];
const int32 col_zeros = index_ptr[2];
const int32 col_32 = index_ptr[3];
#else
const int32_t row_32 = index_ptr[0];
const int32_t row_ze | #include "tensorflow/core/util/sparse/sparse_tensor.h"
#include <string>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace sparse {
namespace {
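// Returns five rank-3 indices that are deliberately out of (0, 1, 2) order
// so the reorder tests below have something to sort.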
Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex>
GetSimpleIndexTensor(int N, const int NDIM) {
Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex> ix(N, NDIM);
ix(0, 0) = 0;
ix(0, 1) = 0;
ix(0, 2) = 0;
ix(1, 0) = 3;
ix(1, 1) = 0;
ix(1, 2) = 0;
ix(2, 0) = 2;
ix(2, 1) = 0;
ix(2, 2) = 0;
ix(3, 0) = 0;
ix(3, 1) = 1;
ix(3, 2) = 0;
ix(4, 0) = 0;
ix(4, 1) = 0;
ix(4, 2) = 2;
return ix;
}
TEST(SparseTensorTest, DimComparatorSorts) {
int64_t N = 5;
const int NDIM = 3;
auto ix = GetSimpleIndexTensor(N, NDIM);
TTypes<int64_t>::Matrix map(ix.data(), N, NDIM);
std::vector<int64_t> sorting(N);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::vector<int64_t> order{0, 1, 2};
std::vector<int64_t> shape{N, N, N};
DimComparator sorter(map, order, shape);
std::sort(sorting.begin(), sorting.end(), sorter);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 4, 3, 2, 1}));
FixedDimComparator<3> sorter_fixed(map, order, shape);
std::sort(sorting.begin(), sorting.end(), sorter_fixed);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 4, 3, 2, 1}));
std::vector<int64_t> order1{2, 0, 1};
DimComparator sorter1(map, order1, shape);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::sort(sorting.begin(), sorting.end(), sorter1);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 3, 2, 1, 4}));
FixedDimComparator<3> sorter1_fixed(map, order1, shape);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::sort(sorting.begin(), sorting.end(), sorter1_fixed);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 3, 2, 1, 4}));
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesType) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT32, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidValues) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N, 1}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidN) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N - 1}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidOrder) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(
SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1}, &result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(
SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorConstruction) {
int N = 5;
const int NDIM = 3;
auto ix_c = GetSimpleIndexTensor(N, NDIM);
Eigen::Tensor<tstring, 1, Eigen::RowMajor> vals_c(N);
vals_c(0) = "hi0";
vals_c(1) = "hi1";
vals_c(2) = "hi2";
vals_c(3) = "hi3";
vals_c(4) = "hi4";
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
vals_t = vals_c;
ix_t = ix_c;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ(
"indices[2] = [2,0,0] is out of order. "
"Many sparse ops require sorted indices.\n"
" Use `tf.sparse.reorder` to create a correctly ordered copy."
"\n\n",
st_indices_valid.message());
st.Reorder<tstring>({2, 0, 1});
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(vals_t(0), "hi0");
EXPECT_EQ(vals_t(1), "hi3");
EXPECT_EQ(vals_t(2), "hi2");
EXPECT_EQ(vals_t(3), "hi1");
EXPECT_EQ(vals_t(4), "hi4");
ix_t = ix_c;
vals_t = vals_c;
st.Reorder<tstring>({0, 1, 2});
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(vals_t(0), "hi0");
EXPECT_EQ(vals_t(1), "hi4");
EXPECT_EQ(vals_t(2), "hi3");
EXPECT_EQ(vals_t(3), "hi2");
EXPECT_EQ(vals_t(4), "hi1");
ix_t = ix_c;
vals_t = vals_c;
st.Reorder<tstring>({2, 1, 0});
TF_EXPECT_OK(st.IndicesValid());
}
TEST(SparseTensorTest, EmptySparseTensorAllowed) {
int N = 0;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
std::vector<int64_t> shape{10, 10, 10};
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), order);
std::vector<int64_t> new_order{1, 0, 2};
st.Reorder<tstring>(new_order);
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), new_order);
}
TEST(SparseTensorTest, SortingWorksCorrectly) {
int N = 30;
const int NDIM = 4;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({1000, 1000, 1000, 1000});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
auto ix_t = ix.matrix<int64_t>();
for (int n = 0; n < 100; ++n) {
ix_t = ix_t.random(Eigen::internal::UniformRandomGenerator<int64_t>(n + 1));
ix_t = ix_t.abs() % 1000;
st.Reorder<tstring>({0, 1, 2, 3});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({3, 2, 1, 0});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({1, 0, 2, 3});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({3, 0, 2, 1});
TF_EXPECT_OK(st.IndicesValid());
}
}
TEST(SparseTensorTest, ValidateIndicesFindsInvalid) {
int N = 2;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
Eigen::Tensor<int64_t, 2, Eigen::RowMajor> ix_orig(N, NDIM);
ix_orig(0, 0) = 0;
ix_orig(0, 1) = 0;
ix_orig(0, 2) = 0;
ix_orig(1, 0) = 0;
ix_orig(1, 1) = 0;
ix_orig(1, 2) = 0;
auto ix_t = ix.matrix<int64_t>();
ix_t = ix_orig;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<tstring>(order);
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[1] = [0,0,0] is repeated", st_indices_valid.message());
ix_orig(1, 2) = 1;
ix_t = ix_orig;
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
ix_orig(0, 2) = 1;
ix_t = ix_orig;
st.Reorder<tstring>(order);
st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[1] = [0,0,1] is repeated", st_indices_valid.message());
}
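// IndicesValid() also range-checks every coordinate against the dense shape;
// the test below pushes one index past the upper bound and one below zero.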
TEST(SparseTensorTest, SparseTensorCheckBoundaries) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
ix.matrix<int64_t>() = ix_t;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
ix_t(0, 0) = 11;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("[11,0,0] is out of bounds: need 0 <= index < [10,10,10]",
st_indices_valid.message().substr(13));
ix_t(0, 0) = -1;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("[-1,0,0] is out of bounds: need 0 <= index < [10,10,10]",
st_indices_valid.message().substr(13));
ix_t(0, 0) = 0;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
}
TEST(SparseTensorTest, SparseTensorToDenseTensor) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
auto vals_t = vals.vec<tstring>();
ix.matrix<int64_t>() = ix_t;
vals_t(0) = "hi0";
vals_t(1) = "hi1";
vals_t(2) = "hi2";
vals_t(3) = "hi3";
vals_t(4) = "hi4";
TensorShape shape({4, 4, 5});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({4, 4, 5}));
st.ToDense<tstring>(&dense);
auto dense_t = dense.tensor<tstring, 3>();
Eigen::array<Eigen::DenseIndex, NDIM> ix_n;
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d) ix_n[d] = ix_t(n, d);
EXPECT_EQ(dense_t(ix_n), vals_t(n));
}
EXPECT_EQ(dense_t(0, 0, 1), "");
EXPECT_EQ(dense_t(0, 0, 3), "");
EXPECT_EQ(dense_t(3, 3, 3), "");
EXPECT_EQ(dense_t(3, 3, 4), "");
}
TEST(SparseTensorTest, SparseTensorToLargerDenseTensor) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
auto vals_t = vals.vec<tstring>();
ix.matrix<int64_t>() = ix_t;
vals_t(0) = "hi0";
vals_t(1) = "hi1";
vals_t(2) = "hi2";
vals_t(3) = "hi3";
vals_t(4) = "hi4";
TensorShape shape({4, 4, 5});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({10, 10, 10}));
st.ToDense<tstring>(&dense);
auto dense_t = dense.tensor<tstring, 3>();
Eigen::array<Eigen::DenseIndex, NDIM> ix_n;
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d) ix_n[d] = ix_t(n, d);
EXPECT_EQ(dense_t(ix_n), vals_t(n));
}
EXPECT_EQ(dense_t(0, 0, 1), "");
EXPECT_EQ(dense_t(0, 0, 3), "");
EXPECT_EQ(dense_t(3, 3, 3), "");
EXPECT_EQ(dense_t(3, 3, 4), "");
EXPECT_EQ(dense_t(9, 0, 0), "");
EXPECT_EQ(dense_t(9, 0, 9), "");
EXPECT_EQ(dense_t(9, 9, 9), "");
}
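// group({0}) iterates over runs of entries that share the same index in
// dimension 0; the tensor must already be ordered by the grouped dimensions.
// A minimal sketch of the iteration pattern exercised below:
//
//   for (const auto& g : st.group({0})) {
//     // g.group() is the shared prefix; g.indices() / g.values<T>() the run.
//   }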
TEST(SparseTensorTest, SparseTensorGroup) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_INT32, TensorShape({N}));
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<int32>();
ix_t = GetSimpleIndexTensor(N, NDIM);
vals_t(0) = 1;
vals_t(1) = 2;
vals_t(2) = 3;
vals_t(3) = 4;
vals_t(4) = 5;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<int32>(order);
std::vector<std::vector<int64_t> > groups;
std::vector<TTypes<int64_t>::UnalignedConstMatrix> grouped_indices;
std::vector<TTypes<int32>::UnalignedVec> grouped_values;
auto gi = st.group({0});
for (const auto& g : gi) {
groups.push_back(g.group());
VLOG(1) << "Group: " << absl::StrJoin(g.group(), ",");
VLOG(1) << "Indices: " << g.indices();
VLOG(1) << "Values: " << g.values<int32>();
grouped_indices.push_back(g.indices());
grouped_values.push_back(g.values<int32>());
}
EXPECT_EQ(groups.size(), 3);
EXPECT_EQ(groups[0], std::vector<int64_t>({0}));
EXPECT_EQ(groups[1], std::vector<int64_t>({2}));
EXPECT_EQ(groups[2], std::vector<int64_t>({3}));
std::vector<Eigen::Tensor<int64_t, 2, Eigen::RowMajor> > expected_indices;
std::vector<Eigen::Tensor<int32, 1, Eigen::RowMajor> > expected_vals;
expected_indices.emplace_back(3, NDIM);
expected_vals.emplace_back(3);
expected_indices[0].setZero();
expected_indices[0](1, 2) = 2;
expected_indices[0](2, 1) = 1;
expected_vals[0].setConstant(-1);
expected_vals[0](0) = 1;
expected_vals[0](1) = 5;
expected_vals[0](2) = 4;
expected_indices.emplace_back(1, NDIM);
expected_vals.emplace_back(1);
expected_indices[1].setZero();
expected_indices[1](0, 0) = 2;
expected_vals[1](0) = 3;
expected_indices.emplace_back(1, NDIM);
expected_vals.emplace_back(1);
expected_indices[2].setZero();
expected_indices[2](0, 0) = 3;
expected_vals[2](0) = 2;
for (std::size_t gix = 0; gix < groups.size(); ++gix) {
auto gi_t = grouped_indices[gix];
Eigen::Tensor<bool, 0, Eigen::RowMajor> eval =
(gi_t == expected_indices[gix]).all();
EXPECT_TRUE(eval()) << gix << " indices: " << gi_t << " vs. "
<< expected_indices[gix];
auto gv_t = grouped_values[gix];
eval = (gv_t == expected_vals[gix]).all();
EXPECT_TRUE(eval()) << gix << " values: " << gv_t << " vs. "
<< expected_vals[gix];
}
}
TEST(SparseTensorTest, Concat) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_c = GetSimpleIndexTensor(N, NDIM);
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
ix_t = ix_c;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
SparseTensor concatted = SparseTensor::Concat<tstring>({st, st, st, st});
EXPECT_EQ(concatted.order(), st.order());
absl::InlinedVector<int64_t, 8UL> expected_shape{40, 10, 10};
EXPECT_EQ(concatted.shape(), expected_shape);
EXPECT_EQ(concatted.num_entries(), 4 * N);
TF_EXPECT_OK(concatted.IndicesValid());
auto conc_ix_t = concatted.indices().matrix<int64_t>();
auto conc_vals_t = concatted.values().vec<tstring>();
for (int n = 0; n < 4; ++n) {
for (int i = 0; i < N; ++i) {
EXPECT_EQ(conc_ix_t(n * N + i, 0), 10 * n + ix_t(i, 0));
EXPECT_EQ(conc_ix_t(n * N + i, 1), ix_t(i, 1));
      EXPECT_EQ(conc_ix_t(n * N + i, 2), ix_t(i, 2));
EXPECT_EQ(conc_vals_t(n * N + i), vals_t(i));
}
}
SparseTensor st_ooo;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 2, 1},
&st_ooo));
SparseTensor conc_ooo = SparseTensor::Concat<tstring>({st, st, st, st_ooo});
std::vector<int64_t> expected_ooo{-1, -1, -1};
EXPECT_EQ(conc_ooo.order(), expected_ooo);
EXPECT_EQ(conc_ooo.shape(), expected_shape);
EXPECT_EQ(conc_ooo.num_entries(), 4 * N);
}
TEST(SparseTensorTest, ConcatEmptyN) {
constexpr int N = 0;
constexpr int NDIM = 2;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({10, 10});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 1}, &st));
SparseTensor concatted = SparseTensor::Concat<tstring>({st, st, st});
EXPECT_EQ(concatted.num_entries(), 0);
}
TEST(SparseTensorTest, Split) {
const int N = 4;
const int DIM = 2;
Tensor ids(DT_INT64, TensorShape({N, DIM}));
Tensor vals(DT_INT64, TensorShape({N}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
ids.matrix<int64_t>()(2, 0) = 1;
ids.matrix<int64_t>()(2, 1) = 2;
ids.matrix<int64_t>()(3, 0) = 3;
ids.matrix<int64_t>()(3, 1) = 0;
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
vals.vec<int64_t>()(2) = 3;
vals.vec<int64_t>()(3) = 4;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<SparseTensor> st_list;
TF_ASSERT_OK(SparseTensor::Split<int64_t>(st, 0, 2, &st_list));
EXPECT_EQ(st_list.size(), 2);
auto expected_shape = absl::InlinedVector<int64_t, 8UL>{2, 3};
EXPECT_EQ(st_list[0].shape(), expected_shape);
EXPECT_EQ(st_list[0].values().NumElements(), 3);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(0), 1);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(1), 2);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(2), 3);
EXPECT_EQ(st_list[0].indices().NumElements(), 6);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(0, 0), 0);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(0, 1), 0);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(1, 0), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(1, 1), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(2, 0), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(2, 1), 2);
EXPECT_EQ(st_list[1].shape(), expected_shape);
EXPECT_EQ(st_list[1].values().NumElements(), 1);
EXPECT_EQ(st_list[1].values().vec<int64_t>()(0), 4);
EXPECT_EQ(st_list[1].indices().NumElements(), 2);
EXPECT_EQ(st_list[1].indices().matrix<int64_t>()(0, 0), 1);
EXPECT_EQ(st_list[1].indices().matrix<int64_t>()(0, 1), 0);
}
TEST(SparseTensorTest, Slice) {
const int N = 4;
const int DIM = 2;
Tensor ids(DT_INT64, TensorShape({N, DIM}));
Tensor vals(DT_INT64, TensorShape({N}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
ids.matrix<int64_t>()(2, 0) = 1;
ids.matrix<int64_t>()(2, 1) = 2;
ids.matrix<int64_t>()(3, 0) = 3;
ids.matrix<int64_t>()(3, 1) = 0;
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
vals.vec<int64_t>()(2) = 3;
vals.vec<int64_t>()(3) = 4;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<int64_t> start(2, 0);
std::vector<int64_t> size(2);
size[0] = 2;
size[1] = 3;
TF_ASSERT_OK_AND_ASSIGN(SparseTensor slice,
SparseTensor::Slice<int64_t>(st, start, size));
EXPECT_EQ(TensorShape(slice.shape()), TensorShape({2, 3}));
EXPECT_EQ(slice.values().NumElements(), 3);
EXPECT_EQ(slice.values().vec<int64_t>()(0), 1);
EXPECT_EQ(slice.values().vec<int64_t>()(1), 2);
EXPECT_EQ(slice.values().vec<int64_t>()(2), 3);
EXPECT_EQ(slice.indices().NumElements(), 6);
EXPECT_EQ(slice.indices().matrix<int64_t>()(0, 0), 0);
EXPECT_EQ(slice.indices().matrix<int64_t>()(0, 1), 0);
EXPECT_EQ(slice.indices().matrix<int64_t>()(1, 0), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(1, 1), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(2, 0), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(2, 1), 2);
}
TEST(SparseTensorTest, SliceReducesOutputDimension) {
const int num_rows = 2;
const int num_columns = 2;
Tensor ids(DT_INT64, TensorShape({num_rows, num_columns}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
Tensor vals(DT_INT64, TensorShape({2}));
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals,
TensorShape({num_rows, num_columns}), &st));
TF_ASSERT_OK_AND_ASSIGN(
SparseTensor slice,
SparseTensor::Slice<int64_t>(st, {num_rows + 1, 1}, {1, num_columns}));
EXPECT_EQ(TensorShape(slice.shape()), TensorShape({0, 1}));
}
TEST(SparseTensorTest, Dim0SparseTensorToDenseTensor) {
Tensor ix(DT_INT64, TensorShape({1, 0}));
Tensor vals(DT_INT32, TensorShape({1}));
vals.scalar<int32>()() = 5;
TensorShape shape({});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
Tensor dense(DT_INT32, TensorShape({}));
st.ToDense<int32>(&dense);
EXPECT_EQ(dense.scalar<int32>()(), 5);
}
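// The benchmarks below time Reorder() in isolation: regenerating random
// indices and rebuilding the SparseTensor happen while timing is paused.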
static void BM_SparseReorderFloat(::testing::benchmark::State& state) {
int N32 = state.range(0);
int NDIM32 = state.range(1);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
const int64_t NDIM = static_cast<int64_t>(NDIM32);
const int64_t N = static_cast<int64_t>(N32);
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_FLOAT, TensorShape({N}));
TensorShape shape;
std::vector<int64_t> order;
for (int d = 0; d < NDIM32; ++d) {
shape.AddDim(1000);
order.push_back(d);
}
std::vector<int64_t> reorder;
reorder.push_back(1);
reorder.push_back(0);
for (int d = 2; d < NDIM32; ++d) {
reorder.push_back(d);
}
auto ix_t = ix.matrix<int64_t>();
for (auto s : state) {
state.PauseTiming();
for (int64_t i = 0; i < N; ++i) {
for (int d = 0; d < NDIM32; ++d) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
state.ResumeTiming();
st.Reorder<float>(reorder);
}
}
static void BM_SparseReorderString(::testing::benchmark::State& state) {
int N32 = state.range(0);
int NDIM32 = state.range(1);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
const int64_t NDIM = static_cast<int64_t>(NDIM32);
const int64_t N = static_cast<int64_t>(N32);
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape;
std::vector<int64_t> order;
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
for (int i = 0; i < N32; ++i) {
int len = rnd.Rand32() % 1000;
vals_t(i).resize(len);
}
for (int d = 0; d < NDIM32; ++d) {
shape.AddDim(1000);
order.push_back(d);
}
std::vector<int64_t> reorder;
reorder.push_back(1);
reorder.push_back(0);
for (int d = 2; d < NDIM32; ++d) {
reorder.push_back(d);
}
for (auto s : state) {
state.PauseTiming();
for (int64_t i = 0; i < N; ++i) {
for (int d = 0; d < NDIM32; ++d) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
state.ResumeTiming();
st.Reorder<tstring>(reorder);
}
}
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(1000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(1000, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10000, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100000, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(100, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(1000, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10000, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(100, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(1000, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10000, 3);
}
}
} |
1,723 | cpp | tensorflow/tensorflow | serialization_utils | tensorflow/core/data/serialization_utils.cc | tensorflow/core/data/serialization_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERIALIZATION_UTILS_H_
#define TENSORFLOW_CORE_DATA_SERIALIZATION_UTILS_H_
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/lib/core/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
inline constexpr absl::string_view kRetvalOp = "_Retval";
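// Helpers for (de)serializing vectors of dataset elements to and from an
// iterator checkpoint. The element count is stored under "<key_prefix>" as
// "num_elements"; element i lives under "<key_prefix>::i" with a
// "num_components" scalar and one "component[j]" tensor per component.
// UpdateCheckpointElements rewrites only the elements whose indices appear
// in `checkpoint_indices`.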
Status ReadElementsFromCheckpoint(IteratorContext* ctx,
IteratorStateReader* reader,
StringPiece key_prefix,
std::vector<std::vector<Tensor>>* elements);
Status WriteElementsToCheckpoint(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements);
Status UpdateCheckpointElements(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
const absl::flat_hash_set<int64_t>& checkpoint_indices);
class VariantTensorDataReader : public IteratorStateReader {
public:
explicit VariantTensorDataReader(
const std::vector<const VariantTensorData*>& data);
bool Contains(StringPiece key) const override;
bool Contains(StringPiece name, StringPiece key) const override;
Status ReadScalar(StringPiece key, int64_t* val) const override;
Status ReadScalar(StringPiece name, StringPiece key,
int64_t* val) const override;
Status ReadScalar(StringPiece key, tstring* val) const override;
Status ReadScalar(StringPiece name, StringPiece key,
tstring* val) const override;
Status ReadTensor(StringPiece key, Tensor* val) const override;
Status ReadTensor(FunctionLibraryRuntime* flr, StringPiece key,
Tensor* val) const override;
Status ReadTensor(StringPiece name, StringPiece key,
Tensor* val) const override;
Status ReadTensor(FunctionLibraryRuntime* flr, StringPiece name,
StringPiece key, Tensor* val) const override;
private:
template <typename T>
Status ReadScalarInternal(StringPiece name, StringPiece key, T* val) const;
Status ReadTensorInternal(FunctionLibraryRuntime* flr, StringPiece name,
StringPiece key, Tensor* val) const;
Status ReadDatasetInternal(FunctionLibraryRuntime* flr, StringPiece name,
StringPiece key, Tensor* val) const;
std::map<string, Tensor> ReadAllTensors();
friend absl::StatusOr<absl::flat_hash_map<std::string, int64_t>>
CheckpointStats(const std::string& checkpoint_bytes);
std::map<string, std::map<string, size_t>> map_;
std::map<string, const VariantTensorData*> data_;
};
class VariantTensorDataWriter : public IteratorStateWriter {
public:
Status WriteScalar(StringPiece key, int64_t val) override;
Status WriteScalar(StringPiece name, StringPiece key, int64_t val) override;
Status WriteScalar(StringPiece key, const tstring& val) override;
Status WriteScalar(StringPiece name, StringPiece key,
const tstring& val) override;
Status WriteTensor(StringPiece key, const Tensor& val) override;
Status WriteTensor(StringPiece name, StringPiece key,
const Tensor& val) override;
void ReleaseData(std::vector<std::unique_ptr<VariantTensorData>>* variants);
void GetData(std::vector<const VariantTensorData*>* variants);
private:
void MaybeFlush();
void Reset();
template <typename T>
Status WriteScalarInternal(StringPiece name, StringPiece key, const T& val);
Status WriteTensorInternal(StringPiece name, StringPiece key,
const Tensor& val);
Status WriteDatasetInternal(StringPiece name, StringPiece key,
const DatasetBase* dataset);
bool is_flushed_ = false;
std::map<string, std::unique_ptr<VariantTensorData>> data_;
std::map<string, std::vector<string>> keys_;
};
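// Typical use of VariantTensorDataWriter/VariantTensorDataReader (a sketch
// mirroring the unit tests for this file, not a normative API reference):
//
//   VariantTensorDataWriter writer;
//   TF_RETURN_IF_ERROR(writer.WriteScalar("Iterator", "Int64", 24));
//   std::vector<const VariantTensorData*> data;
//   writer.GetData(&data);  // Flushes the writer; no further writes allowed.
//   VariantTensorDataReader reader(data);
//   int64_t val;
//   TF_RETURN_IF_ERROR(reader.ReadScalar("Iterator", "Int64", &val));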
class IteratorStateVariant {
public:
IteratorStateVariant() = default;
IteratorStateVariant(const IteratorStateVariant& other);
IteratorStateVariant& operator=(IteratorStateVariant&& other) = default;
IteratorStateVariant& operator=(const IteratorStateVariant& other) = delete;
static std::string TypeName();
Status InitializeFromVariantData(std::unique_ptr<VariantTensorData> data);
const VariantTensorData* GetData() const { return data_.get(); }
void Encode(VariantTensorData* data) const;
bool Decode(VariantTensorData data);
std::string DebugString() const;
private:
static const CompressedElement* GetCompressedElement(
const VariantTensorData& data);
std::unique_ptr<VariantTensorData> data_;
};
Status AsGraphDef(const DatasetBase* dataset,
SerializationContext&& serialization_ctx,
GraphDef* graph_def);
Status AsGraphDefForRewrite(OpKernelContext* ctx, const DatasetBase* input,
std::vector<std::pair<string, Tensor>>* input_list,
GraphDef* result, string* dataset_node);
absl::StatusOr<absl::flat_hash_map<std::string, int64_t>> CheckpointStats(
const std::string& checkpoint_bytes);
}
}
#endif
#include "tensorflow/core/data/serialization_utils.h"
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/data/compression_utils.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kDelimiter[] = "@@";
constexpr char kComponent[] = "component";
constexpr char kNumComponents[] = "num_components";
constexpr char kNumElements[] = "num_elements";
constexpr char kIsDataset[] = ".is_dataset";
constexpr char kIteratorVariantTypeName[] = "tensorflow::Iterator";
constexpr char kOutputNode[] = ".output_node";
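// The metadata string written by VariantTensorDataWriter::MaybeFlush() has
// the form "name@@key1@@key2@@...": segment 0 names the iterator and segment
// i + 1 names tensors(i) of the same VariantTensorData.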
Status FromGraphDef(FunctionLibraryRuntime* flr, const GraphDef& graph_def,
const std::vector<std::pair<string, Tensor>>& input_list,
const string& output_node, Tensor* result) {
FunctionLibraryRuntime* cloned_flr = nullptr;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def = nullptr;
TF_RETURN_IF_ERROR(flr->Clone(&lib_def, &pflr, &cloned_flr, true));
TF_RETURN_IF_ERROR(AddToFunctionLibrary(lib_def.get(), graph_def.library()));
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
std::vector<Tensor> outputs;
GraphRunner graph_runner(cloned_flr->device());
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, cloned_flr, input_list,
{output_node}, &outputs));
*result = outputs[0];
return absl::OkStatus();
}
Status FindStatefulOps(const GraphDef& graph_def,
std::vector<string>* stateful_op_names) {
FunctionLibraryDefinition lib_def(OpRegistry::Global(), graph_def.library());
for (const auto& node : graph_def.node()) {
if (node.op() == FunctionLibraryDefinition::kRetOp) continue;
if (!IsNodeStateful(lib_def, node).ok()) {
stateful_op_names->push_back(node.op());
}
}
for (const auto& fdef : graph_def.library().function()) {
if (!fdef.signature().is_stateful()) continue;
for (const auto& node : fdef.node_def()) {
if (!IsNodeStateful(lib_def, node).ok()) {
stateful_op_names->push_back(
absl::StrCat(node.op(), " in function: ", fdef.signature().name()));
}
}
}
return absl::OkStatus();
}
}
Status ReadElementsFromCheckpoint(IteratorContext* ctx,
IteratorStateReader* reader,
StringPiece key_prefix,
std::vector<std::vector<Tensor>>* elements) {
int64_t num_elements;
TF_RETURN_IF_ERROR(
reader->ReadScalar(key_prefix, kNumElements, &num_elements));
DCHECK(elements->empty());
elements->reserve(num_elements);
for (int i = 0; i < num_elements; ++i) {
std::string element_prefix = absl::StrCat(key_prefix, "::", i);
int64_t num_components;
TF_RETURN_IF_ERROR(
reader->ReadScalar(element_prefix, kNumComponents, &num_components));
elements->emplace_back();
std::vector<Tensor>& element = elements->at(i);
element.reserve(num_components);
for (int j = 0; j < num_components; ++j) {
element.emplace_back();
TF_RETURN_IF_ERROR(reader->ReadTensor(
ctx->flr(), element_prefix, absl::StrCat(kComponent, "[", j, "]"),
&element.back()));
}
}
return absl::OkStatus();
}
Status WriteElement(IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
int64_t index) {
const std::vector<Tensor>& element = elements[index];
std::string element_prefix = absl::StrCat(key_prefix, "::", index);
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kNumComponents, element.size()));
for (int j = 0; j < element.size(); ++j) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
element_prefix, absl::StrCat(kComponent, "[", j, "]"), element[j]));
}
return absl::OkStatus();
}
Status WriteElementsToCheckpoint(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_prefix, kNumElements, elements.size()));
for (int i = 0; i < elements.size(); ++i) {
TF_RETURN_IF_ERROR(WriteElement(writer, key_prefix, elements, i));
}
return absl::OkStatus();
}
Status UpdateCheckpointElements(
IteratorStateWriter* writer, StringPiece key_prefix,
const std::vector<std::vector<Tensor>>& elements,
const absl::flat_hash_set<int64_t>& checkpoint_indices) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(key_prefix, kNumElements, elements.size()));
for (int64_t i : checkpoint_indices) {
TF_RETURN_IF_ERROR(WriteElement(writer, key_prefix, elements, i));
}
return absl::OkStatus();
}
VariantTensorDataReader::VariantTensorDataReader(
const std::vector<const tensorflow::VariantTensorData*>& data) {
for (const auto& d : data) {
string metadata;
d->get_metadata(&metadata);
auto keys = str_util::Split(metadata, kDelimiter, str_util::SkipEmpty());
const string name = keys[0];
data_[name] = d;
map_[name] = std::map<string, size_t>();
for (size_t i = 1; i < keys.size(); ++i) {
map_[name][keys[i]] = i - 1;
}
}
}
Status VariantTensorDataReader::ReadScalar(StringPiece key,
int64_t* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadScalar(prefix, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece name, StringPiece key,
int64_t* val) const {
return ReadScalarInternal(name, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece key,
tstring* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadScalar(prefix, key, val);
}
Status VariantTensorDataReader::ReadScalar(StringPiece name, StringPiece key,
tstring* val) const {
return ReadScalarInternal(name, key, val);
}
Status VariantTensorDataReader::ReadTensor(StringPiece key, Tensor* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadTensor(prefix, key, val);
}
Status VariantTensorDataReader::ReadTensor(FunctionLibraryRuntime* flr,
StringPiece key, Tensor* val) const {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return ReadTensorInternal(flr, prefix, key, val);
}
Status VariantTensorDataReader::ReadTensor(StringPiece name, StringPiece key,
Tensor* val) const {
return ReadTensor(nullptr, name, key, val);
}
Status VariantTensorDataReader::ReadTensor(FunctionLibraryRuntime* flr,
StringPiece name, StringPiece key,
Tensor* val) const {
return ReadTensorInternal(flr, name, key, val);
}
bool VariantTensorDataReader::Contains(StringPiece key) const {
string prefix;
if (!ExtractIteratorPrefix(key, &prefix).ok()) {
return false;
}
return Contains(prefix, key);
}
bool VariantTensorDataReader::Contains(StringPiece n, StringPiece key) const {
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return false;
}
const auto& bucket = it->second;
return bucket.find(string(key)) != bucket.end();
}
template <typename T>
Status VariantTensorDataReader::ReadScalarInternal(StringPiece n,
StringPiece key,
T* val) const {
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return errors::NotFound(name);
}
const auto& bucket = it->second;
auto key_it = bucket.find(string(key));
if (key_it == bucket.end()) {
return errors::NotFound(key);
}
*val = data_.at(name)->tensors(key_it->second).scalar<T>()();
return absl::OkStatus();
}
Status VariantTensorDataReader::ReadTensorInternal(FunctionLibraryRuntime* flr,
StringPiece n,
StringPiece key,
Tensor* val) const {
if (Contains(n, strings::StrCat(key, kIsDataset))) {
return ReadDatasetInternal(flr, n, key, val);
}
string name(n);
auto it = map_.find(name);
if (it == map_.end()) {
return errors::NotFound(name);
}
const auto& bucket = it->second;
auto key_it = bucket.find(string(key));
if (key_it == bucket.end()) {
return errors::NotFound(key);
}
*val = data_.at(name)->tensors(key_it->second);
return absl::OkStatus();
}
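// Dataset-valued tensors are checkpointed as a serialized GraphDef plus the
// name of its output node (see WriteDatasetInternal); restoring one replays
// that graph with a GraphRunner, which is why a FunctionLibraryRuntime is
// required here.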
Status VariantTensorDataReader::ReadDatasetInternal(FunctionLibraryRuntime* flr,
StringPiece n,
StringPiece key,
Tensor* val) const {
if (flr == nullptr) {
return errors::Internal(
"Function library runtime is needed to restore a dataset.");
}
tstring output_node, serialized_graph_def;
TF_RETURN_IF_ERROR(
ReadScalar(n, strings::StrCat(key, kOutputNode), &output_node));
TF_RETURN_IF_ERROR(
ReadScalar(n, strings::StrCat(key), &serialized_graph_def));
GraphDef graph_def;
  if (!graph_def.ParseFromString(serialized_graph_def)) {
    return errors::Internal(
        "Failed to parse serialized GraphDef while restoring dataset tensor.");
  }
TF_RETURN_IF_ERROR(FromGraphDef(flr, graph_def, {}, output_node, val));
return absl::OkStatus();
}
std::map<string, Tensor> VariantTensorDataReader::ReadAllTensors() {
std::map<string, Tensor> result;
for (const auto& entry : map_) {
string key1 = entry.first;
for (const auto& inner : entry.second) {
string key2 = inner.first;
size_t index = inner.second;
result[absl::StrCat(key1, kDelimiter, key2)] =
data_[key1]->tensors(index);
}
}
return result;
}
Status VariantTensorDataWriter::WriteScalar(StringPiece key,
const int64_t val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteScalar(prefix, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece name, StringPiece key,
const int64_t val) {
return WriteScalarInternal(name, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece key,
const tstring& val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteScalar(prefix, key, val);
}
Status VariantTensorDataWriter::WriteScalar(StringPiece name, StringPiece key,
const tstring& val) {
return WriteScalarInternal(name, key, val);
}
Status VariantTensorDataWriter::WriteTensor(StringPiece key,
const Tensor& val) {
string prefix;
TF_RETURN_IF_ERROR(ExtractIteratorPrefix(key, &prefix));
return WriteTensor(prefix, key, val);
}
Status VariantTensorDataWriter::WriteTensor(StringPiece name, StringPiece key,
const Tensor& val) {
return WriteTensorInternal(name, key, val);
}
void VariantTensorDataWriter::MaybeFlush() {
if (is_flushed_) return;
for (auto& keys : keys_) {
const string name = keys.first;
string metadata = name;
for (size_t i = 0; i < keys_[name].size(); ++i) {
strings::StrAppend(&metadata, kDelimiter, keys_[name][i]);
}
data_[name]->set_metadata(metadata);
}
is_flushed_ = true;
}
void VariantTensorDataWriter::Reset() {
is_flushed_ = false;
data_.clear();
keys_.clear();
}
void VariantTensorDataWriter::ReleaseData(
std::vector<std::unique_ptr<VariantTensorData>>* variants) {
MaybeFlush();
for (auto& it : data_) {
variants->push_back(std::move(it.second));
}
Reset();
}
void VariantTensorDataWriter::GetData(
std::vector<const VariantTensorData*>* variants) {
MaybeFlush();
for (auto& it : data_) {
variants->push_back(it.second.get());
}
}
template <typename T>
Status VariantTensorDataWriter::WriteScalarInternal(StringPiece name,
StringPiece key,
const T& val) {
if (is_flushed_) {
return errors::FailedPrecondition(
"Cannot call WriteScalar after GetData or ReleaseData is called");
}
Tensor val_t = Tensor(DataTypeToEnum<T>::v(), TensorShape({}));
val_t.scalar<T>()() = val;
return WriteTensorInternal(name, key, val_t);
}
Status VariantTensorDataWriter::WriteTensorInternal(StringPiece n,
StringPiece key,
const Tensor& val) {
DatasetBase* dataset;
if (GetDatasetFromVariantTensor(val, &dataset).ok()) {
return WriteDatasetInternal(n, key, dataset);
}
if (is_flushed_) {
return errors::FailedPrecondition(
"Cannot call WriteTensor after GetData or ReleaseData is called");
}
DCHECK_EQ(key.find(kDelimiter), string::npos);
string name(n);
if (keys_.count(name) == 0) {
keys_[name] = std::vector<string>();
}
keys_[name].push_back(string(key));
if (data_.count(name) == 0) {
data_[name] = std::make_unique<VariantTensorData>();
data_[name]->set_type_name("tensorflow::Iterator");
}
*(data_[name]->add_tensors()) = val;
return absl::OkStatus();
}
Status VariantTensorDataWriter::WriteDatasetInternal(
StringPiece n, StringPiece key, const DatasetBase* dataset) {
GraphDef graph_def;
SerializationContext ctx((SerializationContext::Params()));
TF_RETURN_IF_ERROR(AsGraphDef(dataset, std::move(ctx), &graph_def));
string output_node;
for (const auto& node : graph_def.node()) {
if (node.op() == kRetvalOp) {
output_node = node.input(0);
break;
}
}
string result;
graph_def.SerializeToString(&result);
TF_RETURN_IF_ERROR(WriteScalar(n, strings::StrCat(key, kIsDataset), ""));
TF_RETURN_IF_ERROR(
WriteScalar(n, strings::StrCat(key, kOutputNode), output_node));
TF_RETURN_IF_ERROR(WriteScalar(n, key, result));
return absl::OkStatus();
}
std::string IteratorStateVariant::TypeName() {
return kIteratorVariantTypeName;
}
IteratorStateVariant::IteratorStateVariant(const IteratorStateVariant& other) {
if (other.data_) {
data_ = std::make_unique<VariantTensorData>(*other.data_);
}
}
Status IteratorStateVariant::InitializeFromVariantData(
std::unique_ptr<VariantTensorData> data) {
data_ = std::move(data);
return absl::OkStatus();
}
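// Encode() packs all tensors into a single scalar DT_VARIANT holding a
// CompressedElement; if compression fails, the data is written uncompressed
// so the checkpoint remains readable, and Decode() handles both layouts.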
void IteratorStateVariant::Encode(VariantTensorData* data) const {
CompressedElement compressed_tensors;
Status s = CompressElement(data_->tensors(), &compressed_tensors);
if (!s.ok()) {
LOG(WARNING) << "Failed to compress iterator state variant: " << s;
*data = *data_;
return;
}
data->set_type_name(TypeName());
data->set_metadata(data_->metadata_string());
Tensor tensor(DT_VARIANT, TensorShape({}));
tensor.scalar<Variant>()() = std::move(compressed_tensors);
*data->add_tensors() = std::move(tensor);
}
bool IteratorStateVariant::Decode(VariantTensorData data) {
if (data.type_name() != TypeName()) {
return false;
}
const CompressedElement* compressed = GetCompressedElement(data);
if (!compressed) {
data_ = std::make_unique<VariantTensorData>(std::move(data));
return true;
}
std::vector<Tensor> tensors;
Status s = UncompressElement(*compressed, &tensors);
if (!s.ok()) {
LOG(WARNING) << "Failed to uncompress iterator state variant: " << s;
data_ = std::make_unique<VariantTensorData>(std::move(data));
return true;
}
data_ = std::make_unique<VariantTensorData>();
data_->set_type_name(TypeName());
data_->set_metadata(std::move(data.metadata_string()));
for (auto& tensor : tensors) {
*data_->add_tensors() = std::move(tensor);
}
return true;
}
const CompressedElement* IteratorStateVariant::GetCompressedElement(
const VariantTensorData& data) {
bool should_uncompress =
data.tensors_size() == 1 &&
TensorShapeUtils::IsScalar(data.tensors(0).shape()) &&
data.tensors(0).dtype() == DT_VARIANT;
if (!should_uncompress) {
return nullptr;
}
const Variant& variant = data.tensors(0).scalar<Variant>()();
return variant.get<CompressedElement>();
}
std::string IteratorStateVariant::DebugString() const {
if (data_) {
return strings::StrCat("IteratorStateVariant<", data_->DebugString(), ">");
} else {
return strings::StrCat("IteratorStateVariant<empty>");
}
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(IteratorStateVariant,
kIteratorVariantTypeName);
Status AsGraphDefForRewrite(OpKernelContext* ctx, const DatasetBase* input,
std::vector<std::pair<string, Tensor>>* input_list,
GraphDef* result, string* dataset_node) {
SerializationContext::Params params(ctx);
params.input_list = input_list;
params.external_state_policy = ExternalStatePolicy::POLICY_IGNORE;
params.is_graph_rewrite = true;
SerializationContext serialization_ctx(params);
TF_RETURN_IF_ERROR(AsGraphDef(input, std::move(serialization_ctx), result));
for (const auto& node : result->node()) {
if (node.op() == kRetvalOp) {
*dataset_node = node.input(0);
}
}
return absl::OkStatus();
}
Status AsGraphDef(const DatasetBase* dataset,
SerializationContext&& serialization_ctx,
GraphDef* graph_def) {
if (serialization_ctx.external_state_policy() ==
ExternalStatePolicy::POLICY_FAIL) {
TF_RETURN_IF_ERROR(dataset->CheckExternalState());
}
  GraphDefBuilder b;
  DatasetBase::DatasetGraphDefBuilder db(&b);
  Node* output_node = nullptr;
  TF_RETURN_IF_ERROR(
      db.AddInputDataset(&serialization_ctx, dataset, &output_node));
  ops::UnaryOp(std::string(kRetvalOp), output_node,
               b.opts()
                   .WithName("dataset")
                   .WithAttr("T", DT_VARIANT)
                   .WithAttr("index", 0));
  TF_RETURN_IF_ERROR(b.ToGraphDef(graph_def));
  if (serialization_ctx.external_state_policy() ==
      ExternalStatePolicy::POLICY_WARN) {
    // This scan must run after ToGraphDef() has populated `graph_def`;
    // otherwise there are no nodes to inspect.
    std::vector<string> stateful_op_names;
    TF_RETURN_IF_ERROR(FindStatefulOps(*graph_def, &stateful_op_names));
    if (!stateful_op_names.empty()) {
      LOG(WARNING) << "We found the following stateful ops in the dataset "
                      "construction graph whose state would not be "
                      "serialized and might cause subtle bugs: "
                   << absl::StrJoin(stateful_op_names, ", ");
    }
  }
  return absl::OkStatus();
}
absl::StatusOr<absl::flat_hash_map<std::string, int64_t>> CheckpointStats(
const std::string& checkpoint_bytes) {
TensorProto proto;
if (!ParseProtoUnlimited(&proto, checkpoint_bytes)) {
return absl::InvalidArgumentError(
"Failed to parse checkpoint bytes into proto.");
}
Tensor t;
if (!t.FromProto(proto)) {
return absl::InvalidArgumentError(
"Failed to parse checkpoint tensor from proto.");
}
auto variant = t.scalar<Variant>()();
auto* w = variant.get<IteratorStateVariant>();
if (!w) {
return absl::InvalidArgumentError(
"Failed to access IteratorStateVariant inside checkpoint tensor");
}
const VariantTensorData* data = w->GetData();
auto reader = std::make_unique<VariantTensorDataReader>(
std::vector<const VariantTensorData*>{data});
absl::flat_hash_map<std::string, int64_t> stats;
for (const auto& [key, tensor] : reader->ReadAllTensors()) {
stats[key] = tensor.TotalBytes();
}
return stats;
}
}
} | #include "tensorflow/core/data/serialization_utils.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/test_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/work_sharder.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
string full_name(string key) { return FullName("Iterator:", key); }
TEST(SerializationUtilsTest, CheckpointElementsRoundTrip) {
std::vector<std::vector<Tensor>> elements;
elements.push_back(CreateTensors<int32>(TensorShape({3}), {{1, 2, 3}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{4, 5}}));
VariantTensorDataWriter writer;
tstring test_prefix = full_name("test_prefix");
TF_ASSERT_OK(WriteElementsToCheckpoint(&writer, test_prefix, elements));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
std::vector<std::vector<Tensor>> read_elements;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> ctx,
TestContext::Create());
TF_ASSERT_OK(ReadElementsFromCheckpoint(ctx->iter_ctx(), &reader, test_prefix,
&read_elements));
ASSERT_EQ(elements.size(), read_elements.size());
for (int i = 0; i < elements.size(); ++i) {
std::vector<Tensor>& original = elements[i];
std::vector<Tensor>& read = read_elements[i];
ASSERT_EQ(original.size(), read.size());
for (int j = 0; j < original.size(); ++j) {
EXPECT_EQ(original[j].NumElements(), read[j].NumElements());
EXPECT_EQ(original[j].flat<int32>()(0), read[j].flat<int32>()(0));
}
}
}
TEST(SerializationUtilsTest, VariantTensorDataRoundtrip) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar(full_name("Int64"), 24));
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_ASSERT_OK(writer.WriteTensor(full_name("Tensor"), input_tensor));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
int64_t val_int64;
TF_ASSERT_OK(reader.ReadScalar(full_name("Int64"), &val_int64));
EXPECT_EQ(val_int64, 24);
Tensor val_tensor;
TF_ASSERT_OK(reader.ReadTensor(full_name("Tensor"), &val_tensor));
EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements());
EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0));
}
TEST(SerializationUtilsTest, VariantTensorDataNonExistentKey) {
VariantTensorData data;
strings::StrAppend(&data.metadata_, "key1", "@@");
data.tensors_.push_back(Tensor(DT_INT64, {1}));
std::vector<const VariantTensorData*> reader_data;
reader_data.push_back(&data);
VariantTensorDataReader reader(reader_data);
int64_t val_int64;
tstring val_string;
Tensor val_tensor;
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar(full_name("NonExistentKey"), &val_int64).code());
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar(full_name("NonExistentKey"), &val_string).code());
EXPECT_EQ(error::NOT_FOUND,
reader.ReadTensor(full_name("NonExistentKey"), &val_tensor).code());
}
TEST(SerializationUtilsTest, VariantTensorDataRoundtripIteratorName) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar("Iterator", "Int64", 24));
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
TF_ASSERT_OK(writer.WriteTensor("Iterator", "Tensor", input_tensor));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
int64_t val_int64;
TF_ASSERT_OK(reader.ReadScalar("Iterator", "Int64", &val_int64));
EXPECT_EQ(val_int64, 24);
Tensor val_tensor;
TF_ASSERT_OK(reader.ReadTensor("Iterator", "Tensor", &val_tensor));
EXPECT_EQ(input_tensor.NumElements(), val_tensor.NumElements());
EXPECT_EQ(input_tensor.flat<float>()(0), val_tensor.flat<float>()(0));
}
TEST(SerializationUtilsTest, VariantTensorDataNonExistentKeyIteratorName) {
VariantTensorData data;
strings::StrAppend(&data.metadata_, "key1", "@@");
data.tensors_.push_back(Tensor(DT_INT64, {1}));
std::vector<const VariantTensorData*> reader_data;
reader_data.push_back(&data);
VariantTensorDataReader reader(reader_data);
int64_t val_int64;
tstring val_string;
Tensor val_tensor;
EXPECT_EQ(error::NOT_FOUND,
reader.ReadScalar("Iterator", "NonExistentKey", &val_int64).code());
EXPECT_EQ(
error::NOT_FOUND,
reader.ReadScalar("Iterator", "NonExistentKey", &val_string).code());
EXPECT_EQ(
error::NOT_FOUND,
reader.ReadTensor("Iterator", "NonExistentKey", &val_tensor).code());
}
TEST(SerializationUtilsTest, VariantTensorDataWriteAfterFlushing) {
VariantTensorDataWriter writer;
TF_ASSERT_OK(writer.WriteScalar(full_name("Int64"), 24));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
Tensor input_tensor(DT_FLOAT, {1});
input_tensor.flat<float>()(0) = 2.0f;
EXPECT_EQ(error::FAILED_PRECONDITION,
writer.WriteTensor(full_name("Tensor"), input_tensor).code());
}
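// Round-trips IteratorStateVariant through Encode()/Decode() for a variety of
// tensor payloads; DecodeUncompressed covers the fallback path in which the
// variant data was never compressed.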
class ParameterizedIteratorStateVariantTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<std::vector<Tensor>> {
protected:
VariantTensorData GetVariantTensorData() const {
std::vector<Tensor> tensors = GetParam();
VariantTensorData data;
data.set_type_name(IteratorStateVariant::TypeName());
for (Tensor& tensor : tensors) {
*data.add_tensors() = std::move(tensor);
}
return data;
}
absl::StatusOr<VariantTensorData> EncodeAndDecode(
const VariantTensorData& data) const {
IteratorStateVariant encoder;
TF_RETURN_IF_ERROR(encoder.InitializeFromVariantData(
std::make_unique<VariantTensorData>(data)));
VariantTensorData encoded_data;
encoder.Encode(&encoded_data);
IteratorStateVariant decoder;
decoder.Decode(encoded_data);
return *decoder.GetData();
}
absl::StatusOr<VariantTensorData> DecodeUncompressed(
const VariantTensorData& data) const {
IteratorStateVariant decoder;
decoder.Decode(data);
return *decoder.GetData();
}
};
class ParameterizedCheckpointIndicesTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<absl::flat_hash_set<int64_t>> {
protected:
absl::flat_hash_set<int64_t> GetCheckpointIndices() const {
absl::flat_hash_set<int64_t> checkpoint_indices = GetParam();
return checkpoint_indices;
}
};
std::vector<std::vector<Tensor>> TestCases() {
return {
CreateTensors<int64_t>(TensorShape{1}, {{1}}),
CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
{CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{1}, {1})},
{},
{CreateTensor<int64_t>(TensorShape{128, 128}),
CreateTensor<int64_t>(TensorShape{64, 2})},
};
}
std::vector<absl::flat_hash_set<int64_t>> CheckpointIndicesTestCases() {
return {
{},
      {0},
      {0, 1},
      {0, 1, 2},
      {1, 3, 4},
      {1, 2, 3, 4},
      {0, 1, 2, 3, 4},
};
}
TEST_P(ParameterizedIteratorStateVariantTest, EncodeAndDecode) {
VariantTensorData data = GetVariantTensorData();
TF_ASSERT_OK_AND_ASSIGN(VariantTensorData result, EncodeAndDecode(data));
EXPECT_EQ(result.type_name(), data.type_name());
for (int i = 0; i < result.tensors_size(); ++i) {
test::ExpectEqual(result.tensors(i), data.tensors(i));
}
}
TEST_P(ParameterizedIteratorStateVariantTest, DecodeUncompressed) {
VariantTensorData data = GetVariantTensorData();
TF_ASSERT_OK_AND_ASSIGN(VariantTensorData result, DecodeUncompressed(data));
EXPECT_EQ(result.type_name(), data.type_name());
for (int i = 0; i < result.tensors_size(); ++i) {
test::ExpectEqual(result.tensors(i), data.tensors(i));
}
}
TEST_P(ParameterizedCheckpointIndicesTest,
CheckpointElementsRoundTripUsingIndices) {
std::vector<std::vector<Tensor>> elements;
elements.push_back(CreateTensors<int32>(TensorShape({3}), {{1, 2, 3}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{4, 5}}));
elements.push_back(
CreateTensors<int32>(TensorShape({5}), {{6, 7, 8, 9, 10}}));
elements.push_back(
CreateTensors<int32>(TensorShape({4}), {{11, 12, 13, 14}}));
elements.push_back(CreateTensors<int32>(TensorShape({2}), {{15, 16}}));
VariantTensorDataWriter writer;
tstring test_prefix = full_name("test_prefix");
absl::flat_hash_set<int64_t> checkpoint_indices_write = {0, 1, 2, 3, 4};
TF_ASSERT_OK(WriteElementsToCheckpoint(&writer, test_prefix, elements));
for (auto index : GetCheckpointIndices()) {
elements.at(index) = CreateTensors<int32>(TensorShape({1}), {{1}});
}
TF_ASSERT_OK(UpdateCheckpointElements(&writer, test_prefix, elements,
GetCheckpointIndices()));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
std::vector<std::vector<Tensor>> read_elements;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> ctx,
TestContext::Create());
TF_ASSERT_OK(ReadElementsFromCheckpoint(ctx->iter_ctx(), &reader, test_prefix,
&read_elements));
ASSERT_EQ(elements.size(), read_elements.size());
for (int index = 0; index < elements.size(); ++index) {
std::vector<Tensor>& original = elements[index];
std::vector<Tensor>& read = read_elements[index];
ASSERT_EQ(original.size(), read.size());
for (int j = 0; j < original.size(); ++j) {
EXPECT_EQ(original[j].NumElements(), read[j].NumElements());
EXPECT_EQ(original[j].flat<int32>()(0), read[j].flat<int32>()(0));
}
}
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedIteratorStateVariantTest,
::testing::ValuesIn(TestCases()));
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedCheckpointIndicesTest,
::testing::ValuesIn(CheckpointIndicesTestCases()));
}
}
} |
1,724 | cpp | tensorflow/tensorflow | hash_utils | tensorflow/core/data/hash_utils.cc | tensorflow/core/data/hash_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_HASH_UTILS_H_
#define TENSORFLOW_CORE_DATA_HASH_UTILS_H_
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
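// Fingerprinting helpers for tf.data graphs: the Hash* functions compute a
// deterministic hash of a node (together with the subgraph feeding it), a
// tensor, or a whole graph, while the Check*Equal functions report the first
// structural difference between two graphs instead of comparing hashes.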
Status HashNode(const GraphDef& graph, const NodeDef& node, uint64* hash);
Status HashNode(const GraphDef& graph, const NodeDef& node,
const FunctionLibraryDefinition& flib_def, uint64* hash);
Status HashTensor(const Tensor& tensor, uint64* hash);
Status HashGraph(const GraphDef& graph, uint64* hash);
Status CheckGraphsEqual(const GraphDef& a, const GraphDef& b);
Status CheckSubgraphsEqual(const GraphDef& a, const NodeDef* node_a,
const GraphDef& b, const NodeDef* node_b);
}
}
#endif
#include "tensorflow/core/data/hash_utils.h"
#include <array>
#include <memory>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
constexpr std::array<const char*, 3> kOpsWithSeed = {
"AnonymousRandomSeedGenerator",
"ShuffleDataset",
"ShuffleAndRepeatDataset"
};
constexpr char kSeedInputName[] = "seed";
constexpr char kSeed2InputName[] = "seed2";
constexpr char kSeedGeneratorInputName[] = "seed_generator";
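// Seed inputs are deliberately excluded from the hash so that dataset graphs
// that differ only in their shuffle seeds (or in an anonymous seed generator
// resource) produce the same fingerprint.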
template <std::size_t SIZE>
bool IsNodeOfType(const NodeDef& node,
const std::array<const char*, SIZE>& op_types) {
for (const auto& type : op_types) {
if (MatchesAnyVersion(type, node.op())) {
return true;
}
}
return false;
}
Status GetSink(const GraphDef& graph_def, const NodeDef** sink) {
  *sink = nullptr;
  for (auto& node : graph_def.node()) {
    if (node.op() == kRetvalOp) {
      *sink = &node;
      break;
    }
  }
  if (*sink == nullptr) {
return errors::Internal("Cannot find sink node for dataset graph.");
}
return absl::OkStatus();
}
Status ShouldIgnoreInput(const NodeDef& node, int i, bool* result) {
*result = false;
if (IsNodeOfType(node, kOpsWithSeed)) {
const OpRegistrationData* reg;
auto status = OpRegistry::Global()->LookUp(node.op(), ®);
if (status.ok()) {
if (reg->op_def.input_arg_size() > i) {
const std::string input_arg_name = reg->op_def.input_arg(i).name();
if (input_arg_name == kSeedInputName ||
input_arg_name == kSeed2InputName ||
input_arg_name == kSeedGeneratorInputName) {
VLOG(2) << "Ignoring arg: " << input_arg_name
<< " from node: " << node.name();
*result = true;
return absl::OkStatus();
}
}
} else if (errors::IsNotFound(status)) {
LOG(WARNING) << "Cannot find " << node.op()
<< " in global op registry, so cannot determine which "
"inputs are seeds.";
} else {
return status;
}
}
return absl::OkStatus();
}
Status ParseInputNodeName(absl::string_view input_name,
absl::string_view* node_name,
absl::string_view* suffix, bool* is_control_input) {
if (input_name[0] == '^') {
*node_name = input_name.substr(1);
*is_control_input = true;
return absl::OkStatus();
}
std::pair<absl::string_view, absl::string_view> node_spec =
absl::StrSplit(input_name, absl::MaxSplits(':', 1));
*node_name = node_spec.first;
*suffix = node_spec.second;
*is_control_input = false;
return absl::OkStatus();
}
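// Hashes the subgraph rooted at a given node. Init() walks the graph
// breadth-first from the root, recording each node's regular and control
// inputs; edges that would revisit an already-seen node are remembered as
// cycle-forming and are hashed shallowly (op, attrs, and device only) in
// HashNode() so the recursion terminates.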
class GraphHasher {
using NodeCache = absl::flat_hash_map<const NodeDef*, uint64>;
using FunctionCache = absl::flat_hash_map<const FunctionDef*, uint64>;
using AttrCache =
absl::flat_hash_map<std::pair<const NodeDef*, bool>, uint64>;
public:
explicit GraphHasher(const GraphDef* graph, const NodeDef* root,
const FunctionLibraryDefinition* flib)
: graph_(graph), root_(root), flib_(flib) {
node_cache_ = std::make_shared<NodeCache>();
function_cache_ = std::make_shared<FunctionCache>();
attr_cache_ = std::make_shared<AttrCache>();
}
explicit GraphHasher(const GraphDef* graph, const NodeDef* root,
const FunctionLibraryDefinition* flib,
std::shared_ptr<NodeCache> node_cache,
std::shared_ptr<FunctionCache> function_cache,
std::shared_ptr<AttrCache> attr_cache)
: graph_(graph),
root_(root),
flib_(flib),
node_cache_(node_cache),
function_cache_(function_cache),
attr_cache_(attr_cache) {}
Status Init() {
absl::flat_hash_map<absl::string_view, const NodeDef*> node_def_by_name;
node_def_by_name.reserve(graph_->node_size());
for (const auto& node : graph_->node()) {
auto result = node_def_by_name.emplace(node.name(), &node);
if (TF_PREDICT_FALSE(!result.second)) {
auto node_name_formatter =
[](std::string* out,
const decltype(node_def_by_name)::value_type& item) {
absl::StrAppend(out, "'", item.first, "'");
};
return errors::Internal(
"Encountered graph with duplicate node name '", node.name(),
"' in [", absl::StrJoin(node_def_by_name, ",", node_name_formatter),
"]");
}
}
absl::flat_hash_set<absl::string_view> visited;
std::queue<const NodeDef*> bfs_queue;
bfs_queue.push(root_);
while (!bfs_queue.empty()) {
const NodeDef* node = bfs_queue.front();
bfs_queue.pop();
if (visited.contains(node->name())) {
continue;
}
visited.insert(node->name());
NodeRep node_rep;
for (int i = 0; i < node->input_size(); ++i) {
DCHECK_GT(node->input(i).length(), 0);
bool should_ignore_input = false;
TF_RETURN_IF_ERROR(ShouldIgnoreInput(*node, i, &should_ignore_input));
if (should_ignore_input) continue;
absl::string_view node_name, suffix;
bool is_control_input;
TF_RETURN_IF_ERROR(ParseInputNodeName(node->input(i), &node_name,
&suffix, &is_control_input));
auto* input_node = gtl::FindPtrOrNull(node_def_by_name, node_name);
if (input_node == nullptr) {
return errors::Internal("Graph node [", node->name(), "] has input [",
node_name, "] that doesn't exist in graph");
}
if (visited.contains(node_name)) {
EdgeRep cycle_edge(node, input_node);
cycle_forming_edges_.insert(cycle_edge.GetHash());
continue;
}
if (is_control_input) {
node_rep.node_control_inputs.push_back(input_node);
} else {
node_rep.node_inputs.push_back(std::make_pair(input_node, suffix));
bfs_queue.push(input_node);
}
}
nodes_[node] = node_rep;
}
return absl::OkStatus();
}
Status HashRoot(uint64* hash) { return HashNode(root_, hash); }
Status CheckEqual(GraphHasher* that) {
return CheckNodesEqual(root_, that, that->root_);
}
private:
Status HashNode(const NodeDef* node, uint64* hash) {
auto it = node_cache_->find(node);
if (it != node_cache_->end()) {
*hash = it->second;
return absl::OkStatus();
}
NodeRep* node_rep = gtl::FindOrNull(nodes_, node);
if (node_rep == nullptr) {
return errors::InvalidArgument("Could not find node: ", node->name());
}
uint64 non_input_hash;
TF_RETURN_IF_ERROR(
HashNodeNonInput(node, true, &non_input_hash));
uint64 control_inputs_hash;
TF_RETURN_IF_ERROR(
HashControlInputs(node_rep->node_control_inputs, &control_inputs_hash));
uint64 inputs_hash = 0;
for (const auto& input : node_rep->node_inputs) {
uint64 node_hash = 0;
EdgeRep edge(node, input.first);
if (cycle_forming_edges_.contains(edge.GetHash())) {
TF_RETURN_IF_ERROR(
HashNodeNonInput(input.first, true, &node_hash));
} else {
TF_RETURN_IF_ERROR(HashNode(input.first, &node_hash));
}
inputs_hash = Hash64Combine(
inputs_hash, Hash64Combine(node_hash, Hash64(input.second.data(),
input.second.size())));
}
*hash = Hash64Combine(non_input_hash,
Hash64Combine(control_inputs_hash, inputs_hash));
auto result = node_cache_->emplace(node, *hash);
if (!result.second) {
return errors::Internal(absl::StrCat("Computed the hash for node ",
node->DebugString(), " twice!"));
}
return absl::OkStatus();
}
Status CheckNodesEqual(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node) {
Status s = CheckNodesEqualHelper(this_node, that, that_node);
if (!s.ok()) {
return errors::FailedPrecondition("Nodes ", this_node->name(), " and ",
that_node->name(),
" are not the same:\n", s);
}
return s;
}
Status CheckNodesEqualHelper(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node) {
    TF_RETURN_IF_ERROR(CheckNodesEqualNonInput(this_node, that, that_node,
                                               /*compare_functions=*/true));
TF_RETURN_IF_ERROR(
CheckControlInputsEqual(nodes_[this_node].node_control_inputs, that,
that->nodes_[that_node].node_control_inputs));
auto& this_node_inputs = nodes_[this_node].node_inputs;
auto& that_node_inputs = that->nodes_[that_node].node_inputs;
if (this_node_inputs.size() != that_node_inputs.size()) {
return errors::FailedPrecondition(
"Nodes have different numbers of node inputs: ",
this_node_inputs.size(), " vs ", that_node_inputs.size());
}
for (int i = 0; i < this_node_inputs.size(); ++i) {
const NodeDef* this_input = this_node_inputs[i].first;
const NodeDef* that_input = that_node_inputs[i].first;
if (is_cycle_forming_edge(this_node, this_input)) {
        TF_RETURN_IF_ERROR(CheckNodesEqualNonInput(
            this_input, that, that_input, /*compare_functions=*/true));
} else {
TF_RETURN_IF_ERROR(CheckNodesEqual(this_input, that, that_input));
}
absl::string_view this_input_suffix = this_node_inputs[i].second;
absl::string_view that_input_suffix = that_node_inputs[i].second;
if (this_input_suffix != that_input_suffix) {
return errors::FailedPrecondition(
"Node inputs ", this_input->name(), " and ", that_input->name(),
" have different suffixes: ", this_input_suffix, " vs ",
that_input_suffix);
}
}
return absl::OkStatus();
}
Status HashNodeNonInput(const NodeDef* node, bool hash_functions,
uint64* hash) {
auto iter = attr_cache_->find(std::make_pair(node, hash_functions));
if (iter != attr_cache_->end()) {
*hash = iter->second;
return absl::OkStatus();
}
uint64 attrs_hash = 0;
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_->LookUp(node->op(), ®));
uint64 op_hash = 0;
if (reg->is_function_op) {
if (hash_functions) {
TF_RETURN_IF_ERROR(HashFunction(node->op(), node->attr(), &op_hash));
}
} else {
op_hash = Hash64(node->op());
}
for (const auto& attr : reg->op_def.attr()) {
const auto& attr_key = attr.name();
if (DatasetOpKernel::IsDatasetOp(reg->op_def) && attr_key == "metadata")
continue;
auto node_attr_iter = node->attr().find(attr_key);
if (node_attr_iter == node->attr().end()) {
continue;
}
const auto& attr_value = node_attr_iter->second;
if (attr_key == kColocationAttrName ||
attr_key == kColocationGroupPrefix) {
continue;
}
uint64 attr_hash = 0;
TF_RETURN_IF_ERROR(
HashAttr(attr_key, attr_value, hash_functions, &attr_hash));
attrs_hash = Hash64Combine(attrs_hash, attr_hash);
}
uint64 device_hash = Hash64(node->device());
*hash = Hash64Combine(op_hash, Hash64Combine(attrs_hash, device_hash));
auto result =
attr_cache_->emplace(std::make_pair(node, hash_functions), *hash);
if (!result.second) {
      return errors::Internal(absl::StrCat(
          "Computed the hash for non-input node: ", node->DebugString(),
          " and hash function bool: ", hash_functions, " twice!"));
}
return absl::OkStatus();
}
Status CheckNodesEqualNonInput(const NodeDef* this_node, GraphHasher* that,
const NodeDef* that_node,
bool compare_functions) {
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_->LookUp(this_node->op(), ®));
if (reg->is_function_op) {
if (compare_functions) {
TF_RETURN_IF_ERROR(
CheckFunctionsEqual(this_node->op(), this_node->attr(), that,
that_node->op(), that_node->attr()));
}
} else {
if (this_node->op() != that_node->op()) {
return errors::FailedPrecondition(
"ops for nodes ", this_node->name(), " and ", that_node->name(),
" are different: ", this_node->op(), " != ", that_node->op());
}
}
for (const auto& attr : reg->op_def.attr()) {
const auto& attr_key = attr.name();
const bool this_has_attr = this_node->attr().contains(attr_key);
const bool that_has_attr = that_node->attr().contains(attr_key);
if (this_has_attr != that_has_attr) {
return errors::FailedPrecondition(
"attr with key ", attr_key, " is different for nodes ",
this_node->name(), " and ", that_node->name(),
". Present in former: ", this_has_attr,
". Present in latter: ", that_has_attr);
}
if (!this_has_attr) {
continue;
}
if (attr_key == kColocationAttrName ||
attr_key == kColocationGroupPrefix) {
continue;
}
const auto& this_attr = this_node->attr().at(attr_key);
const auto& that_attr = that_node->attr().at(attr_key);
TF_RETURN_IF_ERROR(CheckAttrsEqual(attr_key, this_attr, that, that_attr,
compare_functions));
}
if (this_node->device() != that_node->device()) {
return errors::FailedPrecondition(
"Devices are different for nodes ", this_node->name(), " and ",
that_node->name(), ": ", this_node->device(), " vs ",
that_node->device());
}
return absl::OkStatus();
}
Status HashAttr(const std::string& attr_name, const AttrValue& attr_value,
bool hash_functions, uint64* hash) {
uint64 value_hash = 0;
if (attr_value.has_func()) {
if (hash_functions) {
TF_RETURN_IF_ERROR(HashFunction(attr_value.func(), &value_hash));
}
} else if (attr_value.has_list() && attr_value.list().func_size() > 0) {
if (hash_functions) {
for (auto& func : attr_value.list().func()) {
uint64 func_hash;
TF_RETURN_IF_ERROR(HashFunction(func, &func_hash));
value_hash = Hash64Combine(value_hash, func_hash);
}
}
} else {
value_hash = DeterministicProtoHash64(attr_value);
}
*hash = Hash64Combine(Hash64(attr_name), value_hash);
return absl::OkStatus();
}
Status CheckAttrsEqual(const std::string& attr_name,
const AttrValue& this_attr, GraphHasher* that,
const AttrValue& that_attr, bool compare_functions) {
if (this_attr.has_func() != that_attr.has_func()) {
return errors::FailedPrecondition(
"AttrValues are of different types: ", this_attr.DebugString(),
" vs ", that_attr.DebugString());
}
if (this_attr.has_func()) {
if (compare_functions) {
TF_RETURN_IF_ERROR(
CheckFunctionsEqual(this_attr.func(), that, that_attr.func()));
}
return absl::OkStatus();
}
if (this_attr.has_list() != that_attr.has_list()) {
return errors::FailedPrecondition(
"AttrValues are of different types: ", this_attr.DebugString(),
" vs ", that_attr.DebugString());
}
if (this_attr.has_list()) {
if (this_attr.list().func_size() != that_attr.list().func_size()) {
return errors::FailedPrecondition(
"AttrValues have func lists of different sizes: ",
this_attr.DebugString(), " vs ", that_attr.DebugString());
}
if (compare_functions) {
for (int i = 0; i < this_attr.list().func_size(); ++i) {
TF_RETURN_IF_ERROR(CheckFunctionsEqual(this_attr.list().func(i), that,
that_attr.list().func(i)));
}
}
return absl::OkStatus();
}
uint64 this_hash, that_hash;
    TF_RETURN_IF_ERROR(
        HashAttr(attr_name, this_attr, /*hash_functions=*/true, &this_hash));
    TF_RETURN_IF_ERROR(that->HashAttr(attr_name, that_attr,
                                      /*hash_functions=*/true, &that_hash));
if (this_hash != that_hash) {
return errors::FailedPrecondition(
"AttrValues are different: ", this_attr.DebugString(), " vs ",
that_attr.DebugString());
}
return absl::OkStatus();
}
Status HashFunction(const NameAttrList& func, uint64* hash) {
return HashFunction(func.name(), func.attr(), hash);
}
Status HashFunction(const std::string& name, const AttrValueMap& attrs,
uint64* hash) {
    const FunctionDef* fdef = flib_->Find(name);
    if (fdef == nullptr) {
      return errors::Internal("Function ", name, " not found in library.");
    }
    auto it = function_cache_->find(fdef);
if (it != function_cache_->end()) {
*hash = it->second;
return absl::OkStatus();
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*fdef, AttrSlice(&attrs), flib_, &fbody));
GraphDef graph_def = fbody->graph->ToGraphDefDebug();
uint64 ret_nodes_hash = 0;
for (const auto& ret_node : fbody->ret_nodes) {
uint64 ret_node_hash = 0;
GraphHasher hasher(&graph_def, &ret_node->def(), flib_, node_cache_,
function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(hasher.Init());
TF_RETURN_IF_ERROR(hasher.HashRoot(&ret_node_hash));
ret_nodes_hash = Hash64Combine(ret_nodes_hash, ret_node_hash);
}
std::vector<const NodeDef*> control_rets;
control_rets.reserve(fbody->control_ret_nodes.size());
for (const auto& control_ret_node : fbody->control_ret_nodes) {
control_rets.push_back(&control_ret_node->def());
}
uint64 control_ret_nodes_hash = 0;
TF_RETURN_IF_ERROR(
HashControlInputs(control_rets, &control_ret_nodes_hash));
*hash = Hash64Combine(ret_nodes_hash, control_ret_nodes_hash);
auto result = function_cache_->emplace(fdef, *hash);
if (!result.second) {
return errors::Internal(
absl::StrCat("Computed the hash for function ", name, " twice!"));
}
return absl::OkStatus();
}
Status CheckFunctionsEqual(const NameAttrList& this_func, GraphHasher* that,
const NameAttrList& that_func) {
return CheckFunctionsEqual(this_func.name(), this_func.attr(), that,
that_func.name(), that_func.attr());
}
Status CheckFunctionsEqual(const std::string& this_name,
const AttrValueMap& this_attrs, GraphHasher* that,
const std::string& that_name,
const AttrValueMap& that_attrs) {
Status s = CheckFunctionsEqualHelper(this_name, this_attrs, that, that_name,
that_attrs);
if (!s.ok()) {
return errors::FailedPrecondition("Functions ", this_name, " and ",
that_name, " are not the same:\n", s);
}
return s;
}
Status CheckFunctionsEqualHelper(const std::string& this_name,
const AttrValueMap& this_attrs,
GraphHasher* that,
const std::string& that_name,
const AttrValueMap& that_attrs) {
    const FunctionDef* this_fdef = flib_->Find(this_name);
    const FunctionDef* that_fdef = that->flib_->Find(that_name);
    if (this_fdef == nullptr || that_fdef == nullptr) {
      return errors::Internal("Could not find function ",
                              this_fdef == nullptr ? this_name : that_name,
                              " in its function library.");
    }
std::unique_ptr<FunctionBody> this_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*this_fdef, AttrSlice(&this_attrs), flib_, &this_fbody));
GraphDef this_graph_def = this_fbody->graph->ToGraphDefDebug();
std::unique_ptr<FunctionBody> that_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*that_fdef, AttrSlice(&that_attrs), that->flib_, &that_fbody));
GraphDef that_graph_def = that_fbody->graph->ToGraphDefDebug();
if (this_fbody->ret_nodes.size() != that_fbody->ret_nodes.size()) {
return errors::FailedPrecondition(
"Different numbers of ret nodes for functions ", this_name, " and ",
that_name, ": ", this_fbody->ret_nodes.size(), " vs ",
that_fbody->ret_nodes.size());
}
for (int i = 0; i < this_fbody->ret_nodes.size(); ++i) {
const NodeDef* this_root = &this_fbody->ret_nodes[i]->def();
const NodeDef* that_root = &that_fbody->ret_nodes[i]->def();
GraphHasher this_hasher(&this_graph_def, this_root, flib_, node_cache_,
function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(this_hasher.Init());
GraphHasher that_hasher(&that_graph_def, that_root, that->flib_,
node_cache_, function_cache_, attr_cache_);
TF_RETURN_IF_ERROR(that_hasher.Init());
TF_RETURN_IF_ERROR(this_hasher.CheckEqual(&that_hasher));
}
std::vector<const NodeDef*> this_control_rets;
this_control_rets.reserve(this_fbody->control_ret_nodes.size());
for (const auto& control_ret_node : this_fbody->control_ret_nodes) {
this_control_rets.push_back(&control_ret_node->def());
}
std::vector<const NodeDef*> that_control_rets;
that_control_rets.reserve(that_fbody->control_ret_nodes.size());
for (const auto& control_ret_node : that_fbody->control_ret_nodes) {
that_control_rets.push_back(&control_ret_node->def());
}
TF_RETURN_IF_ERROR(
CheckControlInputsEqual(this_control_rets, that, that_control_rets));
return absl::OkStatus();
}
Status HashControlInputs(const std::vector<const NodeDef*>& inputs,
uint64* hash) {
*hash = 0;
for (const NodeDef* input : inputs) {
uint64 node_hash = 0;
      TF_RETURN_IF_ERROR(
          HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
*hash = Hash64CombineUnordered(*hash, node_hash);
}
return absl::OkStatus();
}
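  // Editorial note: Hash64CombineUnordered above makes the result independent
  // of control-input order, so the inputs {A, B} and {B, A} hash identically;
  // the HashNodeControlInputDifferentOrdering test in the companion test file
  // relies on exactly this property.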
Status CheckControlInputsEqual(
const std::vector<const NodeDef*>& this_inputs, GraphHasher* that,
const std::vector<const NodeDef*>& that_inputs) {
absl::flat_hash_map<uint64, const NodeDef*> this_hashes;
for (const NodeDef* input : this_inputs) {
uint64 node_hash = 0;
      TF_RETURN_IF_ERROR(
          HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
this_hashes[node_hash] = input;
}
absl::flat_hash_map<uint64, const NodeDef*> that_hashes;
for (const NodeDef* input : that_inputs) {
uint64 node_hash = 0;
      TF_RETURN_IF_ERROR(
          HashNodeNonInput(input, /*hash_functions=*/false, &node_hash));
auto this_iter = this_hashes.find(node_hash);
if (this_iter != this_hashes.end()) {
this_hashes.erase(this_iter);
} else {
that_hashes[node_hash] = input;
}
}
if (!this_hashes.empty()) {
auto formatter = [](string* out,
const decltype(this_hashes)::value_type& item) {
out->append(item.second->name());
};
return errors::FailedPrecondition(
"Control dependencies are different. One node has dependencies [",
absl::StrJoin(this_hashes, ", ", formatter),
"], which don't match any of the other node's dependencies [",
absl::StrJoin(that_hashes, ", ", formatter), "]");
}
return absl::OkStatus();
}
private:
bool is_cycle_forming_edge(const NodeDef* start, const NodeDef* end) {
EdgeRep edge(start, end);
return cycle_forming_edges_.contains(edge.GetHash());
}
struct NodeRep {
std::vector<const NodeDef*> node_control_inputs;
std::vector<std::pair<const NodeDef*, absl::string_view>> node_inputs;
};
struct EdgeRep {
const NodeDef* start_node;
const NodeDef* end_node;
EdgeRep(const NodeDef* start, const NodeDef* end)
: start_node(start), end_node(end) {}
uint64 GetHash() {
return Hash64Combine(absl::Hash<const NodeDef*>()(start_node),
absl::Hash<const NodeDef*>()(end_node));
}
};
const GraphDef* const graph_;
const NodeDef* const root_;
const FunctionLibraryDefinition* const flib_;
absl::flat_hash_set<uint64> cycle_forming_edges_;
absl::flat_hash_map<const NodeDef*, NodeRep> nodes_;
std::shared_ptr<NodeCache> node_cache_;
std::shared_ptr<FunctionCache> function_cache_;
std::shared_ptr<AttrCache> attr_cache_;
};
}  // namespace
Status HashTensor(const Tensor& tensor, uint64* hash) {
const tstring* s = nullptr;
*hash = Hash64Combine(0, tensor.dtype());
for (int i = 0; i < tensor.shape().dims(); ++i) {
*hash = Hash64Combine(*hash, tensor.shape().dim_size(i));
}
switch (tensor.dtype()) {
case DT_RESOURCE:
case DT_VARIANT:
return errors::Unimplemented("Hashing ", DataTypeString(tensor.dtype()),
" is not supported.");
case DT_STRING:
s = tensor.flat<tstring>().data();
for (int i = 0; i < tensor.NumElements(); ++i, ++s) {
*hash = Hash64Combine(*hash, Hash64(s->data(), s->size()));
}
break;
default:
*hash = Hash64(tensor.tensor_data().data(), tensor.tensor_data().size());
}
return absl::OkStatus();
}
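// Editorial usage sketch for HashTensor; the local names are assumptions, not
// part of the original file. The dtype and every shape dimension are folded
// into the hash before the payload, so equal tensors hash equally:
//
//   Tensor t(DT_INT32, TensorShape({2}));
//   t.flat<int32>()(0) = 1;
//   t.flat<int32>()(1) = 2;
//   uint64 h = 0;
//   TF_CHECK_OK(HashTensor(t, &h));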
Status HashNode(const GraphDef& graph, const NodeDef& node, uint64* hash) {
const FunctionLibraryDefinition flib_def(OpRegistry::Global(),
graph.library());
return HashNode(graph, node, flib_def, hash);
}
Status HashNode(const GraphDef& graph, const NodeDef& node,
                const FunctionLibraryDefinition& flib_def, uint64* hash) {
  GraphHasher hasher(&graph, &node, &flib_def);
  TF_RETURN_IF_ERROR(hasher.Init());
  return hasher.HashRoot(hash);
} | #include "tensorflow/core/data/hash_utils.h"
#include <utility>
#include <vector>
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ContainsRegex;
class DatasetHashUtilsTest : public ::testing::Test {
protected:
uint64 GetHash(const FunctionDefLibrary& library, const FunctionDef& fn) {
GraphDef graph_def;
*graph_def.mutable_library() = library;
NodeDef* node = graph_def.add_node();
node->set_op("RemoteCall");
NameAttrList func;
func.set_name(fn.signature().name());
AddNodeAttr("f", func, node);
uint64 hash = 0;
TF_CHECK_OK(HashNode(graph_def, *node, &hash));
return hash;
}
Status CheckEqual(const FunctionDefLibrary& library, const FunctionDef& fn1,
const FunctionDef& fn2) {
GraphDef graph_def;
*graph_def.mutable_library() = library;
NodeDef* node1 = graph_def.add_node();
node1->set_name("RemoteCall");
node1->set_op("RemoteCall");
NameAttrList func1;
func1.set_name(fn1.signature().name());
AddNodeAttr("f", func1, node1);
NodeDef* node2 = graph_def.add_node();
node1->set_name("RemoteCall2");
node2->set_op("RemoteCall");
NameAttrList func2;
func2.set_name(fn2.signature().name());
AddNodeAttr("f", func2, node2);
return CheckSubgraphsEqual(graph_def, node1, graph_def, node2);
}
uint64 GetHash(const GraphDef& graph, const NodeDef& node) {
uint64 hash = 0;
TF_CHECK_OK(HashNode(graph, node, &hash));
return hash;
}
uint64 GetHash(const Tensor& tensor) {
uint64 hash = 0;
TF_CHECK_OK(HashTensor(tensor, &hash));
return hash;
}
};
TEST_F(DatasetHashUtilsTest, HashFunctionSameFunctionDifferentNames) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
EXPECT_EQ(GetHash(fl, *f1), GetHash(fl, *f2));
TF_EXPECT_OK(CheckEqual(fl, *f1, *f2));
}
TEST_F(DatasetHashUtilsTest, HashFunctionDifferentFunctions) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndAdd", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
EXPECT_NE(GetHash(fl, *f1), GetHash(fl, *f2));
Status s = CheckEqual(fl, *f1, *f2);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Add"));
}
TEST_F(DatasetHashUtilsTest, HashFunctionDifferentInternalNodeNames) {
FunctionDefLibrary fl;
FunctionDef* f1 = fl.add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float", "j: float", "k: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "j"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"add:z:0", "k"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
FunctionDef* f2 = fl.add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"a: float", "b: float", "c: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"mul"}, "Mul", {"add:z:0", "c"}, {{"T", DT_FLOAT}}}},
{{"o", "mul:z:0"}},
{{"must_execute", "mul"}});
EXPECT_EQ(GetHash(fl, *f1), GetHash(fl, *f2));
TF_EXPECT_OK(CheckEqual(fl, *f1, *f2));
}
TEST_F(DatasetHashUtilsTest, HashGraphWithMultipleCycles) {
uint64 hash = 0;
for (int i = 0; i < 1000; ++i) {
GraphDef g;
NodeDef* output_node = g.add_node();
TF_CHECK_OK(NodeDefBuilder("O", "Add")
.Input("A", 0, DT_FLOAT)
.Input("D", 0, DT_FLOAT)
.Finalize(output_node));
TF_CHECK_OK(NodeDefBuilder("A", "Abs")
.Input("B", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("B", "Add")
.Input("C", 0, DT_FLOAT)
.Input("D", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("C", "Ceil")
.Input("A", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("D", "Cos")
.Input("E", 0, DT_FLOAT)
.Finalize(g.add_node()));
TF_CHECK_OK(NodeDefBuilder("E", "Floor")
.Input("B", 0, DT_FLOAT)
.Finalize(g.add_node()));
uint64 t = GetHash(g, *output_node);
if (hash == 0) {
hash = t;
} else {
EXPECT_EQ(t, hash);
}
}
}
TEST_F(DatasetHashUtilsTest, HashNodeSameGraphDifferentNames) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_3/node_7", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_4/node_9", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n5));
NodeDef* n6 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_5/node_11", "Add")
.Device("CPU:0")
.Input(n4->name(), 0, DT_INT32)
.Input(n5->name(), 0, DT_INT32)
.Finalize(n6));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n6);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n3, gd, n6));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentGraphs) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Mul")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Add"));
EXPECT_THAT(s.message(), ContainsRegex("Mul"));
}
TEST_F(DatasetHashUtilsTest, HashSameGraphDifferentSeeds) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* seed = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/seed", "Const")
.Attr("value", 123)
.Device("CPU:0")
.Finalize(seed));
NodeDef* seed2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/seed2", "Const")
.Attr("value", 456)
.Device("CPU:0")
.Finalize(seed2));
NodeDef* range_ds = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/range", "RangeDataset")
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(range_ds));
NodeDef* shuffle_ds = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/shuffle", "ShuffleDataset")
.Input(range_ds->name(), 0, DT_VARIANT)
.Input(n1->name(), 0, DT_INT64)
.Input(seed->name(), 0, DT_INT64)
.Input(seed2->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(shuffle_ds));
NodeDef* different_seed = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/different_seed", "Const")
.Attr("value", 789)
.Device("CPU:0")
.Finalize(different_seed));
NodeDef* different_seed2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/different_seed2", "Const")
.Attr("value", 654)
.Device("CPU:0")
.Finalize(different_seed2));
NodeDef* range_ds_2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/range_2", "RangeDataset")
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Input(n1->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(range_ds_2));
NodeDef* shuffle_ds_2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/shuffle_2", "ShuffleDataset")
.Input(range_ds_2->name(), 0, DT_VARIANT)
.Input(n1->name(), 0, DT_INT64)
.Input(different_seed->name(), 0, DT_INT64)
.Input(different_seed2->name(), 0, DT_INT64)
.Device("CPU:0")
.Finalize(shuffle_ds_2));
uint64 hash1 = GetHash(gd, *shuffle_ds);
uint64 hash2 = GetHash(gd, *shuffle_ds_2);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, shuffle_ds, gd, shuffle_ds_2));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameGraphDifferentColocationNames) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Attr("_class", {"graph_1/node_2"})
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_3/node_7", "Const")
.Attr("value", 1)
.Attr("_class", {"graph_3/node_9"})
.Device("CPU:0")
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_4/node_9", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n5));
NodeDef* n6 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_5/node_11", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n6));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n6);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n3, gd, n6));
}
TEST_F(DatasetHashUtilsTest, HashNodeReversedOrder) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Add")
.Device("CPU:0")
.Input(n2->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("AttrValues are different"));
}
TEST_F(DatasetHashUtilsTest, HashNodeInputPortChanged) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Add")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.Input(n2->name(), 0, DT_INT32)
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Add")
.Device("CPU:0")
.Input(n1->name(), 1, DT_INT32)
.Input(n2->name(), 2, DT_INT32)
.Finalize(n4));
uint64 hash1 = GetHash(gd, *n3);
uint64 hash2 = GetHash(gd, *n4);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n3, gd, n4);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Node inputs"));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionDifferentNames) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl1->add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
NameAttrList* nal2 = a2.mutable_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionListsDifferentNames) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
*f1 = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
FunctionDef* f2 = fl1->add_function();
*f2 = FunctionDefHelper::Create(
"AddAndMul2", {"input: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"input", "input"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"input", "input"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
AttrValue a1;
AttrValue_ListValue* list1 = a1.mutable_list();
NameAttrList* nal1 = list1->add_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
AttrValue_ListValue* list2 = a2.mutable_list();
NameAttrList* nal2 = list2->add_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeSameFunctionsOps) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f2 = func;
FunctionLibraryDefinition flib(OpRegistry::Global(), gd.library());
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "AddAndMul", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "AddAndMul2", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_EQ(hash1, hash2);
TF_EXPECT_OK(CheckSubgraphsEqual(gd, n2, gd, n3));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctionsOps) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
FunctionLibraryDefinition flib(OpRegistry::Global(), gd.library());
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "AddAndMul", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "AddAndMul2", &flib)
.Input(n1->name(), 0, DT_FLOAT)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctions) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
AttrValue a1;
NameAttrList* nal1 = a1.mutable_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
NameAttrList* nal2 = a2.mutable_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentFunctionLists) {
GraphDef gd;
FunctionDefLibrary* fl1 = gd.mutable_library();
FunctionDef* f1 = fl1->add_function();
FunctionDef func = FunctionDefHelper::Create(
"AddAndMul", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
*f1 = func;
FunctionDef* f2 = fl1->add_function();
func = FunctionDefHelper::Create(
"AddAndMul2", {"i: float"}, {"o: float"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_FLOAT}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "ret"}});
*f2 = func;
AttrValue a1;
AttrValue_ListValue* list1 = a1.mutable_list();
NameAttrList* nal1 = list1->add_func();
nal1->set_name("AddAndMul");
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
std::vector<NodeDefBuilder::NodeOut> func_inputs;
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
func_inputs.emplace_back(n1->name(), 0, DT_FLOAT);
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a1)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
AttrValue a2;
AttrValue_ListValue* list2 = a2.mutable_list();
NameAttrList* nal2 = list2->add_func();
nal2->set_name("AddAndMul2");
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "For")
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(n1->name(), 0, DT_INT32)
.Input(func_inputs)
.Attr("body", a2)
.Device("CPU:0")
.Finalize(n3));
uint64 hash1 = GetHash(gd, *n2);
uint64 hash2 = GetHash(gd, *n3);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n2, gd, n3);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(
s.message(),
ContainsRegex("Functions AddAndMul and AddAndMul2 are not the same"));
}
TEST_F(DatasetHashUtilsTest, HashNodeDifferentControlInputs) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Const")
.Attr("value", 10)
.Device("CPU:0")
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n2->name())
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n3->name())
.Finalize(n5));
uint64 hash1 = GetHash(gd, *n4);
uint64 hash2 = GetHash(gd, *n5);
EXPECT_NE(hash1, hash2);
Status s = CheckSubgraphsEqual(gd, n4, gd, n5);
EXPECT_NE(s.code(), error::OK);
EXPECT_THAT(s.message(), ContainsRegex("Control dependencies are different"));
}
TEST_F(DatasetHashUtilsTest, HashNodeControlInputDifferentOrdering) {
GraphDef gd;
NodeDef* n1 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_1", "Const")
.Attr("value", 1)
.Device("CPU:0")
.Finalize(n1));
NodeDef* n2 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_2", "Const")
.Attr("value", 2)
.Device("CPU:0")
.Finalize(n2));
NodeDef* n3 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_3", "Const")
.Attr("value", 10)
.Device("CPU:0")
.Finalize(n3));
NodeDef* n4 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_4", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n2->name())
.ControlInput(n3->name())
.Finalize(n4));
NodeDef* n5 = gd.add_node();
TF_CHECK_OK(NodeDefBuilder("graph_1/node_5", "Identity")
.Device("CPU:0")
.Input(n1->name(), 0, DT_INT32)
.ControlInput(n3->name())
.ControlInput(n2->name())
.Finalize(n5));
uint64 hash1 = GetHash(gd, *n4);
uint64 hash2 = GetHash(gd, *n5);
EXPECT_EQ(hash1, hash2);
  TF_EXPECT_OK(CheckSubgraphsEqual(gd, n4, gd, n5));
} |
1,725 | cpp | tensorflow/tensorflow | tfdataz_metrics | tensorflow/core/data/tfdataz_metrics.cc | tensorflow/core/data/tfdataz_metrics_test.cc | #ifndef TENSORFLOW_CORE_DATA_TFDATAZ_METRICS_H_
#define TENSORFLOW_CORE_DATA_TFDATAZ_METRICS_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
class ApproximateLatencyEstimator {
public:
enum class Duration {
kMinute = 1,
kFiveMinutes = 5,
kSixtyMinutes = 60,
};
explicit ApproximateLatencyEstimator(const Env& env);
void AddLatency(int64_t latency_usec);
absl::Duration GetAverageLatency(Duration duration);
private:
static constexpr int64_t kSecondsPerMinute = 60;
static constexpr int64_t kMinutesPerHour = 60;
static constexpr int64_t kSlots = kMinutesPerHour;
void UpdateRingBuffer() TF_LOCKS_EXCLUDED(mu_);
void IncrementNextSlot() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
int PrevSlot(int steps) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
const Env& env_;
int64_t last_updated_time_mins_ TF_GUARDED_BY(mu_);
mutex mu_;
int64_t latency_value_counter_ TF_GUARDED_BY(mu_);
int64_t latency_count_counter_ TF_GUARDED_BY(mu_);
int next_slot_ TF_GUARDED_BY(mu_);
int64_t latency_value_[kSlots] TF_GUARDED_BY(mu_);
int64_t latency_count_[kSlots] TF_GUARDED_BY(mu_);
};
class TfDatazMetricsCollector {
public:
TfDatazMetricsCollector(const Env& env, DatasetBaseIterator* iterator,
std::shared_ptr<model::Model> model);
void RecordGetNextLatency(int64_t get_next_latency_usec);
absl::Duration GetAverageLatencyForLastOneMinute();
absl::Duration GetAverageLatencyForLastFiveMinutes();
absl::Duration GetAverageLatencyForLastSixtyMinutes();
std::optional<std::string> DatasetName();
int64_t GetIteratorTotalMemoryUsage();
std::shared_ptr<model::Model> GetModel();
private:
DatasetBaseIterator* iterator_;
std::shared_ptr<model::Model> model_;
ApproximateLatencyEstimator latency_estimator_;
};
class TfDatazMetricsRegistry {
public:
static void Register(std::shared_ptr<TfDatazMetricsCollector> collector);
static void Deregister(std::shared_ptr<TfDatazMetricsCollector> collector);
static absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>
GetIteratorMetricCollectors();
};
}  // namespace data
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_DATA_TFDATAZ_METRICS_H_
#include "tensorflow/core/data/tfdataz_metrics.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
ApproximateLatencyEstimator::ApproximateLatencyEstimator(const Env& env)
: env_(env),
last_updated_time_mins_(0),
latency_value_counter_(0),
latency_count_counter_(0),
next_slot_(0) {
for (int i = 0; i < kSlots; ++i) {
latency_value_[i] = 0;
latency_count_[i] = 0;
}
}
void ApproximateLatencyEstimator::AddLatency(const int64_t latency_usec)
TF_LOCKS_EXCLUDED(mu_) {
UpdateRingBuffer();
mutex_lock l(mu_);
latency_value_counter_ += latency_usec;
latency_count_counter_ += 1;
}
void ApproximateLatencyEstimator::UpdateRingBuffer() TF_LOCKS_EXCLUDED(mu_) {
int64_t now_minutes =
absl::ToInt64Minutes(absl::Microseconds(env_.NowMicros()));
mutex_lock l(mu_);
int64_t elapsed_minutes = now_minutes - last_updated_time_mins_;
int64_t minutes_to_update = std::min(elapsed_minutes, kSlots);
for (int i = 0; i < minutes_to_update; ++i) {
latency_value_[next_slot_] = latency_value_counter_;
latency_count_[next_slot_] = latency_count_counter_;
IncrementNextSlot();
}
last_updated_time_mins_ = now_minutes;
}
void ApproximateLatencyEstimator::IncrementNextSlot()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
next_slot_ = (next_slot_ + 1) % kSlots;
}
int ApproximateLatencyEstimator::PrevSlot(int steps)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (next_slot_ - steps + kSlots) % kSlots;
}
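// Editorial worked example, not in the original source: with kSlots == 60 and
// next_slot_ == 2, PrevSlot(5) == (2 - 5 + 60) % 60 == 57, i.e. the slot that
// was written five minutes ago; adding kSlots keeps the modulus non-negative.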
absl::Duration ApproximateLatencyEstimator::GetAverageLatency(Duration duration)
TF_LOCKS_EXCLUDED(mu_) {
UpdateRingBuffer();
mutex_lock l(mu_);
double interval_latency =
static_cast<double>(latency_value_counter_ -
latency_value_[PrevSlot(static_cast<int>(duration))]);
double interval_count =
static_cast<double>(latency_count_counter_ -
latency_count_[PrevSlot(static_cast<int>(duration))]);
if (interval_count == 0) {
return absl::ZeroDuration();
}
  return absl::Microseconds(interval_latency) / interval_count;
}
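// Editorial worked example with assumed numbers: if the cumulative counters
// now read {900us, 9 calls} and the slot written one minute ago holds
// {600us, 6 calls}, then GetAverageLatency(Duration::kMinute) returns
// (900 - 600)us / (9 - 6) == 100us of average latency over the last minute.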
TfDatazMetricsCollector::TfDatazMetricsCollector(
const Env& env, DatasetBaseIterator* iterator,
std::shared_ptr<model::Model> model)
: iterator_(iterator), model_(std::move(model)), latency_estimator_(env) {}
void TfDatazMetricsCollector::RecordGetNextLatency(
int64_t get_next_latency_usec) {
if (get_next_latency_usec > 0) {
latency_estimator_.AddLatency(get_next_latency_usec);
}
}
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastOneMinute() {
return latency_estimator_.GetAverageLatency(
ApproximateLatencyEstimator::Duration::kMinute);
}
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastFiveMinutes() {
return latency_estimator_.GetAverageLatency(
ApproximateLatencyEstimator::Duration::kFiveMinutes);
}
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastSixtyMinutes() {
return latency_estimator_.GetAverageLatency(
ApproximateLatencyEstimator::Duration::kSixtyMinutes);
}
std::optional<std::string> TfDatazMetricsCollector::DatasetName() {
auto options = iterator_->dataset()->options();
if (options.has_dataset_name()) {
return std::make_optional(options.dataset_name());
}
return std::nullopt;
}
int64_t TfDatazMetricsCollector::GetIteratorTotalMemoryUsage() {
return iterator_->TotalBufferedBytes();
}
std::shared_ptr<model::Model> TfDatazMetricsCollector::GetModel() {
return model_;
}
namespace {
static mutex* get_tfdataz_metrics_registry_lock() {
static mutex tfdataz_metrics_registry_lock(LINKER_INITIALIZED);
return &tfdataz_metrics_registry_lock;
}
using TfDatazMetricsCollectors =
absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>;
TfDatazMetricsCollectors& tfdataz_metric_collectors() {
static auto& collectors = *new TfDatazMetricsCollectors();
return collectors;
}
}  // namespace
void TfDatazMetricsRegistry::Register(
std::shared_ptr<TfDatazMetricsCollector> collector) {
mutex_lock l(*get_tfdataz_metrics_registry_lock());
tfdataz_metric_collectors().insert(collector);
}
void TfDatazMetricsRegistry::Deregister(
std::shared_ptr<TfDatazMetricsCollector> collector) {
mutex_lock l(*get_tfdataz_metrics_registry_lock());
tfdataz_metric_collectors().erase(collector);
}
absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>
TfDatazMetricsRegistry::GetIteratorMetricCollectors() {
mutex_lock l(*get_tfdataz_metrics_registry_lock());
return tfdataz_metric_collectors();
}
}
} | #include "tensorflow/core/data/tfdataz_metrics.h"
#include <memory>
#include <utility>
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/fake_clock_env.h"
namespace tensorflow {
namespace data {
namespace {
static int64_t k1MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(1));
static int64_t k2MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(2));
static int64_t k5MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(5));
static int64_t k59MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(59));
static int64_t k60MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(60));
static int64_t k61MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(61));
class TfDatazMetricsTest : public ::testing::Test {
protected:
void SetUp() override {
env_ = std::make_unique<FakeClockEnv>(Env::Default());
tfdataz_metrics_ = std::make_unique<TfDatazMetricsCollector>(
*env_, iterator_.get(), nullptr);
}
void TearDown() override {
env_.reset();
tfdataz_metrics_.reset();
}
std::unique_ptr<DatasetBaseIterator> iterator_;
std::unique_ptr<FakeClockEnv> env_;
std::unique_ptr<TfDatazMetricsCollector> tfdataz_metrics_;
};
TEST_F(TfDatazMetricsTest, RecordGetNextLatency) {
tfdataz_metrics_->RecordGetNextLatency(1);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
2.0);
}
TEST_F(TfDatazMetricsTest, GetAverageLatencyForLastOneMinute) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k2MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
2.5);
}
TEST_F(TfDatazMetricsTest, GetAverageLatencyForLastFiveMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k5MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
5.0);
}
TEST_F(TfDatazMetricsTest,
GetAverageLatencyForLastSixtyMinutesWithAdvanceBySixtyMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k60MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
5.0);
}
TEST_F(TfDatazMetricsTest,
GetAverageLatencyForLastSixtyMinutesWithAdvanceByFiftyNineMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k59MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
4.0);
}
TEST_F(TfDatazMetricsTest,
GetAverageLatencyForLastSixtyMinutesWithAdvanceBySixtyOneMinutes) {
tfdataz_metrics_->RecordGetNextLatency(1);
env_->AdvanceByMicroseconds(k61MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
tfdataz_metrics_->RecordGetNextLatency(4);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
3.0);
}
TEST_F(TfDatazMetricsTest, GetMultipleAverageLatencies) {
tfdataz_metrics_->RecordGetNextLatency(1);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
1.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
1.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
1.0);
env_->AdvanceByMicroseconds(k1MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(2);
tfdataz_metrics_->RecordGetNextLatency(3);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
2.5);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
2.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
2.0);
env_->AdvanceByMicroseconds(k60MinutesInMicros);
tfdataz_metrics_->RecordGetNextLatency(4);
tfdataz_metrics_->RecordGetNextLatency(5);
tfdataz_metrics_->RecordGetNextLatency(6);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
5.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
5.0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
5.0);
}
TEST_F(TfDatazMetricsTest, GetAverageLatencyWithZeroGetNextCalls) {
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
0);
EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
0);
}
class ScopedTfDataMetricsRegistration {
public:
explicit ScopedTfDataMetricsRegistration(
std::shared_ptr<TfDatazMetricsCollector> collector)
: collector_(std::move(collector)) {
TfDatazMetricsRegistry::Register(collector_);
}
~ScopedTfDataMetricsRegistration() {
TfDatazMetricsRegistry::Deregister(collector_);
}
void Deregister() { TfDatazMetricsRegistry::Deregister(collector_); }
private:
std::shared_ptr<TfDatazMetricsCollector> collector_;
};
TEST(TfDatazMetricsRegistryTest, Register) {
std::unique_ptr<DatasetBaseIterator> iterator;
auto collector_one = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
auto collector_two = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
ScopedTfDataMetricsRegistration scoped_registration_one(collector_one);
ScopedTfDataMetricsRegistration scoped_registration_two(collector_two);
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 2);
}
TEST(TfDatazMetricsRegistryTest, Deregister) {
std::unique_ptr<DatasetBaseIterator> iterator;
auto collector_one = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
auto collector_two = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
auto collector_three = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), iterator.get(), nullptr);
ScopedTfDataMetricsRegistration scoped_registration_one(collector_one);
ScopedTfDataMetricsRegistration scoped_registration_two(collector_two);
ScopedTfDataMetricsRegistration scoped_registration_three(collector_three);
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 3);
scoped_registration_one.Deregister();
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 2);
scoped_registration_two.Deregister();
scoped_registration_three.Deregister();
EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 0);
}
}  // namespace
}  // namespace data
}  // namespace tensorflow
1,726 | cpp | tensorflow/tensorflow | metric_utils | tensorflow/core/data/metric_utils.cc | tensorflow/core/data/metric_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_METRIC_UTILS_H_
#define TENSORFLOW_CORE_DATA_METRIC_UTILS_H_
#include <cstdint>
#include <string>
#include <vector>
#include "absl/time/time.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
class IteratorMetricsCollector {
public:
IteratorMetricsCollector(const std::string& device_type, const Env& env);
absl::Time RecordStart();
void RecordStop(absl::Time start_time, const std::vector<Tensor>& output);
private:
bool ShouldCollectMetrics() const;
const std::string device_type_;
const Env& env_;
mutex mu_;
uint64_t num_active_calls_ TF_GUARDED_BY(mu_) = 0;
uint64_t first_start_time_us_ TF_GUARDED_BY(mu_) = 0;
uint64_t end_time_us_ TF_GUARDED_BY(mu_) = 0;
};
}  // namespace data
}  // namespace tensorflow
#endif  // TENSORFLOW_CORE_DATA_METRIC_UTILS_H_
#include "tensorflow/core/data/metric_utils.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/time/time.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace data {
namespace {
uint64_t safe_sub(uint64_t x, uint64_t y) { return x >= y ? x - y : 0; }
}  // namespace
IteratorMetricsCollector::IteratorMetricsCollector(
const std::string& device_type, const Env& env)
: device_type_(device_type), env_(env) {}
absl::Time IteratorMetricsCollector::RecordStart() {
const uint64_t start_time_us = env_.NowMicros();
if (!ShouldCollectMetrics()) {
return absl::FromUnixMicros(start_time_us);
}
mutex_lock l(mu_);
if (end_time_us_ == 0) {
end_time_us_ = start_time_us;
}
uint64_t gap_time_us = 0;
if (num_active_calls_ == 0) {
first_start_time_us_ = start_time_us;
gap_time_us = safe_sub(start_time_us, end_time_us_);
}
metrics::RecordTFDataIteratorGap(gap_time_us);
num_active_calls_++;
return absl::FromUnixMicros(start_time_us);
}
void IteratorMetricsCollector::RecordStop(absl::Time start_time,
const std::vector<Tensor>& output) {
if (!ShouldCollectMetrics()) {
return;
}
const uint64_t end_time_us = env_.NowMicros();
const int64_t latency_micros =
safe_sub(end_time_us, absl::ToUnixMicros(start_time));
AddLatencySample(latency_micros);
IncrementThroughput(GetTotalBytes(output));
mutex_lock l(mu_);
metrics::RecordTFDataIteratorLifetime(safe_sub(end_time_us, end_time_us_));
end_time_us_ = std::max(end_time_us_, end_time_us);
num_active_calls_--;
if (num_active_calls_ == 0) {
metrics::RecordTFDataIteratorBusy(
safe_sub(end_time_us_, first_start_time_us_));
}
}
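// Metrics are only collected for iterators on the CPU; for other device types
// RecordStart and RecordStop record nothing.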
bool IteratorMetricsCollector::ShouldCollectMetrics() const {
return device_type_ == DEVICE_CPU;
}
}
} | #include "tensorflow/core/data/metric_utils.h"
#include <cstdint>
#include "absl/memory/memory.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/test_utils.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using tensorflow::monitoring::testing::CellReader;
using tensorflow::monitoring::testing::Histogram;
TEST(MetricUtilsTest, CollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_CPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(start_time, {});
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 1.0);
EXPECT_GT(latency_histogram.sum(), 0.0);
EXPECT_GT(iterator_lifetime.Delta(), 0);
EXPECT_GT(iterator_busy.Delta(), 0);
}
TEST(MetricUtilsTest, ShouldNotCollectMetrics) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_TPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(start_time, {});
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
}
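// Two fully overlapping ~1s GetNext calls: their latencies sum past 2s, but
// lifetime and busy time count wall-clock time once, landing near 1s.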
TEST(MetricUtilsTest, ConcurrentThreads) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_CPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "Concurrent metric collection thread",
[&metrics_collector]() {
absl::Time concurrent_start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(concurrent_start_time, {});
}));
absl::SleepFor(absl::Seconds(1));
metrics_collector.RecordStop(start_time, {});
thread.reset();
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 2.0);
EXPECT_GT(latency_histogram.sum(),
absl::ToInt64Microseconds(absl::Seconds(2)));
EXPECT_GE(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(1)));
EXPECT_LT(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(1.5)));
EXPECT_GE(iterator_busy.Delta(), absl::ToInt64Microseconds(absl::Seconds(1)));
EXPECT_LT(iterator_busy.Delta(),
absl::ToInt64Microseconds(absl::Seconds(1.5)));
}
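// Staggered calls (~0-1s and ~0.5-2.5s): latencies sum past 3s, while the
// continuously covered wall-clock span keeps lifetime and busy time near 2.5s.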
TEST(MetricUtilsTest, OverlappingThreads) {
CellReader<Histogram> latency("/tensorflow/data/getnext_duration");
CellReader<int64_t> iterator_lifetime("/tensorflow/data/iterator_lifetime");
CellReader<int64_t> iterator_busy("/tensorflow/data/iterator_busy");
EXPECT_FLOAT_EQ(latency.Delta().num(), 0.0);
EXPECT_EQ(iterator_lifetime.Delta(), 0);
EXPECT_EQ(iterator_busy.Delta(), 0);
IteratorMetricsCollector metrics_collector(DEVICE_CPU, *Env::Default());
absl::Time start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(0.5));
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "Concurrent metric collection thread",
[&metrics_collector]() {
absl::Time concurrent_start_time = metrics_collector.RecordStart();
absl::SleepFor(absl::Seconds(2));
metrics_collector.RecordStop(concurrent_start_time, {});
}));
absl::SleepFor(absl::Seconds(0.5));
metrics_collector.RecordStop(start_time, {});
absl::SleepFor(absl::Seconds(1.5));
thread.reset();
Histogram latency_histogram = latency.Delta();
EXPECT_FLOAT_EQ(latency_histogram.num(), 2.0);
EXPECT_GT(latency_histogram.sum(),
absl::ToInt64Microseconds(absl::Seconds(3)));
EXPECT_GE(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.5)));
EXPECT_LT(iterator_lifetime.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.9)));
EXPECT_GE(iterator_busy.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.5)));
EXPECT_LT(iterator_busy.Delta(),
absl::ToInt64Microseconds(absl::Seconds(2.9)));
}
}
}
} |
1,727 | cpp | tensorflow/tensorflow | rewrite_utils | tensorflow/core/data/rewrite_utils.cc | tensorflow/core/data/rewrite_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_REWRITE_UTILS_H_
#define TENSORFLOW_CORE_DATA_REWRITE_UTILS_H_
#include "tensorflow/core/platform/platform.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <functional>
#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
RewriterConfig CreateRewriterConfig(
const absl::flat_hash_set<tstring>& optimizations,
const absl::flat_hash_set<tstring>& optimizations_configs);
Status RewriteDataset(OpKernelContext* ctx, const DatasetBase* input,
std::function<RewriterConfig(void)> config_factory,
bool record_fingerprint,
core::RefCountPtr<DatasetBase>* rewritten_input);
std::unique_ptr<tensorflow::grappler::GrapplerItem> GetGrapplerItem(
GraphDef* graph_def, std::string* dataset_node, bool add_fake_sinks,
bool apply_optimizations = true);
absl::StatusOr<std::string> GetDatasetNode(const GraphDef& graph_def);
absl::StatusOr<NodeDef> GetDatasetNodeDef(const GraphDef& graph_def);
absl::flat_hash_set<tstring> SelectOptimizations(
const absl::flat_hash_set<string>& experiments,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default);
}
}
#endif
#endif
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/platform/refcount.h"
#if !defined(IS_MOBILE_PLATFORM)
#include <algorithm>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/hash_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptimizerName[] = "tf_data_meta_optimizer";
constexpr char kOptimizers[] = "optimizers";
constexpr char kOptimizerConfigs[] = "optimizer_configs";
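// Grappler prunes nodes that are unreachable from a fetch node. Adding an
// Identity "fake sink" per function output keeps function bodies alive during
// optimization; RemoveFakeSinks below undoes the rewiring afterwards.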
void AddFakeSinks(FunctionDef* function_def) {
int counter = 0;
for (const auto& output : function_def->signature().output_arg()) {
NodeDef* node = function_def->add_node_def();
tensorflow::grappler::function_utils::SetUniqueFunctionNodeName(
strings::StrCat("FakeSink", counter++), function_def, node);
node->set_op("Identity");
node->add_input(function_def->ret().at(output.name()));
(*node->mutable_attr())["T"].set_type(output.type());
(*function_def->mutable_ret())[output.name()] =
strings::StrCat(node->name(), ":output:0");
}
}
void RemoveFakeSinks(FunctionDef* function_def) {
std::map<std::string, std::string> identity_map;
for (const auto& node : function_def->node_def()) {
if (node.op() == "Identity" && node.input_size() == 1) {
identity_map[node.name()] = node.input(0);
}
}
for (const auto& output_arg : function_def->signature().output_arg()) {
const std::string& tensor = function_def->ret().at(output_arg.name());
const std::string& output_node = tensor.substr(0, tensor.find(':'));
if (identity_map.find(output_node) != identity_map.end()) {
(*function_def->mutable_ret())[output_arg.name()] =
identity_map.at(output_node);
}
}
}
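// Wraps the graph in a GrapplerItem (with fake sinks), runs the meta-optimizer
// with the config produced by `config_factory`, then strips the sinks back out
// of the function library.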
Status ApplyRewrites(OpKernelContext* ctx,
const std::function<RewriterConfig(void)> config_factory,
GraphDef* graph_def, string* dataset_node) {
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
GetGrapplerItem(graph_def, dataset_node, true);
std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
tensorflow::grappler::VirtualCluster cluster(device_map);
tensorflow::ConfigProto config;
*config.mutable_graph_options()->mutable_rewrite_options() = config_factory();
TF_RETURN_IF_ERROR(tensorflow::grappler::RunMetaOptimizer(
std::move(*grappler_item), config, ctx->device(), &cluster, graph_def));
for (auto& function_def : *graph_def->mutable_library()->mutable_function()) {
RemoveFakeSinks(&function_def);
}
return absl::OkStatus();
}
}
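// Routes the requested optimizations through the tf.data meta-optimizer as
// custom-optimizer parameters; unregistered optimizations are dropped with a
// VLOG(1) message.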
RewriterConfig CreateRewriterConfig(
const absl::flat_hash_set<tstring>& optimizations,
const absl::flat_hash_set<tstring>& optimizations_configs) {
RewriterConfig rewriter_config;
rewriter_config.add_optimizers(kOptimizerName);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
rewriter_config.set_fail_on_optimizer_errors(true);
auto custom_optimizer = rewriter_config.add_custom_optimizers();
custom_optimizer->set_name(kOptimizerName);
auto* custom_optimizations_list =
(*custom_optimizer->mutable_parameter_map())[kOptimizers].mutable_list();
const auto& registered_optimizers =
grappler::CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
for (const auto& optimization : optimizations) {
if (std::find(registered_optimizers.begin(), registered_optimizers.end(),
optimization) != registered_optimizers.end()) {
custom_optimizations_list->add_s(optimization.data(),
optimization.size());
} else {
VLOG(1) << "Optimization " << optimization << " is not registered.";
}
}
auto* config_list =
(*custom_optimizer->mutable_parameter_map())[kOptimizerConfigs]
.mutable_list();
for (const auto& config : optimizations_configs) {
config_list->add_s(config.data(), config.size());
}
return rewriter_config;
}
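// Serializes `input` to a GraphDef, applies the configured rewrites, and
// re-instantiates the optimized dataset by running the rewritten graph. If
// `record_fingerprint` is set, a hash of the optimized graph and its inputs is
// computed asynchronously on the context's runner and exported as a metric.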
Status RewriteDataset(OpKernelContext* ctx, const DatasetBase* input,
std::function<RewriterConfig(void)> config_factory,
bool record_fingerprint,
core::RefCountPtr<DatasetBase>* rewritten_input) {
std::vector<std::pair<string, Tensor>> input_list;
GraphDef graph_def;
string output_node;
TF_RETURN_IF_ERROR(
AsGraphDefForRewrite(ctx, input, &input_list, &graph_def, &output_node));
VLOG(3) << "Before graph rewrites: " << graph_def.DebugString();
TF_RETURN_IF_ERROR(
ApplyRewrites(ctx, config_factory, &graph_def, &output_node));
VLOG(3) << "After graph rewrites: " << graph_def.DebugString();
FunctionLibraryRuntime* flr = nullptr;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr = nullptr;
std::unique_ptr<FunctionLibraryDefinition> lib_def = nullptr;
TF_RETURN_IF_ERROR(
ctx->function_library()->Clone(&lib_def, &pflr, &flr, true));
TF_RETURN_IF_ERROR(AddToFunctionLibrary(lib_def.get(), graph_def.library()));
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
std::vector<Tensor> outputs;
GraphRunner graph_runner(flr->device());
TF_RETURN_IF_ERROR(
graph_runner.Run(&graph, flr, input_list, {output_node}, &outputs));
DatasetBase* rewritten_dataset;
TF_RETURN_IF_ERROR(
GetDatasetFromVariantTensor(outputs[0], &rewritten_dataset));
rewritten_dataset->Ref();
rewritten_input->reset(rewritten_dataset);
if (record_fingerprint) {
(*ctx->runner())([graph_def = std::move(graph_def),
lib_def = lib_def.release(),
input_list = std::move(input_list),
output_node = std::move(output_node)]() {
std::unique_ptr<FunctionLibraryDefinition> lib_def_owner(lib_def);
const NodeDef* node_def = nullptr;
for (const auto& node : graph_def.node()) {
if (node.name() == output_node) {
node_def = &node;
break;
}
}
if (node_def == nullptr) {
VLOG(3) << "Failed to find node: " << output_node;
return;
}
uint64 hash = 0;
Status s = HashNode(graph_def, *node_def, *lib_def, &hash);
if (!s.ok()) {
VLOG(3) << "Failed to hash graph: " << s;
return;
}
for (const auto& pair : input_list) {
hash = Hash64CombineUnordered(hash, Hash64(pair.first));
uint64 tensor_hash = 0;
Status s = HashTensor(pair.second, &tensor_hash);
if (s.ok()) {
hash = Hash64CombineUnordered(hash, tensor_hash);
} else {
VLOG(3) << "Failed to hash tensor: " << s;
}
}
string graph_hash =
strings::StrCat(strings::Hex(hash, strings::kZeroPad16));
metrics::RecordTFDataFingerprint(graph_hash);
});
}
return absl::OkStatus();
}
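// Appends an Identity "Sink" node that consumes the dataset and registers it
// as the "train_op" fetch, giving Grappler a well-defined node to preserve.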
std::unique_ptr<tensorflow::grappler::GrapplerItem> GetGrapplerItem(
GraphDef* graph_def, std::string* dataset_node, bool add_fake_sinks,
bool apply_optimizations) {
NodeDef* node = graph_def->mutable_node()->Add();
tensorflow::grappler::graph_utils::SetUniqueGraphNodeName("Sink", graph_def,
node);
node->set_op("Identity");
node->add_input(*dataset_node);
(*node->mutable_attr())["T"].set_type(DT_VARIANT);
*dataset_node = node->name();
if (add_fake_sinks) {
for (auto& function_def :
*graph_def->mutable_library()->mutable_function()) {
AddFakeSinks(&function_def);
}
}
MetaGraphDef meta_graph_def;
(*meta_graph_def.mutable_graph_def()) = *graph_def;
CollectionDef collection_def;
auto node_list = collection_def.mutable_node_list();
node_list->add_value(*dataset_node);
(*meta_graph_def.mutable_collection_def())["train_op"] = collection_def;
tensorflow::grappler::ItemConfig item_config;
item_config.apply_optimizations = apply_optimizations;
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
tensorflow::grappler::GrapplerItemFromMetaGraphDef(
"graph", meta_graph_def, item_config);
grappler_item->optimization_options().optimize_function_library = false;
return grappler_item;
}
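// Merge policy: explicitly enabled optimizations always apply, defaults apply
// unless disabled, and experiments apply only when they name a registered
// optimizer and are not disabled.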
absl::flat_hash_set<tstring> SelectOptimizations(
const absl::flat_hash_set<string>& experiments,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_disabled,
const absl::flat_hash_set<tstring>& optimizations_default) {
absl::flat_hash_set<tstring> optimizations;
optimizations.insert(optimizations_enabled.begin(),
optimizations_enabled.end());
for (const auto& optimization : optimizations_default) {
if (!optimizations_disabled.contains(optimization)) {
optimizations.insert(optimization);
}
}
const auto& registered_optimizers =
grappler::CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
for (const auto& experiment : experiments) {
if (std::find(registered_optimizers.begin(), registered_optimizers.end(),
experiment) != registered_optimizers.end() &&
!optimizations_disabled.contains(experiment)) {
optimizations.insert(experiment);
}
}
return optimizations;
}
absl::StatusOr<std::string> GetDatasetNode(const GraphDef& graph_def) {
for (const auto& node : graph_def.node()) {
if (node.op() == kRetvalOp) {
return node.input(0);
}
}
return errors::NotFound(
absl::Substitute("Dataset node for graph is not found:\n$0",
graph_def.ShortDebugString()));
}
absl::StatusOr<NodeDef> GetDatasetNodeDef(const GraphDef& graph_def) {
TF_ASSIGN_OR_RETURN(std::string dataset_node_name, GetDatasetNode(graph_def));
for (const auto& node : graph_def.node()) {
if (node.name() == dataset_node_name) {
return node;
}
}
return errors::NotFound(
absl::Substitute("Dataset node for graph is not found:\n$0",
graph_def.ShortDebugString()));
}
}
}
#endif | #include "tensorflow/core/data/rewrite_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::test::AsScalar;
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using ::testing::ElementsAre;
NodeDef GetMapNode(absl::string_view name, absl::string_view input_node_name,
absl::string_view function_name) {
return NDef(
name, "MapDataset", {std::string(input_node_name)},
{{"f", FunctionDefHelper::FunctionRef(std::string(function_name))},
{"Targuments", {}},
{"output_shapes", absl::Span<const TensorShape>{TensorShape()}},
{"output_types", absl::Span<const DataType>{DT_INT64}}});
}
FunctionDef XTimesX() {
return FunctionDefHelper::Create(
"XTimesX",
{"x: int64"},
{"y: int64"},
{},
{{{"y"}, "Mul", {"x", "x"}, {{"T", DT_INT64}}}},
{{"y", "y:z:0"}});
}
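// Builds range(10).map(XTimesX) as a GraphDef terminated by a _Retval node,
// roughly the shape tf.data produces when serializing a dataset for rewrites.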
GraphDef GetRangeSquareDatasetDef(const int64_t range) {
return GDef(
{NDef("start", "Const", {},
{{"value", AsScalar<int64_t>(0)}, {"dtype", DT_INT64}}),
NDef("stop", "Const", {},
{{"value", AsScalar<int64_t>(range)}, {"dtype", DT_INT64}}),
NDef("step", "Const", {},
{{"value", AsScalar<int64_t>(1)}, {"dtype", DT_INT64}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{TensorShape()}},
{"output_types", absl::Span<const DataType>{DT_INT64}}}),
GetMapNode("map", "range", "XTimesX"),
NDef("dataset", "_Retval", {"map"},
{{"T", DT_VARIANT}, {"index", 0}})},
{XTimesX()});
}
TEST(GraphUtilTest, GetFetchNode) {
GraphDef graph = GetRangeSquareDatasetDef(10);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_node, GetDatasetNode(graph));
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
GetGrapplerItem(&graph, &dataset_node, false);
EXPECT_THAT(grappler_item->fetch, ElementsAre("Sink"));
}
TEST(GraphUtilTest, GetFetchNodeDef) {
GraphDef graph = GetRangeSquareDatasetDef(10);
TF_ASSERT_OK_AND_ASSIGN(NodeDef dataset_nodedef, GetDatasetNodeDef(graph));
std::string dataset_node = dataset_nodedef.name();
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
GetGrapplerItem(&graph, &dataset_node, false);
EXPECT_THAT(grappler_item->fetch, ElementsAre("Sink"));
}
struct SelectOptimizationsTestCase {
absl::flat_hash_set<string> experiments;
absl::flat_hash_set<tstring> optimizations_enabled;
absl::flat_hash_set<tstring> optimizations_disabled;
absl::flat_hash_set<tstring> optimizations_default;
std::vector<string> expected;
};
class SelectOptimizationsTest
: public ::testing::TestWithParam<SelectOptimizationsTestCase> {};
TEST_P(SelectOptimizationsTest, DatasetUtils) {
const SelectOptimizationsTestCase test_case = GetParam();
auto optimizations = SelectOptimizations(
test_case.experiments, test_case.optimizations_enabled,
test_case.optimizations_disabled, test_case.optimizations_default);
EXPECT_THAT(std::vector<string>(optimizations.begin(), optimizations.end()),
::testing::UnorderedElementsAreArray(test_case.expected));
}
INSTANTIATE_TEST_SUITE_P(
Test, SelectOptimizationsTest,
::testing::Values(
        SelectOptimizationsTestCase{
            /*experiments=*/{},
            /*optimizations_enabled=*/{},
            /*optimizations_disabled=*/{},
            /*optimizations_default=*/{},
            /*expected=*/{}},
        SelectOptimizationsTestCase{
            /*experiments=*/{"map_and_batch_fusion"},
            /*optimizations_enabled=*/{"bar"},
            /*optimizations_disabled=*/{},
            /*optimizations_default=*/{"baz"},
            /*expected=*/{"map_and_batch_fusion", "bar", "baz"}},
        SelectOptimizationsTestCase{
            /*experiments=*/{"this_is_not_an_optimization"},
            /*optimizations_enabled=*/{"bar"},
            /*optimizations_disabled=*/{},
            /*optimizations_default=*/{"baz"},
            /*expected=*/{"bar", "baz"}},
        SelectOptimizationsTestCase{
            /*experiments=*/{},
            /*optimizations_enabled=*/{"foo"},
            /*optimizations_disabled=*/{"baz"},
            /*optimizations_default=*/{"bar", "baz"},
            /*expected=*/{"foo", "bar"}},
        SelectOptimizationsTestCase{
            /*experiments=*/{"foo"},
            /*optimizations_enabled=*/{"bar"},
            /*optimizations_disabled=*/{"foo"},
            /*optimizations_default=*/{"baz"},
            /*expected=*/{"bar", "baz"}}));
}
}
} |
1,728 | cpp | tensorflow/tensorflow | dataset_utils | tensorflow/core/data/dataset_utils.cc | tensorflow/core/data/dataset_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_DATASET_UTILS_H_
#define TENSORFLOW_CORE_DATA_DATASET_UTILS_H_
#include <atomic>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/resource_handle.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
constexpr int kShardHint = -1;
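// Creates a resource owned by the ResourceMgr under a process-unique name and
// returns a non-owning ("weak") handle to it.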
template <typename T>
Status CreateWeakHandle(OpKernelContext* ctx, T* resource,
const string& container_name, ResourceHandle* handle) {
static std::atomic<int64_t> resource_id_counter(0);
string unique_name =
strings::StrCat(container_name, resource_id_counter.fetch_add(1));
ResourceMgr* mgr = ctx->resource_manager();
TF_RETURN_IF_ERROR(mgr->Create<T>(container_name, unique_name, resource));
*handle = MakeResourceHandle(container_name, unique_name, *ctx->device(),
TypeIndex::Make<T>());
return absl::OkStatus();
}
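// Creates a ref-counted handle that owns `resource`; the ResourceMgr keeps an
// unowned entry so lookups by container/name still work.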
template <typename T>
Status CreateHandle(OpKernelContext* ctx, T* resource, ResourceHandle* handle) {
ResourceMgr* mgr = ctx->resource_manager();
*handle =
ResourceHandle::MakeRefCountingHandle(resource, ctx->device()->name());
TF_RETURN_IF_ERROR(
mgr->CreateUnowned<T>(handle->container(), handle->name(), resource));
return absl::OkStatus();
}
template <typename T>
class AnonymousResourceOp : public OpKernel {
public:
explicit AnonymousResourceOp(OpKernelConstruction* context, bool ref_counting,
bool return_deleter)
: OpKernel(context),
ref_counting_(ref_counting),
return_deleter_(return_deleter) {}
void Compute(OpKernelContext* ctx) override {
FunctionLibraryRuntime* lib;
std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(nullptr);
OP_REQUIRES_OK(
ctx, ctx->function_library()->Clone(&flib_def, &pflr, &lib, true));
T* resource;
OP_REQUIRES_OK(ctx, CreateResource(ctx, std::move(flib_def),
std::move(pflr), lib, &resource));
ResourceHandle handle;
if (ref_counting_) {
OP_REQUIRES_OK(ctx, CreateHandle(ctx, resource, &handle));
} else {
OP_REQUIRES_OK(ctx, CreateWeakHandle(ctx, resource, name(), &handle));
}
Tensor* handle_t;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle_t));
handle_t->scalar<ResourceHandle>()() = handle;
if (return_deleter_) {
Tensor* deleter_t;
AllocatorAttributes attr;
attr.set_on_host(true);
OP_REQUIRES_OK(
ctx, ctx->allocate_output(1, TensorShape({}), &deleter_t, attr));
if (!ref_counting_) {
deleter_t->scalar<Variant>()() =
ResourceDeleter(handle, ctx->resource_manager());
}
}
}
protected:
virtual string name() = 0;
virtual Status CreateResource(
OpKernelContext* ctx, std::unique_ptr<FunctionLibraryDefinition> flib_def,
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr,
FunctionLibraryRuntime* lib, T** resource) = 0;
private:
const bool ref_counting_;
const bool return_deleter_;
};
Status VerifyTypesMatch(const DataTypeVector& expected,
const DataTypeVector& received);
Status VerifyTypesMatch(const DataTypeVector& expected,
const std::vector<Tensor>& received);
Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
const std::vector<PartialTensorShape>& received);
Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
const std::vector<Tensor>& received);
class DeterminismPolicy {
public:
enum class Type : int {
kDeterministic,
kNondeterministic,
kDefault,
};
static constexpr const char* const kDeterministic = "true";
static constexpr const char* const kNondeterministic = "false";
static constexpr const char* const kDefault = "default";
DeterminismPolicy() : determinism_(Type::kDefault) {}
explicit DeterminismPolicy(Type determinism) : determinism_(determinism) {}
explicit DeterminismPolicy(bool is_deterministic);
static Status FromString(const std::string& s, DeterminismPolicy* out);
std::string String() const;
bool IsDeterministic() const { return determinism_ == Type::kDeterministic; }
bool IsNondeterministic() const {
return determinism_ == Type::kNondeterministic;
}
bool IsDefault() const { return determinism_ == Type::kDefault; }
private:
Type determinism_;
};
std::pair<int64_t, int64_t> MaybeOverrideSeeds(
std::pair<int64_t, int64_t> seeds);
Status AddToFunctionLibrary(FunctionLibraryDefinition* base,
const FunctionLibraryDefinition& to_add);
Status AddToFunctionLibrary(FunctionLibraryDefinition* base,
const FunctionDefLibrary& to_add);
Status IsFunctionStateful(const FunctionLibraryDefinition& library,
const FunctionDef& function_def);
Status IsNodeStateful(const FunctionLibraryDefinition& library,
const NodeDef& node);
std::function<void(std::function<void()>)> RunnerWithMaxParallelism(
std::function<void(std::function<void()>)> runner, int max_parallelism);
template <typename ResourceType>
class DummyResourceOp : public OpKernel {
public:
explicit DummyResourceOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
Tensor* tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &tensor));
tensor->scalar<ResourceHandle>()() = MakeResourceHandle<ResourceType>(
ctx, "", "dummy_resource");
}
};
bool MatchesAnyVersion(StringPiece op_prefix, StringPiece op_to_match);
Tensor MaybeCopySubSlice(const Tensor& tensor, int64 index);
void StripDevicePlacement(FunctionDefLibrary* library);
Status CopyPartialBatch(int64_t num_elements, const Tensor& value,
Tensor* output);
Status ReadBatch(IteratorContext* ctx, IteratorStateReader* reader,
int64_t batch_size, const string& iterator_prefix,
const string& batch_prefix, std::vector<Tensor>* batch);
Status WriteBatch(int64_t batch_size, int64_t num_elements,
const string& iterator_prefix, const string& batch_prefix,
IteratorStateWriter* writer, std::vector<Tensor>* batch);
Status ReadStatus(const string& iterator_prefix, const string& prefix,
IteratorStateReader* reader, Status* status);
Status WriteStatus(const string& iterator_prefix, const string& prefix,
const Status& status, IteratorStateWriter* writer);
Status ProcessBatch(int64_t batch_size, int64_t num_elements,
bool drop_remainder, const Status& status,
IteratorContext* ctx, std::vector<Tensor>* output,
bool* end_of_sequence, std::vector<Tensor>* batch);
Status CopyBatch(AnyContext ctx,
std::vector<std::vector<Tensor>>&& batch_elements,
bool parallel_copy, std::vector<Tensor>* out_tensors);
absl::flat_hash_set<string> GetExperiments();
absl::flat_hash_set<string> GetExperiments(
const std::string& job_name, int64_t task_id,
std::function<uint64_t(const string&)> hash_func);
void LogAndRecordExperiments(const absl::flat_hash_set<string>& experiments);
void GetOptimizations(const Options& options,
absl::flat_hash_set<tstring>* optimizations_enabled,
absl::flat_hash_set<tstring>* optimizations_disabled,
absl::flat_hash_set<tstring>* optimizations_default);
absl::flat_hash_set<tstring> CreateGraphRewriteConfigs(const Options& options);
bool ShouldConfigureMaxIntraOpParallelism(const Options& options);
bool ShouldUsePrivateThreadPool(const Options& options);
bool ShouldUseAutotuning(const Options& options);
bool ShouldApplyOptimizations(
const Options& options,
const absl::flat_hash_set<tstring>& optimizations_enabled,
const absl::flat_hash_set<tstring>& optimizations_default);
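// CPU budget for autotuning: the schedulable CPU count, with 20% headroom when
// the "tune_cpu_budget" experiment is active (the double product truncates
// back to int).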
inline int GetCpuBudget() {
static bool in_experiment = GetExperiments().contains("tune_cpu_budget");
return (in_experiment ? 1.2 : 1.0) * port::NumSchedulableCPUs();
}
int64 GetAutotuneDefaultParallelism(IteratorContext* ctx);
IteratorContext MakeNestedIteratorContext(IteratorContext* ctx);
template <int64_t rollout_pct>
bool RandomJobSamplePercentage(uint64_t name_hash) {
return name_hash % 100 < rollout_pct;
}
bool AllTasks(int64_t unused_task_id, bool unused_evens);
bool IndependentHostTasks(int64_t task_id, bool evens);
class DatasetExperimentRegistry {
public:
using JobSelector = std::function<bool(uint64_t name_hash)>;
using TaskSelector = std::function<bool(int64_t task_id, bool evens)>;
struct ExperimentSelector {
JobSelector job_selector;
TaskSelector task_selector;
};
static void Register(const string& experiment, JobSelector job_selector,
TaskSelector task_selector);
static absl::flat_hash_map<string, ExperimentSelector> Experiments();
};
class DatasetExperimentRegistrar {
public:
explicit DatasetExperimentRegistrar(
const string& experiment,
DatasetExperimentRegistry::JobSelector job_selector,
DatasetExperimentRegistry::TaskSelector task_selector) {
DatasetExperimentRegistry::Register(experiment, job_selector,
task_selector);
}
};
#define REGISTER_DATASET_EXPERIMENT(experiment, job_selector, task_selector) \
REGISTER_DATASET_OP_NAME_UNIQ_HELPER(__COUNTER__, experiment, job_selector, \
task_selector)
#define REGISTER_DATASET_OP_NAME_UNIQ_HELPER(ctr, experiment, job_selector, \
task_selector) \
REGISTER_DATASET_OP_NAME_UNIQ(ctr, experiment, job_selector, task_selector)
#define REGISTER_DATASET_OP_NAME_UNIQ(ctr, experiment, job_selector, \
task_selector) \
static ::tensorflow::data::DatasetExperimentRegistrar \
registrar__body__##ctr##__object(experiment, job_selector, \
task_selector)
}
}
#endif
#include "tensorflow/core/data/dataset_utils.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <queue>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOutputSize[] = "output_size";
constexpr char kCode[] = "code";
constexpr char kExperimentOptAll[] = "all";
constexpr char kExperimentOptOutAllExceptOptIn[] = "all_except_opt_in";
constexpr char kMessage[] = "msg";
constexpr char kOutput[] = "output";
static mutex* get_dataset_experiment_registry_lock() {
static mutex dataset_experiment_registry_lock(LINKER_INITIALIZED);
return &dataset_experiment_registry_lock;
}
static absl::flat_hash_map<string,
DatasetExperimentRegistry::ExperimentSelector>*
get_dataset_experiments() {
static absl::flat_hash_map<
string, DatasetExperimentRegistry::ExperimentSelector>* experiments =
new absl::flat_hash_map<string,
DatasetExperimentRegistry::ExperimentSelector>;
return experiments;
}
constexpr char kMapAndBatchFusionOpt[] = "map_and_batch_fusion";
constexpr char kNoopEliminationOpt[] = "noop_elimination";
constexpr char kMapParallelizationOpt[] = "map_parallelization";
constexpr char kShuffleAndRepeatFusionOpt[] = "shuffle_and_repeat_fusion";
constexpr char kFilterFusionOpt[] = "filter_fusion";
constexpr char kMapAndFilterFusionOpt[] = "map_and_filter_fusion";
constexpr char kMapFusionOpt[] = "map_fusion";
constexpr char kParallelBatchOpt[] = "parallel_batch";
constexpr char kAutotuneBufferSizesOpt[] = "autotune_buffer_sizes";
constexpr char kDisablePrefetchLegacyAutotuneOpt[] =
"disable_prefetch_legacy_autotune";
constexpr char kMakeSloppyOpt[] = "make_sloppy";
constexpr char kBatchParallelizationOpt[] = "batch_parallelization";
constexpr char kEnableGradientDescentOpt[] = "enable_gradient_descent";
constexpr char kInjectPrefetchOpt[] = "inject_prefetch";
constexpr char kSeqInterleavePrefetchOpt[] = "seq_interleave_prefetch";
constexpr char kInjectIoPrefetchEligibleOpt[] = "inject_io_prefetch_eligible";
constexpr char kInjectIoPrefetchOpt[] = "inject_io_prefetch";
constexpr char kAutotuneOpt[] = "autotune";
constexpr char kSlackOpt[] = "slack";
constexpr char kSlackPeriodOpt[] = "slack_period";
constexpr char kMakeDeterministicOpt[] = "make_deterministic";
constexpr char kFilterParallelizationOpt[] = "filter_parallelization";
constexpr char kWarmStartOpt[] = "warm_start";
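// Sorts graph rewrites into enabled/disabled/default buckets from the user's
// OptimizationOptions; options left unset fall into the default bucket when
// default optimizations are applied.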
void DefaultOptimizationGraphRewrites(
const Options& options, absl::flat_hash_set<tstring>* optimization_enabled,
absl::flat_hash_set<tstring>* optimization_disabled,
absl::flat_hash_set<tstring>* optimization_default) {
const auto& optimization_options = options.optimization_options();
if (optimization_options.optional_apply_default_optimizations_case() !=
OptimizationOptions::kApplyDefaultOptimizations ||
optimization_options.apply_default_optimizations()) {
if (optimization_options.optional_map_and_batch_fusion_case() !=
OptimizationOptions::kMapAndBatchFusion) {
optimization_default->insert(kMapAndBatchFusionOpt);
}
if (optimization_options.optional_noop_elimination_case() !=
OptimizationOptions::kNoopElimination) {
optimization_default->insert(kNoopEliminationOpt);
}
if (optimization_options.optional_map_parallelization_case() !=
OptimizationOptions::kMapParallelization) {
optimization_default->insert(kMapParallelizationOpt);
}
if (optimization_options.optional_shuffle_and_repeat_fusion_case() !=
OptimizationOptions::kShuffleAndRepeatFusion) {
optimization_default->insert(kShuffleAndRepeatFusionOpt);
}
if (optimization_options.optional_parallel_batch_case() !=
OptimizationOptions::kParallelBatch) {
optimization_default->insert(kParallelBatchOpt);
}
if (optimization_options.optional_inject_prefetch_case() !=
OptimizationOptions::kInjectPrefetch) {
optimization_default->insert(kInjectPrefetchOpt);
}
}
if (OpDeterminismRequired()) {
optimization_enabled->insert(kMakeDeterministicOpt);
}
if (optimization_options.optional_filter_fusion_case() ==
OptimizationOptions::kFilterFusion) {
if (optimization_options.filter_fusion()) {
optimization_enabled->insert(kFilterFusionOpt);
} else {
optimization_disabled->insert(kFilterFusionOpt);
}
}
if (optimization_options.optional_map_and_batch_fusion_case() ==
OptimizationOptions::kMapAndBatchFusion) {
if (optimization_options.map_and_batch_fusion()) {
optimization_enabled->insert(kMapAndBatchFusionOpt);
} else {
optimization_disabled->insert(kMapAndBatchFusionOpt);
}
}
if (optimization_options.optional_map_and_filter_fusion_case() ==
OptimizationOptions::kMapAndFilterFusion) {
if (optimization_options.map_and_filter_fusion()) {
optimization_enabled->insert(kMapAndFilterFusionOpt);
} else {
optimization_disabled->insert(kMapAndFilterFusionOpt);
}
}
if (optimization_options.optional_map_parallelization_case() ==
OptimizationOptions::kMapParallelization) {
if (optimization_options.map_parallelization()) {
optimization_enabled->insert(kMapParallelizationOpt);
} else {
optimization_disabled->insert(kMapParallelizationOpt);
}
}
if (optimization_options.optional_filter_parallelization_case() ==
OptimizationOptions::kFilterParallelization) {
if (optimization_options.filter_parallelization()) {
optimization_enabled->insert(kFilterParallelizationOpt);
} else {
optimization_disabled->insert(kFilterParallelizationOpt);
}
}
if (optimization_options.optional_map_fusion_case() ==
OptimizationOptions::kMapFusion) {
if (optimization_options.map_fusion()) {
optimization_enabled->insert(kMapFusionOpt);
} else {
optimization_disabled->insert(kMapFusionOpt);
}
}
if (optimization_options.optional_noop_elimination_case() ==
OptimizationOptions::kNoopElimination) {
if (optimization_options.noop_elimination()) {
optimization_enabled->insert(kNoopEliminationOpt);
} else {
optimization_disabled->insert(kNoopEliminationOpt);
}
}
if (optimization_options.optional_parallel_batch_case() ==
OptimizationOptions::kParallelBatch) {
if (optimization_options.parallel_batch()) {
optimization_enabled->insert(kParallelBatchOpt);
} else {
optimization_disabled->insert(kParallelBatchOpt);
}
}
if (optimization_options.optional_shuffle_and_repeat_fusion_case() ==
OptimizationOptions::kShuffleAndRepeatFusion) {
if (optimization_options.shuffle_and_repeat_fusion()) {
optimization_enabled->insert(kShuffleAndRepeatFusionOpt);
} else {
optimization_disabled->insert(kShuffleAndRepeatFusionOpt);
}
}
if (optimization_options.optional_inject_prefetch_case() ==
OptimizationOptions::kInjectPrefetch) {
if (optimization_options.inject_prefetch()) {
optimization_enabled->insert(kInjectPrefetchOpt);
} else {
optimization_disabled->insert(kInjectPrefetchOpt);
}
}
if (optimization_options.optional_seq_interleave_prefetch_case() ==
OptimizationOptions::kSeqInterleavePrefetch) {
if (optimization_options.seq_interleave_prefetch()) {
optimization_enabled->insert(kSeqInterleavePrefetchOpt);
} else {
optimization_disabled->insert(kSeqInterleavePrefetchOpt);
}
}
}
bool IsOpAllowlisted(const OpDef* op_def) {
return (op_def->output_arg_size() == 1 &&
op_def->output_arg(0).type() == DT_VARIANT &&
(absl::EndsWith(op_def->name(), "Dataset") ||
absl::EndsWith(op_def->name(), "DatasetV2"))) ||
AllowlistedStatefulOpRegistry::Global()->Contains(op_def->name());
}
}
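// A (0, 0) seed pair means "unseeded"; replace it with two fresh random seeds
// so unseeded pipelines are not accidentally deterministic.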
std::pair<int64_t, int64_t> MaybeOverrideSeeds(
std::pair<int64_t, int64_t> seeds) {
if (seeds.first == 0 && seeds.second == 0) {
return {random::New64(), random::New64()};
}
return seeds;
}
Status VerifyTypeMatch(const DataType& expected, const DataType& received,
int index) {
if (expected != received) {
return errors::InvalidArgument("Data type mismatch at component ", index,
": expected ", DataTypeString(expected),
" but got ", DataTypeString(received), ".");
}
return absl::OkStatus();
}
Status VerifyTypesMatch(const DataTypeVector& expected,
const DataTypeVector& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" types but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(VerifyTypeMatch(expected[i], received[i], i));
}
return absl::OkStatus();
}
Status VerifyTypesMatch(const DataTypeVector& expected,
const std::vector<Tensor>& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" types but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(VerifyTypeMatch(expected[i], received[i].dtype(), i));
}
return absl::OkStatus();
}
Status VerifyShapeCompatible(const PartialTensorShape& expected,
const PartialTensorShape& received, int index) {
if (!expected.IsCompatibleWith(received)) {
return errors::InvalidArgument("Incompatible shapes at component ", index,
": expected ", expected.DebugString(),
" but got ", received.DebugString(), ".");
}
return absl::OkStatus();
}
Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
const std::vector<PartialTensorShape>& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" shapes but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(VerifyShapeCompatible(expected[i], received[i], i));
}
return absl::OkStatus();
}
Status VerifyShapesCompatible(const std::vector<PartialTensorShape>& expected,
const std::vector<Tensor>& received) {
if (expected.size() != received.size()) {
return errors::InvalidArgument(
"Number of components does not match: expected ", expected.size(),
" shapes but got ", received.size(), ".");
}
for (size_t i = 0; i < expected.size(); ++i) {
TF_RETURN_IF_ERROR(
VerifyShapeCompatible(expected[i], received[i].shape(), i));
}
return absl::OkStatus();
}
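// Merges functions into `base`, replacing an existing function only when its
// signature matches exactly; a differing signature is an InvalidArgument
// error. The FunctionDefLibrary overload below mirrors this behavior.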
Status AddToFunctionLibrary(FunctionLibraryDefinition* base,
const FunctionLibraryDefinition& to_add) {
for (const auto& fn : to_add.ListFunctionNames()) {
if (auto found = base->Find(fn)) {
if (!OpDefEqual(found->signature(), to_add.Find(fn)->signature())) {
return errors::InvalidArgument("Cannot add function '", fn,
"' because a different function with "
"the same signature already exists.");
}
TF_RETURN_IF_ERROR(base->RemoveFunction(fn));
}
}
return base->AddLibrary(to_add);
}
Status AddToFunctionLibrary(FunctionLibraryDefinition* base,
const FunctionDefLibrary& to_add) {
for (const auto& fd : to_add.function()) {
if (auto found = base->Find(fd.signature().name())) {
if (!OpDefEqual(found->signature(), fd.signature())) {
return errors::InvalidArgument("Cannot add function '",
fd.signature().name(),
"' because a different function with "
"the same signature already exists.");
}
TF_RETURN_IF_ERROR(base->RemoveFunction(fd.signature().name()));
}
}
return base->AddLibrary(to_add);
}
Status IsFunctionStateful(const FunctionLibraryDefinition& library,
const FunctionDef& function_def) {
if (!function_def.signature().is_stateful()) {
return absl::OkStatus();
}
for (const NodeDef& node_def : function_def.node_def()) {
TF_RETURN_IF_ERROR(IsNodeStateful(library, node_def));
}
return absl::OkStatus();
}
Status | #include "tensorflow/core/data/dataset_utils.h"
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "xla/tsl/util/determinism_test_util.h"
#include "tensorflow/core/data/compression_utils.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/test_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/work_sharder.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(DatasetUtilsTest, MatchesAnyVersion) {
EXPECT_TRUE(MatchesAnyVersion("BatchDataset", "BatchDataset"));
EXPECT_TRUE(MatchesAnyVersion("BatchDataset", "BatchDatasetV2"));
EXPECT_TRUE(MatchesAnyVersion("BatchDataset", "BatchDatasetV3"));
EXPECT_FALSE(MatchesAnyVersion("BatchDataset", "BatchDatasetXV3"));
EXPECT_FALSE(MatchesAnyVersion("BatchDataset", "BatchV2Dataset"));
EXPECT_FALSE(MatchesAnyVersion("BatchDataset", "PaddedBatchDataset"));
}
TEST(DatasetUtilsTest, AddToFunctionLibrary) {
auto make_fn_a = [](const string& fn_name) {
return FunctionDefHelper::Create(
fn_name,
{"arg: int64"},
{"ret: int64"},
{},
{{{"node"}, "Identity", {"arg"}, {{"T", DT_INT64}}}},
{{"ret", "node:output:0"}});
};
auto make_fn_b = [](const string& fn_name) {
return FunctionDefHelper::Create(
fn_name,
{"arg: int64"},
{"ret: int64"},
{},
{{{"node"}, "Identity", {"arg"}, {{"T", DT_INT64}}},
{{"node2"}, "Identity", {"node:output:0"}, {{"T", DT_INT64}}}},
{{"ret", "node2:output:0"}});
};
FunctionDefLibrary fdef_base;
*fdef_base.add_function() = make_fn_a("0");
*fdef_base.add_function() = make_fn_a("1");
*fdef_base.add_function() = make_fn_a("2");
FunctionDefLibrary fdef_to_add;
*fdef_to_add.add_function() = make_fn_b("0");
*fdef_to_add.add_function() = make_fn_a("1");
*fdef_to_add.add_function() = make_fn_b("3");
FunctionLibraryDefinition flib_0(OpRegistry::Global(), fdef_base);
TF_ASSERT_OK(AddToFunctionLibrary(&flib_0, fdef_to_add));
FunctionLibraryDefinition flib_1(OpRegistry::Global(), fdef_base);
FunctionLibraryDefinition flib_to_add(OpRegistry::Global(), fdef_to_add);
TF_ASSERT_OK(AddToFunctionLibrary(&flib_1, flib_to_add));
for (const auto& flib : {flib_0, flib_1}) {
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("0"), make_fn_b("0")));
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("1"), make_fn_a("1")));
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("2"), make_fn_a("2")));
EXPECT_TRUE(FunctionDefsEqual(*flib.Find("3"), make_fn_b("3")));
}
}
TEST(DatasetUtilsTest, AddToFunctionLibraryWithConflictingSignatures) {
FunctionDefLibrary fdef_base;
*fdef_base.add_function() = FunctionDefHelper::Create(
"0",
{"arg: int64"},
{"ret: int64"},
{},
{},
{{"ret", "arg"}});
FunctionDefLibrary fdef_to_add;
*fdef_to_add.add_function() = FunctionDefHelper::Create(
"0",
{"arg: int64"},
{"ret: int64", "ret2: int64"},
{},
{},
{{"ret", "arg"}, {"ret2", "arg"}});
FunctionLibraryDefinition flib_0(OpRegistry::Global(), fdef_base);
Status s = AddToFunctionLibrary(&flib_0, fdef_to_add);
EXPECT_EQ(error::Code::INVALID_ARGUMENT, s.code());
EXPECT_EQ(
"Cannot add function '0' because a different function with the same "
"signature already exists.",
s.message());
FunctionLibraryDefinition flib_1(OpRegistry::Global(), fdef_base);
FunctionLibraryDefinition flib_to_add(OpRegistry::Global(), fdef_to_add);
s = AddToFunctionLibrary(&flib_1, flib_to_add);
EXPECT_EQ(error::Code::INVALID_ARGUMENT, s.code());
EXPECT_EQ(
"Cannot add function '0' because a different function with the same "
"signature already exists.",
s.message());
}
TEST(DatasetUtilsTest, StripDevicePlacement) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Create(
"0",
{"arg: int64"},
{"ret: int64"},
{},
{{{"node"},
"Identity",
{"arg"},
{{"T", DT_INT64}},
{},
"device:CPU:0"}},
{{"ret", "arg"}});
EXPECT_EQ(flib.function(0).node_def(0).device(), "device:CPU:0");
StripDevicePlacement(&flib);
EXPECT_EQ(flib.function(0).node_def(0).device(), "");
}
TEST(DatasetUtilsTest, RunnerWithMaxParallelism) {
auto runner =
RunnerWithMaxParallelism([](const std::function<void()> fn) { fn(); }, 2);
auto fn = []() { ASSERT_EQ(GetPerThreadMaxParallelism(), 2); };
runner(fn);
}
TEST(DatasetUtilsTest, ParseDeterminismPolicy) {
DeterminismPolicy determinism;
TF_ASSERT_OK(DeterminismPolicy::FromString("true", &determinism));
EXPECT_TRUE(determinism.IsDeterministic());
TF_ASSERT_OK(DeterminismPolicy::FromString("false", &determinism));
EXPECT_TRUE(determinism.IsNondeterministic());
TF_ASSERT_OK(DeterminismPolicy::FromString("default", &determinism));
EXPECT_TRUE(determinism.IsDefault());
}
TEST(DatasetUtilsTest, DeterminismString) {
for (auto s : {"true", "false", "default"}) {
DeterminismPolicy determinism;
TF_ASSERT_OK(DeterminismPolicy::FromString(s, &determinism));
EXPECT_TRUE(s == determinism.String());
}
}
TEST(DatasetUtilsTest, BoolConstructor) {
EXPECT_TRUE(DeterminismPolicy(true).IsDeterministic());
EXPECT_FALSE(DeterminismPolicy(true).IsNondeterministic());
EXPECT_FALSE(DeterminismPolicy(true).IsDefault());
EXPECT_TRUE(DeterminismPolicy(false).IsNondeterministic());
EXPECT_FALSE(DeterminismPolicy(false).IsDeterministic());
EXPECT_FALSE(DeterminismPolicy(false).IsDefault());
}
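// A do-nothing SplitProvider, just enough to verify that
// MakeNestedIteratorContext does not forward split providers to nested
// iterators.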
class TestSplitProvider : public SplitProvider {
public:
Status GetNext(Tensor* split, bool* end_of_splits) override {
return absl::OkStatus();
}
Status Reset() override { return absl::OkStatus(); }
Status Save(std::function<std::string(std::string)> key_name_fn,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status Restore(std::function<std::string(std::string)> key_name_fn,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
TEST(DatasetUtilsTest, MakeNestedIteratorContext) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<TestContext> test_ctx,
TestContext::Create());
IteratorContext::Params params(test_ctx->op_ctx());
params.split_providers.push_back(std::make_unique<TestSplitProvider>());
IteratorContext iter_ctx(params);
IteratorContext nested_ctx = MakeNestedIteratorContext(&iter_ctx);
EXPECT_FALSE(iter_ctx.split_providers().empty());
EXPECT_TRUE(nested_ctx.split_providers().empty());
}
REGISTER_DATASET_EXPERIMENT("test_only_experiment_0",
RandomJobSamplePercentage<0>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_1",
RandomJobSamplePercentage<1>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_5",
RandomJobSamplePercentage<5>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_10",
RandomJobSamplePercentage<10>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_50",
RandomJobSamplePercentage<50>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_99",
RandomJobSamplePercentage<99>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_experiment_100",
RandomJobSamplePercentage<100>, AllTasks);
REGISTER_DATASET_EXPERIMENT("test_only_task_experiment_100",
RandomJobSamplePercentage<100>,
IndependentHostTasks);
struct GetExperimentsHashTestCase {
uint64 hash;
std::vector<string> expected_in;
std::vector<string> expected_out;
};
class GetExperimentsHashTest
: public ::testing::TestWithParam<GetExperimentsHashTestCase> {};
TEST_P(GetExperimentsHashTest, DatasetUtils) {
const GetExperimentsHashTestCase test_case = GetParam();
uint64 hash_result = test_case.hash;
const std::string job_name = "job";
const int64_t task_id = 0;
auto hash_func = [hash_result](const string& str) { return hash_result; };
auto experiments = GetExperiments(job_name, task_id, hash_func);
absl::flat_hash_set<string> experiment_set(experiments.begin(),
experiments.end());
for (const auto& experiment : test_case.expected_in) {
EXPECT_TRUE(experiment_set.find(experiment) != experiment_set.end())
<< "experiment=" << experiment << " hash=" << hash_result;
}
for (const auto& experiment : test_case.expected_out) {
EXPECT_TRUE(experiment_set.find(experiment) == experiment_set.end())
<< "experiment=" << experiment << " hash=" << hash_result;
}
}
INSTANTIATE_TEST_SUITE_P(
Test, GetExperimentsHashTest,
::testing::Values<GetExperimentsHashTestCase>(
GetExperimentsHashTestCase{
0,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0"},
},
GetExperimentsHashTestCase{
5,
{"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{
"test_only_experiment_0",
"test_only_experiment_1",
"test_only_experiment_5",
},
},
GetExperimentsHashTestCase{
95,
{"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50"},
},
GetExperimentsHashTestCase{
99,
{"test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99"},
},
GetExperimentsHashTestCase{
100,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0"},
},
GetExperimentsHashTestCase{
105,
{"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{
"test_only_experiment_0",
"test_only_experiment_1",
"test_only_experiment_5",
},
},
GetExperimentsHashTestCase{
195,
{"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50"},
}));
struct GetExperimentsOptTestCase {
std::vector<string> opt_ins;
std::vector<string> opt_outs;
std::vector<string> expected_in;
std::vector<string> expected_out;
};
class GetExperimentsOptTest
: public ::testing::TestWithParam<GetExperimentsOptTestCase> {};
TEST_P(GetExperimentsOptTest, DatasetUtils) {
const GetExperimentsOptTestCase test_case = GetParam();
auto opt_ins = test_case.opt_ins;
auto opt_outs = test_case.opt_outs;
if (!opt_ins.empty()) {
setenv("TF_DATA_EXPERIMENT_OPT_IN", str_util::Join(opt_ins, ",").c_str(),
1);
}
if (!opt_outs.empty()) {
setenv("TF_DATA_EXPERIMENT_OPT_OUT", str_util::Join(opt_outs, ",").c_str(),
1);
}
const std::string job_name = "job";
const int64_t task_id = 0;
auto hash_func = [](const string& str) { return 0; };
auto experiments = GetExperiments(job_name, task_id, hash_func);
absl::flat_hash_set<string> experiment_set(experiments.begin(),
experiments.end());
for (const auto& experiment : test_case.expected_in) {
EXPECT_TRUE(experiment_set.find(experiment) != experiment_set.end())
<< "experiment=" << experiment << " opt_ins={"
<< str_util::Join(opt_ins, ",") << "} opt_outs={"
<< str_util::Join(opt_outs, ",") << "}";
}
for (const auto& experiment : test_case.expected_out) {
EXPECT_TRUE(experiment_set.find(experiment) == experiment_set.end())
<< "experiment=" << experiment << " opt_ins={"
<< str_util::Join(opt_ins, ",") << "} opt_outs={"
<< str_util::Join(opt_outs, ",") << "}";
}
if (!opt_ins.empty()) {
unsetenv("TF_DATA_EXPERIMENT_OPT_IN");
}
if (!opt_outs.empty()) {
unsetenv("TF_DATA_EXPERIMENT_OPT_OUT");
}
}
INSTANTIATE_TEST_SUITE_P(
Test, GetExperimentsOptTest,
::testing::Values<GetExperimentsOptTestCase>(
GetExperimentsOptTestCase{
{"all"},
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"}},
GetExperimentsOptTestCase{
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"},
{}},
GetExperimentsOptTestCase{
{"all"},
{"test_only_experiment_1", "test_only_experiment_99"},
{"test_only_experiment_0", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_99"}},
GetExperimentsOptTestCase{
{},
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"}},
GetExperimentsOptTestCase{
{},
{},
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0"}},
GetExperimentsOptTestCase{
{},
{"test_only_experiment_1", "test_only_experiment_99"},
{"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_99"}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{"all"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{"all_except_opt_in"},
{"test_only_experiment_0", "test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99"}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100"},
{}},
GetExperimentsOptTestCase{
{"test_only_experiment_0", "test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_99"},
{"test_only_experiment_0", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_100"},
{"test_only_experiment_1", "test_only_experiment_99"}}));
struct GetExperimentsJobNameTestCase {
uint64_t hash;
string job_name;
int64_t task_id;
std::vector<string> expected_in;
std::vector<string> expected_out;
};
class GetExperimentsJobNameTest
: public ::testing::TestWithParam<GetExperimentsJobNameTestCase> {};
TEST_P(GetExperimentsJobNameTest, DatasetUtils) {
const GetExperimentsJobNameTestCase test_case = GetParam();
auto job_name = test_case.job_name;
auto task_id = test_case.task_id;
uint64 hash_result = test_case.hash;
auto hash_func = [hash_result](const string& str) { return hash_result; };
auto experiments = GetExperiments(job_name, task_id, hash_func);
absl::flat_hash_set<string> experiment_set(experiments.begin(),
experiments.end());
for (const auto& experiment : test_case.expected_in) {
EXPECT_TRUE(experiment_set.find(experiment) != experiment_set.end())
<< "experiment=" << experiment << " job_name=" << job_name;
}
for (const auto& experiment : test_case.expected_out) {
EXPECT_TRUE(experiment_set.find(experiment) == experiment_set.end())
<< "experiment=" << experiment << " job_name=" << job_name;
}
}
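// As the cases below encode: an empty job name or a negative task id opts the
// job out of every experiment, while the injected hash value determines which
// percentage-rollout buckets (1%, 5%, 10%, ...) a given job lands in.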
INSTANTIATE_TEST_SUITE_P(
Test, GetExperimentsJobNameTest,
::testing::Values(
GetExperimentsJobNameTestCase{
0,
"",
0,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"",
-1,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"",
2,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
-1,
{},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_experiment_99",
"test_only_experiment_100", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
0,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100",
"test_only_task_experiment_100"},
{"test_only_experiment_0"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
1,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100",
"test_only_task_experiment_100"},
{"test_only_experiment_0"}},
GetExperimentsJobNameTestCase{
0,
"job_name",
2,
{"test_only_experiment_1", "test_only_experiment_5",
"test_only_experiment_10", "test_only_experiment_50",
"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
95,
"job_name",
1,
{"test_only_experiment_99", "test_only_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50", "test_only_task_experiment_100"}},
GetExperimentsJobNameTestCase{
95,
"job_name",
2,
{"test_only_experiment_99", "test_only_experiment_100",
"test_only_task_experiment_100"},
{"test_only_experiment_0", "test_only_experiment_1",
"test_only_experiment_5", "test_only_experiment_10",
"test_only_experiment_50"}}));
struct GetOptimizationsTestCase {
Options options;
std::vector<string> expected_enabled;
std::vector<string> expected_disabled;
std::vector<string> expected_default;
};
GetOptimizationsTestCase GetOptimizationTestCase1() {
return {
Options(),
{},
{},
{"noop_elimination", "map_and_batch_fusion", "shuffle_and_repeat_fusion",
"map_parallelization", "parallel_batch", "inject_prefetch"}};
}
GetOptimizationsTestCase GetOptimizationTestCase2() {
Options options;
options.mutable_optimization_options()->set_apply_default_optimizations(
false);
  return {options, {}, {}, {}};
}
GetOptimizationsTestCase GetOptimizationTestCase3() {
Options options;
options.set_deterministic(false);
options.mutable_optimization_options()->set_map_and_batch_fusion(true);
options.mutable_optimization_options()->set_map_parallelization(false);
options.mutable_optimization_options()->set_parallel_batch(false);
return {options,
{"make_sloppy", "map_and_batch_fusion"},
{"parallel_batch", "map_parallelization"},
{"noop_elimination", "shuffle_and_repeat_fusion", "inject_prefetch"}};
}
GetOptimizationsTestCase GetOptimizationTestCase4() {
Options options;
options.set_deterministic(false);
options.mutable_optimization_options()->set_filter_fusion(true);
options.mutable_optimization_options()->set_filter_parallelization(true);
options.mutable_optimization_options()->set_map_and_batch_fusion(true);
options.mutable_optimization_options()->set_map_and_filter_fusion(true);
options.mutable_optimization_options()->set_map_fusion(true);
options.mutable_optimization_options()->set_map_parallelization(true);
options.mutable_optimization_options()->set_noop_elimination(true);
options.mutable_optimization_options()->set_parallel_batch(true);
options.mutable_optimization_options()->set_shuffle_and_repeat_fusion(true);
options.mutable_optimization_options()->set_inject_prefetch(true);
options.mutable_optimization_options()->set_seq_interleave_prefetch(true);
options.set_slack(true);
return {options,
{"filter_fusion", "filter_parallelization", "make_sloppy",
"map_and_batch_fusion", "map_and_filter_fusion", "map_fusion",
"map_parallelization", "noop_elimination", "parallel_batch",
"shuffle_and_repeat_fusion", "slack", "inject_prefetch",
"seq_interleave_prefetch"},
{},
{}};
}
class GetOptimizationsTest
: public ::testing::TestWithParam<GetOptimizationsTestCase> {};
TEST_P(GetOptimizationsTest, DatasetUtils) {
const GetOptimizationsTestCase test_case = GetParam();
auto options = test_case.options;
absl::flat_hash_set<tstring> actual_enabled, actual_disabled, actual_default;
GetOptimizations(options, &actual_enabled, &actual_disabled, &actual_default);
EXPECT_THAT(std::vector<string>(actual_enabled.begin(), actual_enabled.end()),
::testing::UnorderedElementsAreArray(test_case.expected_enabled));
EXPECT_THAT(
std::vector<string>(actual_disabled.begin(), actual_disabled.end()),
::testing::UnorderedElementsAreArray(test_case.expected_disabled));
EXPECT_THAT(std::vector<string>(actual_default.begin(), actual_default.end()),
::testing::UnorderedElementsAreArray(test_case.expected_default));
}
INSTANTIATE_TEST_SUITE_P(Test, GetOptimizationsTest,
::testing::Values(GetOptimizationTestCase1(),
GetOptimizationTestCase2(),
GetOptimizationTestCase3(),
GetOptimizationTestCase4()));
TEST(DeterministicOpsTest, GetOptimizations) {
#if !defined(__APPLE__)
tsl::test::DeterministicOpsScope det_scope;
Options options;
options.set_deterministic(false);
absl::flat_hash_set<tstring> actual_enabled, actual_disabled, actual_default;
GetOptimizations(options, &actual_enabled, &actual_disabled, &actual_default);
EXPECT_THAT(std::vector<string>(actual_enabled.begin(), actual_enabled.end()),
::testing::UnorderedElementsAreArray({"make_deterministic"}));
EXPECT_EQ(actual_disabled.size(), 0);
#endif
}
REGISTER_DATASET_EXPERIMENT("test_only_experiment",
RandomJobSamplePercentage<42>, AllTasks);
TEST(DatasetUtilsTest, DatasetExperimentRegistry) {
auto experiments = DatasetExperimentRegistry::Experiments();
EXPECT_TRUE(experiments.find("test_only_experiment") != experiments.end());
EXPECT_TRUE(experiments.find("non_existing_experiment") == experiments.end());
}
TEST(DatasetUtilsTest, CountBytes) {
std::vector<Tensor> uncompressed = {
CreateTensor<int64_t>(TensorShape{128, 2}),
CreateTensor<int64_t>(TensorShape{64, 4})};
  EXPECT_EQ(GetAllocatedBytes(uncompressed), 4096);  // (128*2 + 64*4) elements * 8 bytes
EXPECT_EQ(GetTotalBytes(uncompressed), 4096);
CompressedElement compressed_element;
TF_ASSERT_OK(CompressElement(uncompressed, &compressed_element));
std::vector<Tensor> compressed{{DT_VARIANT, TensorShape({})}};
compressed.front().scalar<Variant>()() = compressed_element;
EXPECT_EQ(GetAllocatedBytes(compressed), compressed_element.ByteSizeLong());
EXPECT_EQ(GetTotalBytes(compressed), compressed_element.ByteSizeLong());
}
TEST_F(DatasetOpsTestBase, TestVariantEqualityChecking) {
Tensor scalar_0{DT_VARIANT, TensorShape({})};
scalar_0.scalar<Variant>()() = TestVariant({CreateTensor<int64_t>({}, {0})});
TF_EXPECT_OK(ExpectEqual(scalar_0, scalar_0));
Tensor scalar_1{DT_VARIANT, TensorShape({})};
scalar_1.scalar<Variant>()() = TestVariant({CreateTensor<int64_t>({}, {1})});
EXPECT_THAT(ExpectEqual(scalar_0, scalar_1),
StatusIs(tsl::error::INTERNAL, HasSubstr("aren't equal")));
Tensor nonscalar{DT_VARIANT, TensorShape({2})};
EXPECT_THAT(ExpectEqual(nonscalar, nonscalar),
StatusIs(tsl::error::INTERNAL, HasSubstr("must be scalars")));
Tensor unsupported{DT_VARIANT, TensorShape({})};
unsupported.scalar<Variant>()() = 0;
EXPECT_THAT(ExpectEqual(unsupported, unsupported),
StatusIs(tsl::error::INTERNAL, HasSubstr("types must be")));
}
}
}
} |
1,729 | cpp | tensorflow/tensorflow | compression_utils | tensorflow/core/data/compression_utils.cc | tensorflow/core/data/compression_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_COMPRESSION_UTILS_H_
#define TENSORFLOW_CORE_DATA_COMPRESSION_UTILS_H_
#include <vector>
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace data {
Status CompressElement(const std::vector<Tensor>& element,
CompressedElement* out);
Status UncompressElement(const CompressedElement& compressed,
std::vector<Tensor>* out);
}
}
#endif
#include "tensorflow/core/data/compression_utils.h"
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/snappy.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr int kCompressedElementVersion = 0;
}
class Iov {
public:
explicit Iov(size_t size) : iov_(size), idx_(0), num_bytes_(0) {}
void Add(void* base, size_t len) {
iov_[idx_].iov_base = base;
iov_[idx_].iov_len = len;
num_bytes_ += len;
++idx_;
}
iovec* Data() { return iov_.data(); }
size_t NumBytes() const { return num_bytes_; }
size_t NumPieces() const { return iov_.size(); }
private:
std::vector<struct iovec> iov_;
size_t idx_;
size_t num_bytes_;
};
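// Iov builds the scatter/gather list handed to the platform's Snappy IOVec
// routines, letting tensor buffers be (de)compressed without first being
// concatenated. A minimal sketch (the buffers here are hypothetical):
//
//   char a[4], b[8];
//   Iov iov(2);
//   iov.Add(a, sizeof(a));
//   iov.Add(b, sizeof(b));
//   // iov.NumBytes() == 12, iov.NumPieces() == 2; iov.Data() can now be
//   // passed to port::Snappy_CompressFromIOVec().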
Status CompressElement(const std::vector<Tensor>& element,
CompressedElement* out) {
size_t num_string_tensors = 0;
size_t num_string_tensor_strings = 0;
std::vector<TensorProto> nonmemcpyable_components;
size_t total_nonmemcpyable_size = 0;
for (const auto& component : element) {
if (component.dtype() == DT_STRING) {
++num_string_tensors;
num_string_tensor_strings += component.NumElements();
} else if (!DataTypeCanUseMemcpy(component.dtype())) {
nonmemcpyable_components.emplace_back();
component.AsProtoTensorContent(&nonmemcpyable_components.back());
total_nonmemcpyable_size +=
nonmemcpyable_components.back().ByteSizeLong();
}
}
Iov iov{element.size() + num_string_tensor_strings - num_string_tensors};
tstring nonmemcpyable;
nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
char* nonmemcpyable_pos = nonmemcpyable.mdata();
int nonmemcpyable_component_index = 0;
for (int i = 0; i < element.size(); ++i) {
const auto& component = element[i];
CompressedComponentMetadata* metadata =
out->mutable_component_metadata()->Add();
metadata->set_dtype(component.dtype());
component.shape().AsProto(metadata->mutable_tensor_shape());
if (DataTypeCanUseMemcpy(component.dtype())) {
const TensorBuffer* buffer = DMAHelper::buffer(&component);
if (buffer) {
iov.Add(buffer->data(), buffer->size());
metadata->add_uncompressed_bytes(buffer->size());
}
} else if (component.dtype() == DT_STRING) {
const auto& flats = component.unaligned_flat<tstring>();
      for (int j = 0; j < flats.size(); ++j) {
        iov.Add(const_cast<char*>(flats.data()[j].data()),
                flats.data()[j].size());
        metadata->add_uncompressed_bytes(flats.data()[j].size());
}
} else {
TensorProto& proto =
nonmemcpyable_components[nonmemcpyable_component_index++];
proto.SerializeToArray(nonmemcpyable_pos, proto.ByteSizeLong());
iov.Add(nonmemcpyable_pos, proto.ByteSizeLong());
nonmemcpyable_pos += proto.ByteSizeLong();
metadata->add_uncompressed_bytes(proto.ByteSizeLong());
}
}
if (iov.NumBytes() > kuint32max) {
return errors::OutOfRange("Encountered dataset element of size ",
iov.NumBytes(),
", exceeding the 4GB Snappy limit.");
}
if (!port::Snappy_CompressFromIOVec(iov.Data(), iov.NumBytes(),
out->mutable_data())) {
return errors::Internal("Failed to compress using snappy.");
}
out->set_version(kCompressedElementVersion);
VLOG(3) << "Compressed element from " << iov.NumBytes() << " bytes to "
<< out->data().size() << " bytes";
return absl::OkStatus();
}
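// Wire format produced above: `out` carries one CompressedComponentMetadata
// per component (dtype, shape, per-piece uncompressed byte counts) plus a
// single Snappy-compressed blob concatenating each component's bytes in
// element order -- the raw buffer for memcpy-able dtypes, the individual
// strings for DT_STRING, or a serialized TensorProto otherwise.
// UncompressElement below depends on exactly this ordering.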
Status UncompressElement(const CompressedElement& compressed,
std::vector<Tensor>* out) {
if (compressed.version() != kCompressedElementVersion) {
return errors::Internal("Unsupported compressed element version: ",
compressed.version());
}
int num_components = compressed.component_metadata_size();
out->clear();
out->reserve(num_components);
size_t num_string_tensors = 0;
size_t num_string_tensor_strings = 0;
size_t total_nonmemcpyable_size = 0;
for (const auto& metadata : compressed.component_metadata()) {
if (metadata.dtype() == DT_STRING) {
++num_string_tensors;
num_string_tensor_strings += metadata.uncompressed_bytes_size();
} else if (!DataTypeCanUseMemcpy(metadata.dtype())) {
total_nonmemcpyable_size += metadata.uncompressed_bytes(0);
}
}
Iov iov{num_components + num_string_tensor_strings - num_string_tensors};
tstring nonmemcpyable;
nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
char* nonmemcpyable_pos = nonmemcpyable.mdata();
for (const auto& metadata : compressed.component_metadata()) {
if (DataTypeCanUseMemcpy(metadata.dtype())) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
TensorBuffer* buffer = DMAHelper::buffer(&out->back());
if (buffer) {
iov.Add(buffer->data(), metadata.uncompressed_bytes(0));
}
} else if (metadata.dtype() == DT_STRING) {
out->emplace_back(metadata.dtype(), metadata.tensor_shape());
const auto& flats = out->back().unaligned_flat<tstring>();
for (int i = 0; i < metadata.uncompressed_bytes_size(); ++i) {
flats.data()[i].resize(metadata.uncompressed_bytes(i));
iov.Add(flats.data()[i].mdata(), metadata.uncompressed_bytes(i));
}
} else {
out->emplace_back();
iov.Add(nonmemcpyable_pos, metadata.uncompressed_bytes(0));
nonmemcpyable_pos += metadata.uncompressed_bytes(0);
}
}
const std::string& compressed_data = compressed.data();
size_t uncompressed_size;
if (!port::Snappy_GetUncompressedLength(
compressed_data.data(), compressed_data.size(), &uncompressed_size)) {
return errors::Internal(
"Could not get snappy uncompressed length. Compressed data size: ",
compressed_data.size());
}
if (uncompressed_size != static_cast<size_t>(iov.NumBytes())) {
return errors::Internal(
"Uncompressed size mismatch. Snappy expects ", uncompressed_size,
" whereas the tensor metadata suggests ", iov.NumBytes());
}
if (!port::Snappy_UncompressToIOVec(compressed_data.data(),
compressed_data.size(), iov.Data(),
iov.NumPieces())) {
return errors::Internal("Failed to perform snappy decompression.");
}
nonmemcpyable_pos = nonmemcpyable.mdata();
for (int i = 0; i < num_components; ++i) {
const CompressedComponentMetadata& metadata =
compressed.component_metadata(i);
if (!DataTypeCanUseMemcpy(metadata.dtype()) &&
metadata.dtype() != DT_STRING) {
TensorProto tp;
if (!tp.ParseFromString(
{nonmemcpyable_pos,
static_cast<size_t>(metadata.uncompressed_bytes(0))})) {
return errors::Internal("Could not parse TensorProto");
}
if (!out->at(i).FromProto(tp)) {
return errors::Internal("Could not parse Tensor");
}
nonmemcpyable_pos += metadata.uncompressed_bytes(0);
}
}
return absl::OkStatus();
}
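// Round-trip sketch (assumes the includes above; `MakeElement` is a
// hypothetical helper returning a std::vector<Tensor>):
//
//   CompressedElement compressed;
//   TF_RETURN_IF_ERROR(CompressElement(MakeElement(), &compressed));
//   std::vector<Tensor> restored;
//   TF_RETURN_IF_ERROR(UncompressElement(compressed, &restored));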
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompressedElement,
"tensorflow.data.CompressedElement");
}
} | #include "tensorflow/core/data/compression_utils.h"
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
TEST(CompressionUtilsTest, Exceeds4GB) {
std::vector<Tensor> element = {
CreateTensor<int64_t>(TensorShape{1024, 1024, 513})};
CompressedElement compressed;
EXPECT_THAT(CompressElement(element, &compressed),
StatusIs(error::OUT_OF_RANGE,
HasSubstr("exceeding the 4GB Snappy limit")));
}
std::vector<std::vector<Tensor>> TestCases() {
return {
CreateTensors<int64_t>(TensorShape{1}, {{1}}),
CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
{CreateTensor<tstring>(TensorShape{1, 2}, {"abc", "xyz"}),
CreateTensor<tstring>(TensorShape{2, 1}, {"ijk", "mnk"})},
{CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{1}, {1})},
{},
{CreateTensor<int64_t>(TensorShape{1, 0})},
{CreateTensor<int64_t>(TensorShape{128, 128}),
CreateTensor<int64_t>(TensorShape{64, 2})},
{
DatasetOpsTestBase::CreateTestVariantTensor(
{CreateTensor<int64_t>(TensorShape{3, 1}, {1, 2, 3}),
CreateTensor<tstring>(TensorShape{}, {"abc"})}),
DatasetOpsTestBase::CreateTestVariantTensor(
{CreateTensor<int64_t>(TensorShape{3, 1}, {10, 11, 12}),
CreateTensor<tstring>(TensorShape{}, {"xyz"})}),
},
};
}
class ParameterizedCompressionUtilsTest
: public DatasetOpsTestBase,
public ::testing::WithParamInterface<std::vector<Tensor>> {};
TEST_P(ParameterizedCompressionUtilsTest, RoundTrip) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
std::vector<Tensor> round_trip_element;
TF_ASSERT_OK(UncompressElement(compressed, &round_trip_element));
  TF_EXPECT_OK(
      ExpectEqual(element, round_trip_element, /*compare_order=*/true));
}
TEST_P(ParameterizedCompressionUtilsTest, CompressedElementVersion) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
EXPECT_EQ(0, compressed.version());
}
TEST_P(ParameterizedCompressionUtilsTest, VersionMismatch) {
std::vector<Tensor> element = GetParam();
CompressedElement compressed;
TF_ASSERT_OK(CompressElement(element, &compressed));
compressed.set_version(1);
std::vector<Tensor> round_trip_element;
EXPECT_THAT(UncompressElement(compressed, &round_trip_element),
StatusIs(error::INTERNAL));
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedCompressionUtilsTest,
::testing::ValuesIn(TestCases()));
}
}
} |
1,730 | cpp | tensorflow/tensorflow | standalone | tensorflow/core/data/standalone.cc | tensorflow/core/data/standalone_test.cc | #ifndef TENSORFLOW_CORE_DATA_STANDALONE_H_
#define TENSORFLOW_CORE_DATA_STANDALONE_H_
#include <functional>
#include <memory>
#include <optional>
#include <vector>
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/data/unbounded_thread_pool.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_handle_cache.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace standalone {
class Dataset;
class Iterator {
public:
virtual ~Iterator();
Status GetNext(std::vector<Tensor>* outputs, bool* end_of_input);
absl::StatusOr<std::vector<Tensor>> Save();
Status Restore(const std::vector<Tensor>& saved_iterator);
std::shared_ptr<model::Model> model() const;
private:
friend class Dataset;
Iterator(IteratorBase* iterator, IteratorContext* ctx,
SerializationContext* serialization_ctx);
std::unique_ptr<IteratorBase> iterator_;
std::unique_ptr<IteratorContext> ctx_;
std::unique_ptr<SerializationContext> serialization_ctx_;
std::shared_ptr<TfDatazMetricsCollector> tf_dataz_metrics_collector_;
};
class Dataset {
public:
struct Params {
SessionOptions session_options;
};
static Status FromGraph(Params params, const GraphDef& graph_def,
std::unique_ptr<Dataset>* result);
~Dataset();
Status MakeIterator(std::unique_ptr<Iterator>* result);
Status MakeIterator(
std::vector<std::unique_ptr<SplitProvider>> split_providers,
std::unique_ptr<Iterator>* result);
Status MakeSplitProviders(
std::vector<std::unique_ptr<SplitProvider>>* result);
const DatasetBase* Get() const;
private:
Dataset(DatasetBase* finalized_dataset, DatasetBase* original_dataset,
DeviceMgr* device_mgr, ProcessFunctionLibraryRuntime* pflr,
FunctionLibraryDefinition* flib_def, thread::ThreadPool* pool,
std::function<void(std::function<void()>)> runner);
DatasetBase* finalized_dataset_;
DatasetBase* original_dataset_;
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
std::unique_ptr<thread::ThreadPool> interop_threadpool_;
std::unique_ptr<FunctionHandleCache> function_handle_cache_;
std::function<void(std::function<void()>)> runner_;
ResourceMgr resource_mgr_;
CancellationManager cancellation_manager_;
UnboundedThreadPool unbounded_thread_pool_;
};
}
}
}
#endif
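// Typical use of the standalone API declared above (a sketch assuming a valid
// serialized dataset graph in `graph_def`; error handling elided):
//
//   std::unique_ptr<Dataset> dataset;
//   TF_RETURN_IF_ERROR(Dataset::FromGraph({}, graph_def, &dataset));
//   std::unique_ptr<Iterator> iterator;
//   TF_RETURN_IF_ERROR(dataset->MakeIterator(&iterator));
//   bool end_of_input = false;
//   while (!end_of_input) {
//     std::vector<Tensor> outputs;
//     TF_RETURN_IF_ERROR(iterator->GetNext(&outputs, &end_of_input));
//   }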
#include "tensorflow/core/data/standalone.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/tf_data_memory_logger.h"
#include "tensorflow/core/data/tfdataz_metrics.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_handle_cache.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace standalone {
namespace {
OpKernelContext::Params CreateParams(
ProcessFunctionLibraryRuntime* pflr, DeviceMgr* device_mgr,
std::function<void(std::function<void()>)>* runner) {
OpKernelContext::Params params;
params.function_library = pflr->GetFLR("/device:CPU:0");
params.device = device_mgr->ListDevices()[0];
params.runner = runner;
return params;
}
}
Iterator::Iterator(IteratorBase* iterator, IteratorContext* ctx,
SerializationContext* serialization_ctx)
: iterator_(iterator), ctx_(ctx), serialization_ctx_(serialization_ctx) {
if (DatasetBaseIterator* dataset_iterator =
dynamic_cast<DatasetBaseIterator*>(iterator_.get())) {
tf_dataz_metrics_collector_ = std::make_shared<TfDatazMetricsCollector>(
*Env::Default(), dataset_iterator, ctx_->model());
TfDatazMetricsRegistry::Register(tf_dataz_metrics_collector_);
EnsureIteratorMemoryLoggerStarted();
}
}
Iterator::~Iterator() {
if (tf_dataz_metrics_collector_) {
TfDatazMetricsRegistry::Deregister(tf_dataz_metrics_collector_);
}
}
Status Iterator::GetNext(std::vector<Tensor>* outputs, bool* end_of_input) {
return iterator_->GetNext(ctx_.get(), outputs, end_of_input);
}
absl::StatusOr<std::vector<Tensor>> Iterator::Save() {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(iterator_->Save(serialization_ctx_.get(), &writer));
std::vector<std::unique_ptr<VariantTensorData>> data;
writer.ReleaseData(&data);
std::vector<Tensor> serialized;
for (size_t i = 0; i < data.size(); ++i) {
Tensor tensor(DT_VARIANT, TensorShape({1}));
IteratorStateVariant variant;
TF_RETURN_IF_ERROR(variant.InitializeFromVariantData(std::move(data[i])));
tensor.vec<Variant>()(0) = std::move(variant);
serialized.push_back(std::move(tensor));
}
return serialized;
}
Status Iterator::Restore(const std::vector<Tensor>& saved_iterator) {
std::vector<const VariantTensorData*> data;
data.reserve(saved_iterator.size());
for (int i = 0; i < saved_iterator.size(); ++i) {
auto saved_vec = saved_iterator[i].vec<Variant>();
auto* variant = saved_vec(0).get<IteratorStateVariant>();
if (!variant) {
return errors::Internal(
"Cannot initialize an iterator from tensor ",
saved_vec(0).DebugString(),
". Expected a variant tensor of type IteratorStateVariant.");
}
data.push_back(variant->GetData());
}
VariantTensorDataReader reader(data);
return iterator_->Restore(ctx_.get(), &reader);
}
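// Save() and Restore() are symmetric: Save() wraps each VariantTensorData in
// an IteratorStateVariant stored in a DT_VARIANT tensor of shape {1}, and
// Restore() unwraps that same layout, e.g. (sketch):
//
//   TF_ASSIGN_OR_RETURN(std::vector<Tensor> state, iterator->Save());
//   TF_RETURN_IF_ERROR(new_iterator->Restore(state));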
std::shared_ptr<model::Model> Iterator::model() const { return ctx_->model(); }
Status Dataset::FromGraph(Params params, const GraphDef& graph_def,
std::unique_ptr<Dataset>* result) {
Graph graph(OpRegistry::Global());
TF_RETURN_IF_ERROR(ImportGraphDef({}, graph_def, &graph, nullptr));
auto device_mgr = std::make_unique<StaticDeviceMgr>(DeviceFactory::NewDevice(
"CPU", params.session_options, "/job:localhost/replica:0/task:0"));
Device* device = device_mgr->ListDevices()[0];
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), graph_def.library());
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
      device_mgr.get(), Env::Default(), /*config=*/nullptr,
      TF_GRAPH_DEF_VERSION, flib_def.get(), OptimizerOptions{},
      /*thread_pool=*/nullptr, /*parent=*/nullptr,
      /*session_metadata=*/nullptr,
Rendezvous::Factory{[](const int64_t, const DeviceMgr* device_mgr,
tsl::core::RefCountPtr<Rendezvous>* r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}});
  string fetch_node;
for (const auto& node : graph_def.node()) {
if (node.op() == "_Retval") {
fetch_node = node.input(0);
}
}
if (fetch_node.empty()) {
return errors::NotFound("Failed to find a _Retval op in the given dataset");
}
std::vector<Tensor> outputs;
GraphRunner graph_runner(device);
TF_RETURN_IF_ERROR(graph_runner.Run(&graph, pflr->GetFLR("/device:CPU:0"), {},
{fetch_node}, &outputs));
data::DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(outputs[0], &dataset));
data::DatasetBase* finalized_dataset;
std::unique_ptr<thread::ThreadPool> pool(
NewThreadPoolFromSessionOptions(params.session_options));
  // Capture a raw pointer rather than the local unique_ptr: ownership of the
  // pool transfers to the Dataset below, so a by-reference capture of `pool`
  // would dangle once this function returns while `runner` lives on.
  thread::ThreadPool* pool_ptr = pool.get();
  std::function<void(std::function<void()>)> runner =
      [pool_ptr](std::function<void()> c) { pool_ptr->Schedule(std::move(c)); };
OpKernelContext::Params op_params =
CreateParams(pflr.get(), device_mgr.get(), &runner);
OpKernelContext ctx(&op_params, 0);
TF_RETURN_IF_ERROR(data::FinalizeDataset(&ctx, dataset, &finalized_dataset));
core::ScopedUnref unref(finalized_dataset);
*result = absl::WrapUnique(new Dataset(
finalized_dataset, dataset, device_mgr.release(), pflr.release(),
flib_def.release(), pool.release(), std::move(runner)));
return absl::OkStatus();
}
Status Dataset::MakeIterator(
std::vector<std::unique_ptr<SplitProvider>> split_providers,
std::unique_ptr<Iterator>* result) {
std::unique_ptr<IteratorContext> ctx;
OpKernelContext::Params op_params =
CreateParams(pflr_.get(), device_mgr_.get(), &runner_);
OpKernelContext op_ctx(&op_params, 0);
IteratorContext::Params params(&op_ctx);
params.cancellation_manager = &cancellation_manager_;
params.function_handle_cache = function_handle_cache_.get();
params.resource_mgr = &resource_mgr_;
std::move(split_providers.begin(), split_providers.end(),
std::back_inserter(params.split_providers));
params.thread_factory = unbounded_thread_pool_.get_thread_factory();
params.thread_pool = &unbounded_thread_pool_;
if (ShouldUseAutotuning(finalized_dataset_->options())) {
params.model = std::make_shared<model::Model>();
}
params.run_mode = RunMode::STANDALONE;
ctx = std::make_unique<IteratorContext>(std::move(params));
SerializationContext::Params serialization_params(&op_ctx);
auto serialization_ctx =
std::make_unique<SerializationContext>(std::move(serialization_params));
std::unique_ptr<IteratorBase> iterator;
  TF_RETURN_IF_ERROR(finalized_dataset_->MakeIterator(
      ctx.get(), /*parent=*/nullptr, /*output_prefix=*/"Iterator", &iterator));
*result = absl::WrapUnique(new Iterator(iterator.release(), ctx.release(),
serialization_ctx.release()));
return absl::OkStatus();
}
Status Dataset::MakeIterator(std::unique_ptr<Iterator>* result) {
return MakeIterator({}, result);
}
Status Dataset::MakeSplitProviders(
std::vector<std::unique_ptr<SplitProvider>>* result) {
return finalized_dataset_->MakeSplitProviders(result);
}
const DatasetBase* Dataset::Get() const { return finalized_dataset_; }
Dataset::Dataset(DatasetBase* finalized_dataset, DatasetBase* original_dataset,
DeviceMgr* device_mgr, ProcessFunctionLibraryRuntime* pflr,
FunctionLibraryDefinition* flib_def, thread::ThreadPool* pool,
std::function<void(std::function<void()>)> runner)
: finalized_dataset_(finalized_dataset),
original_dataset_(original_dataset),
device_mgr_(device_mgr),
flib_def_(flib_def),
pflr_(pflr),
interop_threadpool_(pool),
runner_(std::move(runner)),
unbounded_thread_pool_(Env::Default(), "tf_data_standalone") {
finalized_dataset_->Ref();
original_dataset_->Ref();
function_handle_cache_ =
std::make_unique<FunctionHandleCache>(pflr_->GetFLR("/device:CPU:0"));
}
Dataset::~Dataset() {
finalized_dataset_->Unref();
original_dataset_->Unref();
}
}
}
} | #include "tensorflow/core/data/standalone.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace data {
namespace standalone {
namespace {
constexpr const char* const kRangeGraphProto = R"pb(
node {
name: "Const/_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 0
}
}
}
}
node {
name: "Const/_1"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 10
}
}
}
}
node {
name: "Const/_2"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 1
}
}
}
}
node {
name: "RangeDataset/_3"
op: "RangeDataset"
input: "Const/_0"
input: "Const/_1"
input: "Const/_2"
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
}
node {
name: "dataset"
op: "_Retval"
input: "RangeDataset/_3"
attr {
key: "T"
value { type: DT_VARIANT }
}
attr {
key: "index"
value { i: 0 }
}
}
library {}
versions { producer: 96 }
)pb";
constexpr const char* const kMapGraphProto = R"pb(
node {
name: "Const/_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 0
}
}
}
}
node {
name: "Const/_1"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 10
}
}
}
}
node {
name: "Const/_2"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 1
}
}
}
}
node {
name: "RangeDataset/_3"
op: "RangeDataset"
input: "Const/_0"
input: "Const/_1"
input: "Const/_2"
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
}
node {
name: "MapDataset/_4"
op: "MapDataset"
input: "RangeDataset/_3"
attr {
key: "Targuments"
value { list {} }
}
attr {
key: "f"
value { func { name: "__inference_Dataset_map_<lambda>_67" } }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "preserve_cardinality"
value { b: false }
}
attr {
key: "use_inter_op_parallelism"
value { b: true }
}
}
node {
name: "dataset"
op: "_Retval"
input: "MapDataset/_4"
attr {
key: "T"
value { type: DT_VARIANT }
}
attr {
key: "index"
value { i: 0 }
}
}
library {
function {
signature {
name: "__inference_Dataset_map_<lambda>_67"
input_arg { name: "args_0" type: DT_INT64 }
output_arg { name: "identity" type: DT_INT64 }
}
node_def {
name: "mul"
op: "Mul"
input: "args_0"
input: "args_0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "mul:z:0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
ret { key: "identity" value: "Identity:output:0" }
arg_attr {
key: 0
value {
attr {
key: "_user_specified_name"
value { s: "args_0" }
}
}
}
}
}
versions { producer: 96 min_consumer: 12 }
)pb";
constexpr const char* const kMapGraphNoAutotuneProto = R"pb(
node {
name: "Const/_0"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 0
}
}
}
}
node {
name: "Const/_1"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 10
}
}
}
}
node {
name: "Const/_2"
op: "Const"
attr {
key: "dtype"
value { type: DT_INT64 }
}
attr {
key: "value"
value {
tensor {
dtype: DT_INT64
tensor_shape {}
int64_val: 1
}
}
}
}
node {
name: "RangeDataset/_3"
op: "RangeDataset"
input: "Const/_0"
input: "Const/_1"
input: "Const/_2"
attr {
key: "metadata"
value { s: "\n\017RangeDataset:13" }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "replicate_on_split"
value { b: false }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT64 }
}
}
}
}
}
node {
name: "MapDataset/_4"
op: "MapDataset"
input: "RangeDataset/_3"
attr {
key: "Targuments"
value { list {} }
}
attr {
key: "f"
value { func { name: "__inference_Dataset_map_lambda_74" } }
}
attr {
key: "metadata"
value { s: "\n\rMapDataset:14" }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "preserve_cardinality"
value { b: true }
}
attr {
key: "use_inter_op_parallelism"
value { b: true }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT64 }
}
}
}
}
}
node {
name: "OptionsDataset/_5"
op: "OptionsDataset"
input: "MapDataset/_4"
attr {
key: "metadata"
value { s: "\n\021OptionsDataset:15" }
}
attr {
key: "output_shapes"
value { list { shape {} } }
}
attr {
key: "output_types"
value { list { type: DT_INT64 } }
}
attr {
key: "serialized_options"
value { s: "\022\000\032\003\240\001\000*\000:\002\010\000" }
}
experimental_type {
type_id: TFT_PRODUCT
args {
type_id: TFT_DATASET
args {
type_id: TFT_PRODUCT
args {
type_id: TFT_TENSOR
args { type_id: TFT_INT64 }
}
}
}
}
}
node {
name: "dataset"
op: "_Retval"
input: "OptionsDataset/_5"
attr {
key: "T"
value { type: DT_VARIANT }
}
attr {
key: "index"
value { i: 0 }
}
}
library {
function {
signature {
name: "__inference_Dataset_map_lambda_74"
input_arg { name: "args_0" type: DT_INT64 }
output_arg { name: "identity" type: DT_INT64 }
}
node_def {
name: "mul"
op: "Mul"
input: "args_0"
input: "args_0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
node_def {
name: "Identity"
op: "Identity"
input: "mul:z:0"
attr {
key: "T"
value { type: DT_INT64 }
}
}
ret { key: "identity" value: "Identity:output:0" }
attr {
key: "_construction_context"
value { s: "kEagerRuntime" }
}
attr {
key: "_tf_data_function"
value { b: true }
}
arg_attr {
key: 0
value {
attr {
key: "_output_shapes"
value { list { shape {} } }
}
attr {
key: "_user_specified_name"
value { s: "args_0" }
}
}
}
}
}
versions { producer: 1594 }
)pb";
TEST(Scalar, Standalone) {
struct TestCase {
string graph_string;
std::vector<int64_t> expected_outputs;
};
auto test_cases = {
TestCase{kRangeGraphProto, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
TestCase{kMapGraphProto, {0, 1, 4, 9, 16, 25, 36, 49, 64, 81}},
};
for (auto test_case : test_cases) {
GraphDef graph_def;
    ASSERT_TRUE(protobuf::TextFormat::ParseFromString(test_case.graph_string,
                                                      &graph_def));
std::unique_ptr<Dataset> dataset;
TF_EXPECT_OK(Dataset::FromGraph({}, graph_def, &dataset));
std::unique_ptr<Iterator> iterator;
TF_EXPECT_OK(dataset->MakeIterator(&iterator));
EXPECT_DOUBLE_EQ(iterator->model()->ComputeSnapshotProcessingTimeNsec(), 0);
bool end_of_input = false;
for (int num_outputs = 0; !end_of_input; ++num_outputs) {
std::vector<tensorflow::Tensor> outputs;
TF_EXPECT_OK(iterator->GetNext(&outputs, &end_of_input));
if (!end_of_input) {
EXPECT_EQ(outputs[0].scalar<int64_t>()(),
test_case.expected_outputs[num_outputs]);
} else {
EXPECT_EQ(test_case.expected_outputs.size(), num_outputs);
}
}
absl::SleepFor(absl::Seconds(1));
EXPECT_GT(iterator->model()->ComputeSnapshotProcessingTimeNsec(), 0);
}
}
TEST(NoAutotune, Standalone) {
std::vector<int64_t> expected_outputs({0, 1, 4, 9, 16, 25, 36, 49, 64, 81});
GraphDef graph_def;
  ASSERT_TRUE(
      protobuf::TextFormat::ParseFromString(kMapGraphNoAutotuneProto,
                                            &graph_def));
std::unique_ptr<Dataset> dataset;
TF_EXPECT_OK(Dataset::FromGraph({}, graph_def, &dataset));
std::unique_ptr<Iterator> iterator;
TF_EXPECT_OK(dataset->MakeIterator(&iterator));
EXPECT_EQ(iterator->model(), nullptr);
bool end_of_input = false;
for (int num_outputs = 0; !end_of_input; ++num_outputs) {
std::vector<tensorflow::Tensor> outputs;
TF_EXPECT_OK(iterator->GetNext(&outputs, &end_of_input));
if (!end_of_input) {
EXPECT_EQ(outputs[0].scalar<int64_t>()(), expected_outputs[num_outputs]);
} else {
EXPECT_EQ(expected_outputs.size(), num_outputs);
}
}
absl::SleepFor(absl::Seconds(1));
EXPECT_EQ(iterator->model(), nullptr);
}
}
}
}
} |
1,731 | cpp | tensorflow/tensorflow | snapshot_utils | tensorflow/core/data/snapshot_utils.cc | tensorflow/core/data/snapshot_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_SNAPSHOT_UTILS_H_
#define TENSORFLOW_CORE_DATA_SNAPSHOT_UTILS_H_
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/io/compression.h"
#include "tensorflow/core/lib/io/inputstream_interface.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
namespace tensorflow {
class GraphDef;
namespace data {
namespace experimental {
class SnapshotMetadataRecord;
class SnapshotTensorMetadata;
}
namespace snapshot_util {
constexpr char kMetadataFilename[] = "snapshot.metadata";
constexpr char kModeAuto[] = "auto";
constexpr char kModeWrite[] = "write";
constexpr char kModeRead[] = "read";
constexpr char kModePassthrough[] = "passthrough";
constexpr char kShardDirectorySuffix[] = ".shard";
enum Mode { READER = 0, WRITER = 1, PASSTHROUGH = 2 };
std::string HashDirectory(const std::string& path, uint64 hash);
std::string RunDirectory(const std::string& hash_directory, uint64 run_id);
std::string RunDirectory(const std::string& hash_directory,
const std::string& run_id);
std::string ShardDirectory(const std::string& run_directory, int64_t shard_id);
std::string GetCheckpointFileName(const std::string& shard_directory,
uint64 checkpoint_id);
class Writer {
public:
static Status Create(Env* env, const std::string& filename,
const std::string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Writer>* out_writer);
virtual Status WriteTensors(const std::vector<Tensor>& tensors) = 0;
virtual Status Sync() = 0;
virtual Status Close() = 0;
virtual ~Writer() = default;
protected:
virtual Status Initialize(tensorflow::Env* env) = 0;
};
class TFRecordWriter : public Writer {
public:
TFRecordWriter(const std::string& filename,
const std::string& compression_type);
Status Initialize(tensorflow::Env* env) override;
Status WriteTensors(const std::vector<Tensor>& tensors) override;
Status Sync() override;
Status Close() override;
~TFRecordWriter() override;
private:
const std::string filename_;
const std::string compression_type_;
std::unique_ptr<WritableFile> dest_;
std::unique_ptr<io::RecordWriter> record_writer_;
};
class CustomWriter : public Writer {
public:
static constexpr const size_t kHeaderSize = sizeof(uint64);
static constexpr const char* const kClassName = "SnapshotWriter";
static constexpr const char* const kWriteStringPiece = "WriteStringPiece";
static constexpr const char* const kWriteCord = "WriteCord";
static constexpr const char* const kSeparator = "::";
CustomWriter(const std::string& filename, const std::string& compression_type,
const DataTypeVector& dtypes);
Status WriteTensors(const std::vector<Tensor>& tensors) override;
Status Sync() override;
Status Close() override;
~CustomWriter() override;
protected:
Status Initialize(tensorflow::Env* env) override;
private:
Status WriteRecord(const StringPiece& data);
#if defined(TF_CORD_SUPPORT)
Status WriteRecord(const absl::Cord& data);
#endif
std::unique_ptr<WritableFile> dest_;
const std::string filename_;
const std::string compression_type_;
const DataTypeVector dtypes_;
std::unique_ptr<WritableFile> zlib_underlying_dest_;
std::vector<bool> simple_tensor_mask_;
int num_simple_ = 0;
int num_complex_ = 0;
};
class Reader {
public:
class DatasetOp : public DatasetOpKernel {
public:
explicit DatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
std::string compression_;
int64_t version_;
};
class NestedDatasetOp : public DatasetOpKernel {
public:
explicit NestedDatasetOp(OpKernelConstruction* ctx);
protected:
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override;
private:
DataTypeVector output_types_;
std::vector<PartialTensorShape> output_shapes_;
};
static Status Create(Env* env, const std::string& filename,
const string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Reader>* out_reader);
static Status MakeNestedDataset(Env* env,
const std::vector<std::string>& shard_dirs,
const string& compression_type, int version,
const DataTypeVector& dtypes,
const std::vector<PartialTensorShape>& shapes,
int64_t start_index, DatasetBase** output);
static void MakeNestedDataset(const std::vector<DatasetBase*>& datasets,
DatasetBase** output);
virtual Status ReadTensors(std::vector<Tensor>* read_tensors) = 0;
virtual Status SkipRecords(int64_t num_records);
virtual ~Reader() = default;
protected:
virtual Status Initialize(Env* env) = 0;
class Dataset;
class NestedDataset;
};
class TFRecordReaderImpl {
public:
TFRecordReaderImpl(const std::string& filename, const string& compression,
std::optional<int64_t> output_buffer_size = std::nullopt);
Status Initialize(Env* env);
absl::StatusOr<Tensor> GetNext();
absl::StatusOr<std::vector<Tensor>> GetTensors();
uint64_t BytesRead() const { return bytes_read_; }
private:
absl::StatusOr<Tensor> Parse(const tstring& record);
std::string filename_;
std::unique_ptr<RandomAccessFile> file_;
std::unique_ptr<io::RecordReader> record_reader_;
uint64_t offset_ = 0;
uint64_t bytes_read_ = 0;
const string compression_;
const std::optional<int64_t> output_buffer_size_;
};
class TFRecordReader : public Reader {
public:
TFRecordReader(const std::string& filename, const string& compression,
const DataTypeVector& dtypes,
std::optional<int64_t> output_buffer_size = std::nullopt)
: reader_impl_(filename, compression, output_buffer_size),
dtypes_(dtypes) {}
Status Initialize(Env* env) override { return reader_impl_.Initialize(env); }
Status ReadTensors(std::vector<Tensor>* read_tensors) override;
uint64_t BytesRead() const { return reader_impl_.BytesRead(); }
private:
TFRecordReaderImpl reader_impl_;
const DataTypeVector dtypes_;
};
class CustomReader : public Reader {
public:
  static constexpr const int64_t kSnappyReaderInputBufferSizeBytes =
      1 << 30;  // 1 GiB
  static constexpr const int64_t kSnappyReaderOutputBufferSizeBytes =
      32 << 20;  // 32 MiB
static constexpr const size_t kHeaderSize = sizeof(uint64);
static constexpr const char* const kClassName = "SnapshotReader";
static constexpr const char* const kReadString = "ReadString";
static constexpr const char* const kReadCord = "ReadCord";
static constexpr const char* const kSeparator = "::";
CustomReader(const std::string& filename, const string& compression_type,
int version, const DataTypeVector& dtypes);
Status ReadTensors(std::vector<Tensor>* read_tensors) override;
~CustomReader() override = default;
protected:
Status Initialize(Env* env) override;
private:
Status ReadTensorsV0(std::vector<Tensor>* read_tensors);
Status SnappyUncompress(
const experimental::SnapshotTensorMetadata* metadata,
std::vector<Tensor>* simple_tensors,
std::vector<std::pair<std::unique_ptr<char[]>, size_t>>*
tensor_proto_strs);
Status ReadRecord(tstring* record);
#if defined(TF_CORD_SUPPORT)
Status ReadRecord(absl::Cord* record);
#endif
std::string filename_;
std::unique_ptr<RandomAccessFile> file_;
std::unique_ptr<io::InputStreamInterface> input_stream_;
const string compression_type_;
const int version_;
const DataTypeVector dtypes_;
int num_simple_ = 0;
int num_complex_ = 0;
std::vector<bool> simple_tensor_mask_;
};
Status WriteMetadataFile(Env* env, const string& dir,
const experimental::SnapshotMetadataRecord* metadata);
Status WriteMetadataFile(
Env* env, const string& dir,
const experimental::DistributedSnapshotMetadata* metadata);
Status ReadMetadataFile(Env* env, const string& dir,
experimental::SnapshotMetadataRecord* metadata,
bool* file_exists);
Status ReadMetadataFile(Env* env, const string& dir,
experimental::DistributedSnapshotMetadata* metadata,
bool* file_exists);
Status DumpDatasetGraph(Env* env, const std::string& path, uint64 hash,
const GraphDef* graph);
Status DetermineOpState(const std::string& mode_string, bool file_exists,
const experimental::SnapshotMetadataRecord* metadata,
uint64 pending_snapshot_expiry_seconds, Mode* mode);
struct ElementOrEOF {
std::vector<Tensor> value;
bool end_of_sequence = false;
};
class AsyncWriter {
public:
explicit AsyncWriter(Env* env, int64_t file_index,
const std::string& shard_directory, uint64 checkpoint_id,
const std::string& compression, int64_t version,
const DataTypeVector& output_types,
std::function<void(Status)> done);
void Write(const std::vector<Tensor>& tensors) TF_LOCKS_EXCLUDED(mu_);
void SignalEOF() TF_LOCKS_EXCLUDED(mu_);
private:
void Consume(ElementOrEOF* be) TF_LOCKS_EXCLUDED(mu_);
bool ElementAvailable() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status WriterThread(Env* env, const std::string& shard_directory,
uint64 checkpoint_id, const std::string& compression,
int64_t version, DataTypeVector output_types);
mutex mu_;
std::deque<ElementOrEOF> deque_ TF_GUARDED_BY(mu_);
std::unique_ptr<Thread> thread_;
};
}
}
}
#endif
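// Sketch of writing one snapshot file with the API above (the file name,
// compression, and `dtypes` are illustrative; per Writer::Create, version 1
// selects the custom format and version 2 the TFRecord-based format):
//
//   std::unique_ptr<snapshot_util::Writer> writer;
//   TF_RETURN_IF_ERROR(snapshot_util::Writer::Create(
//       Env::Default(), "/tmp/00000000.snapshot", io::compression::kSnappy,
//       /*version=*/2, dtypes, &writer));
//   TF_RETURN_IF_ERROR(writer->WriteTensors(tensors));
//   TF_RETURN_IF_ERROR(writer->Close());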
#include "tensorflow/core/data/snapshot_utils.h"
#include <algorithm>
#include <climits>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow/core/lib/io/random_inputstream.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/lib/io/zlib_compression_options.h"
#include "tensorflow/core/lib/io/zlib_inputstream.h"
#include "tensorflow/core/lib/io/zlib_outputbuffer.h"
#include "tensorflow/core/platform/coding.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/lib/io/snappy/snappy_inputbuffer.h"
#include "tsl/lib/io/snappy/snappy_outputbuffer.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
constexpr const char* const kOutputTypes = "output_types";
constexpr const char* const kOutputShapes = "output_shapes";
constexpr const char* const kCompression = "compression";
constexpr const char* const kVersion = "version";
constexpr const char* const kCurrentCheckpointID = "current_checkpoint_id";
constexpr const char* const kIndex = "index";
constexpr const char* const kStartIndex = "start_index";
std::string ProtoSerializationErrorMessage(const TensorProto& proto,
const std::string& output_file) {
const auto proto_byte_size = proto.ByteSizeLong();
std::string error_message =
absl::StrCat("Failed to serialize tensor proto of ", proto_byte_size,
" bytes to file: ", output_file);
if (proto_byte_size > INT_MAX) {
absl::StrAppend(&error_message, ": exceeded maximum protobuf size of 2GB.");
}
return error_message;
}
}
// Out-of-line definitions, required when these static constexpr members are
// ODR-used prior to C++17.
constexpr const int64_t
    CustomReader::kSnappyReaderInputBufferSizeBytes;
constexpr const int64_t
    CustomReader::kSnappyReaderOutputBufferSizeBytes;
std::string HashDirectory(const std::string& path, uint64 hash) {
return io::JoinPath(
path, strings::Printf("%llu", static_cast<unsigned long long>(hash)));
}
std::string RunDirectory(const std::string& hash_directory, uint64 run_id) {
return RunDirectory(
hash_directory,
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
}
std::string RunDirectory(const std::string& hash_directory,
const std::string& run_id) {
return io::JoinPath(hash_directory, run_id);
}
std::string ShardDirectory(const std::string& run_directory, int64_t shard_id) {
return io::JoinPath(
run_directory,
strings::Printf("%08llu%s", static_cast<unsigned long long>(shard_id),
kShardDirectorySuffix));
}
std::string GetCheckpointFileName(const std::string& shard_directory,
uint64 checkpoint_id) {
return io::JoinPath(
shard_directory,
strings::Printf("%08llu.snapshot",
static_cast<unsigned long long>(checkpoint_id)));
}
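// The helpers above compose the on-disk snapshot layout. For example, with
// hash=77, run_id=5, shard_id=0, and checkpoint_id=1 they yield:
//   <path>/77/5/00000000.shard/00000001.snapshot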
Status Writer::Create(Env* env, const std::string& filename,
const std::string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Writer>* out_writer) {
switch (version) {
case 1:
*out_writer =
std::make_unique<CustomWriter>(filename, compression_type, dtypes);
break;
case 2:
*out_writer =
std::make_unique<TFRecordWriter>(filename, compression_type);
break;
default:
return errors::InvalidArgument("Snapshot writer version: ", version,
" is not supported.");
}
return (*out_writer)->Initialize(env);
}
TFRecordWriter::TFRecordWriter(const std::string& filename,
const std::string& compression_type)
: filename_(filename), compression_type_(compression_type) {}
Status TFRecordWriter::Initialize(tensorflow::Env* env) {
TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
record_writer_ = std::make_unique<io::RecordWriter>(
dest_.get(), io::RecordWriterOptions::CreateRecordWriterOptions(
compression_type_));
return absl::OkStatus();
}
Status TFRecordWriter::WriteTensors(const std::vector<Tensor>& tensors) {
for (const auto& tensor : tensors) {
TensorProto proto;
tensor.AsProtoTensorContent(&proto);
#if defined(TF_CORD_SUPPORT)
auto* proto_buffer = new std::string();
if (!proto.SerializeToString(proto_buffer)) {
delete proto_buffer;
return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
}
absl::Cord proto_serialized = absl::MakeCordFromExternal(
*proto_buffer,
[proto_buffer](absl::string_view) { delete proto_buffer; });
TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#else
std::string proto_serialized;
if (!proto.SerializeToString(&proto_serialized)) {
return errors::DataLoss(ProtoSerializationErrorMessage(proto, filename_));
}
TF_RETURN_IF_ERROR(record_writer_->WriteRecord(proto_serialized));
#endif
}
return absl::OkStatus();
}
Status TFRecordWriter::Sync() {
TF_RETURN_IF_ERROR(record_writer_->Flush());
return dest_->Flush();
}
Status TFRecordWriter::Close() {
if (record_writer_ != nullptr) {
TF_RETURN_IF_ERROR(Sync());
TF_RETURN_IF_ERROR(record_writer_->Close());
TF_RETURN_IF_ERROR(dest_->Close());
record_writer_ = nullptr;
dest_ = nullptr;
}
return absl::OkStatus();
}
TFRecordWriter::~TFRecordWriter() {
Status s = Close();
if (!s.ok()) {
LOG(ERROR) << "Failed to close snapshot file " << filename_ << ": " << s;
}
}
CustomWriter::CustomWriter(const std::string& filename,
const std::string& compression_type,
const DataTypeVector& dtypes)
: filename_(filename),
compression_type_(compression_type),
dtypes_(dtypes) {}
Status CustomWriter::Initialize(tensorflow::Env* env) {
TF_RETURN_IF_ERROR(env->NewAppendableFile(filename_, &dest_));
#if defined(IS_SLIM_BUILD)
if (compression_type_ != io::compression::kNone) {
LOG(ERROR) << "Compression is unsupported on mobile platforms. Turning "
<< "off compression.";
}
#else
if (compression_type_ == io::compression::kGzip) {
zlib_underlying_dest_.swap(dest_);
io::ZlibCompressionOptions zlib_options;
zlib_options = io::ZlibCompressionOptions::GZIP();
io::ZlibOutputBuffer* zlib_output_buffer = new io::ZlibOutputBuffer(
zlib_underlying_dest_.get(), zlib_options.input_buffer_size,
zlib_options.output_buffer_size, zlib_options);
TF_CHECK_OK(zlib_output_buffer->Init());
dest_.reset(zlib_output_buffer);
}
#endif
simple_tensor_mask_.reserve(dtypes_.size());
for (const auto& dtype : dtypes_) {
if (DataTypeCanUseMemcpy(dtype)) {
simple_tensor_mask_.push_back(true);
num_simple_++;
} else {
simple_tensor_mask_.push_back(false);
num_complex_++;
}
}
return absl::OkStatus();
}
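// Initialize() also precomputes which dtypes are memcpy-able ("simple"), so
// that WriteTensors() can copy those tensors' buffers directly and only pays
// for full TensorProto serialization on the remaining ("complex") dtypes
// such as DT_STRING or DT_VARIANT.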
Status CustomWriter::WriteTensors(const std::vector<Tensor>& tensors) {
if (compression_type_ != io::compression::kSnappy) {
experimental::SnapshotRecord record;
for (const auto& tensor : tensors) {
TensorProto* t = record.add_tensor();
tensor.AsProtoTensorContent(t);
}
#if defined(TF_CORD_SUPPORT)
auto record_buffer = new std::string();
record.SerializeToString(record_buffer);
absl::Cord record_serialized = absl::MakeCordFromExternal(
*record_buffer,
[record_buffer](absl::string_view) { delete record_buffer; });
return WriteRecord(record_serialized);
#else
return WriteRecord(record.SerializeAsString());
#endif
}
std::vector<const TensorBuffer*> tensor_buffers;
tensor_buffers.reserve(num_simple_);
std::vector<TensorProto> tensor_protos;
tensor_protos.reserve(num_complex_);
experimental::SnapshotTensorMetadata metadata;
int64_t total_size = 0;
for (int i = 0, end = tensors.size(); i < end; ++i) {
const Tensor& tensor = tensors[i];
experimental::TensorMetadata* tensor_metadata =
metadata.add_tensor_metadata();
tensor.shape().AsProto(tensor_metadata->mutable_tensor_shape());
int64_t size = 0;
if (simple_tensor_mask_[i]) {
auto tensor_buffer = DMAHelper::buffer(&tensor);
tensor_buffers.push_back(tensor_buffer);
size = tensor_buffer->size();
} else {
TensorProto proto;
tensor.AsProtoTensorContent(&proto);
size = proto.ByteSizeLong();
tensor_protos.push_back(std::move(proto));
}
tensor_metadata->set_tensor_size_bytes(size);
total_size += size;
}
std::vector<char> uncompressed(total_size);
char* position = uncompressed.data();
int buffer_index = 0;
int proto_index = 0;
for (int i = 0, end = tensors.size(); i < end; ++i) {
const auto& tensor_metadata = metadata.tensor_metadata(i);
if (simple_tensor_mask_[i]) {
memcpy(position, tensor_buffers[buffer_index]->data(),
tensor_metadata.tensor_size_bytes());
buffer_index++;
} else {
tensor_protos[proto_index].SerializeToArray(
position, tensor_metadata.tensor_size_bytes());
proto_index++;
}
position += tensor_metadata.tensor_size_bytes();
}
DCHECK_EQ(position, uncompressed.data() + total_size);
string output;
if (!tsl::port::Snappy_Compress(uncompressed.data(), total_size, &output)) {
return errors::Internal("Failed to compress using snappy.");
}
#if defined(TF_CORD_SUPPORT)
auto metadata_buffer = new std::string();
metadata.SerializeToString(metadata_buffer);
absl::Cord metadata_serialized = absl::MakeCordFromExternal(
*metadata_buffer,
[metadata_buffer](absl::string_view) { delete metadata_buffer; });
#else
std::string metadata_serialized = metadata.SerializeAsString();
#endif
TF_RETURN_IF_ERROR(WriteRecord(metadata_serialized));
TF_RETURN_IF_ERROR(WriteRecord(output));
return absl::OkStatus();
}
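// In the snappy path above, each batch is exactly two records: a
// SnapshotTensorMetadata proto giving per-tensor shapes and byte sizes,
// followed by one snappy-compressed blob of the concatenated tensor bytes.
// A decoding sketch, with `metadata_record` and `blob` standing in for the
// two payloads read back from the file (illustrative names):
//
//   experimental::SnapshotTensorMetadata metadata;
//   metadata.ParseFromString(metadata_record);
//   size_t uncompressed_size;
//   tsl::port::Snappy_GetUncompressedLength(blob.data(), blob.size(),
//                                           &uncompressed_size);
//   std::vector<char> uncompressed(uncompressed_size);
//   tsl::port::Snappy_Uncompress(blob.data(), blob.size(),
//                                uncompressed.data());
//   // Slice `uncompressed` at cumulative tensor_size_bytes() offsets.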
Status CustomWriter::Sync() { return dest_->Sync(); }
Status CustomWriter::Close() {
if (dest_ != nullptr) {
TF_RETURN_IF_ERROR(dest_->Close());
dest_ = nullptr;
}
if (zlib_underlying_dest_ != nullptr) {
TF_RETURN_IF_ERROR(zlib_underlying_dest_->Close());
zlib_underlying_dest_ = nullptr;
}
return absl::OkStatus();
}
CustomWriter::~CustomWriter() {
Status s = Close();
if (!s.ok()) {
LOG(ERROR) << "Could not finish writing file: " << s;
}
}
Status CustomWriter::WriteRecord(const StringPiece& data) {
char header[kHeaderSize];
core::EncodeFixed64(header, data.size());
TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
return dest_->Append(data);
}
#if defined(TF_CORD_SUPPORT)
Status CustomWriter::WriteRecord(const absl::Cord& data) {
char header[kHeaderSize];
core::EncodeFixed64(header, data.size());
TF_RETURN_IF_ERROR(dest_->Append(StringPiece(header, sizeof(header))));
return dest_->Append(data);
}
#endif
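// Both WriteRecord overloads use the same framing: a fixed kHeaderSize
// (8-byte) little-endian length prefix followed by the raw payload. A reader
// recovers a record by first decoding that prefix (sketch; `file`, `offset`,
// and `result` are illustrative):
//
//   char header[kHeaderSize];
//   TF_RETURN_IF_ERROR(file->Read(offset, kHeaderSize, &result, header));
//   uint64 length = core::DecodeFixed64(header);  // bytes in the payload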
Status Reader::Create(Env* env, const std::string& filename,
const string& compression_type, int version,
const DataTypeVector& dtypes,
std::unique_ptr<Reader>* out_reader) {
switch (version) {
case 0:
case 1:
*out_reader = std::make_unique<CustomReader>(filename, compression_type,
version, dtypes);
break;
case 2:
*out_reader =
std::make_unique<TFRecordReader>(filename, compression_type, dtypes);
break;
default:
return errors::InvalidArgument("Snapshot reader version: ", version,
" is not supported.");
}
return (*out_reader)->Initialize(env);
}
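// Reading back mirrors the writer factory; sketch with an illustrative
// filename:
//
//   std::unique_ptr<Reader> reader;
//   TF_RETURN_IF_ERROR(Reader::Create(Env::Default(), "/tmp/out.snapshot",
//                                     io::compression::kGzip, /*version=*/2,
//                                     dtypes, &reader));
//   std::vector<Tensor> tensors;
//   TF_RETURN_IF_ERROR(reader->ReadTensors(&tensors));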
Status Reader::SkipRecords(int64_t num_records) {
for (int i = 0; i < num_records; ++i) {
std::vector<Tensor> unused_tensors;
TF_RETURN_IF_ERROR(ReadTensors(&unused_tensors));
}
return absl::OkStatus();
}
class Reader::Dataset : public DatasetBase {
public:
Dataset(DatasetContext&& ctx, const std::string& shard_dir,
const std::string& compression, const int64_t version,
const DataTypeVector& dtypes,
const std::vector<PartialTensorShape>& shapes,
const int64_t start_index)
: DatasetBase(std::move(ctx)),
shard_dir_(shard_dir),
compression_(compression),
version_(version),
dtypes_(dtypes),
shapes_(shapes),
start_index_(start_index) {}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
std::string DebugString() const override { return "SnapshotDatasetReader"; }
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** node) const override {
Node* shard_dir = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(shard_dir_, &shard_dir));
Node* start_index = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(start_index_, &start_index));
AttrValue compression;
b->BuildAttrValue(compression_, &compression);
AttrValue version;
b->BuildAttrValue(version_, &version);
    return b->AddDataset(
        this,
        /*inputs=*/
        {std::make_pair(0, shard_dir), std::make_pair(1, start_index)},
        /*list_inputs=*/{},
        /*attrs=*/{{kCompression, compression}, {kVersion, version}},
        /*use_dataset_name=*/true, node);
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(node_name(), prefix)});
}
private:
class Iterator : public DatasetIterator<Dataset> {
p | #include "tensorflow/core/data/snapshot_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/compression.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace data {
namespace snapshot_util {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::LocalTempFilename;
void GenerateTensorVector(tensorflow::DataTypeVector& dtypes,
                          std::vector<Tensor>& tensors) {
  std::string tensor_data(1024, 'a');
  for (int i = 0; i < 10; ++i) {
    // Tensor(const char*) yields a DT_STRING scalar holding a copy of the
    // 1KB payload, so each round trip below moves ~10KB of string data.
    Tensor t(tensor_data.data());
    dtypes.push_back(t.dtype());
    tensors.push_back(t);
  }
}
void SnapshotRoundTrip(std::string compression_type, int version) {
std::vector<Tensor> tensors;
tensorflow::DataTypeVector dtypes;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (int i = 0; i < 100; ++i) {
TF_ASSERT_OK(writer->WriteTensors(tensors));
}
TF_ASSERT_OK(writer->Close());
std::unique_ptr<Reader> reader;
TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
version, dtypes, &reader));
for (int i = 0; i < 100; ++i) {
std::vector<Tensor> read_tensors;
TF_ASSERT_OK(reader->ReadTensors(&read_tensors));
EXPECT_EQ(tensors.size(), read_tensors.size());
for (int j = 0; j < read_tensors.size(); ++j) {
TensorProto proto;
TensorProto read_proto;
tensors[j].AsProtoTensorContent(&proto);
read_tensors[j].AsProtoTensorContent(&read_proto);
std::string proto_serialized, read_proto_serialized;
proto.AppendToString(&proto_serialized);
read_proto.AppendToString(&read_proto_serialized);
EXPECT_EQ(proto_serialized, read_proto_serialized);
}
}
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
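// The helper above is exercised across every supported (compression, version)
// pair below. Equality is asserted on serialized TensorProtos rather than on
// Tensor objects, since Tensor does not define operator==.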
TEST(SnapshotUtilTest, CombinationRoundTripTest) {
SnapshotRoundTrip(io::compression::kNone, 1);
SnapshotRoundTrip(io::compression::kGzip, 1);
SnapshotRoundTrip(io::compression::kSnappy, 1);
SnapshotRoundTrip(io::compression::kNone, 2);
SnapshotRoundTrip(io::compression::kGzip, 2);
SnapshotRoundTrip(io::compression::kSnappy, 2);
}
TEST(SnapshotUtilTest, MetadataFileRoundTrip) {
experimental::DistributedSnapshotMetadata metadata_in;
metadata_in.set_compression(io::compression::kGzip);
std::string dir = LocalTempFilename();
TF_ASSERT_OK(WriteMetadataFile(Env::Default(), dir, &metadata_in));
experimental::DistributedSnapshotMetadata metadata_out;
bool file_exists;
TF_ASSERT_OK(
ReadMetadataFile(Env::Default(), dir, &metadata_out, &file_exists));
EXPECT_THAT(metadata_in, EqualsProto(metadata_out));
}
TEST(SnapshotUtilTest, MetadataFileDoesntExist) {
experimental::DistributedSnapshotMetadata metadata;
bool file_exists;
TF_ASSERT_OK(ReadMetadataFile(Env::Default(), LocalTempFilename(), &metadata,
&file_exists));
EXPECT_FALSE(file_exists);
}
void SnapshotReaderBenchmarkLoop(::testing::benchmark::State& state,
std::string compression_type, int version) {
tensorflow::DataTypeVector dtypes;
std::vector<Tensor> tensors;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (auto s : state) {
writer->WriteTensors(tensors).IgnoreError();
}
TF_ASSERT_OK(writer->Close());
std::unique_ptr<Reader> reader;
TF_ASSERT_OK(Reader::Create(Env::Default(), filename, compression_type,
version, dtypes, &reader));
for (auto s : state) {
std::vector<Tensor> read_tensors;
reader->ReadTensors(&read_tensors).IgnoreError();
}
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
void SnapshotCustomReaderNoneBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomReaderGzipBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomReaderSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordReaderNoneBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordReaderGzipBenchmark(::testing::benchmark::State& state) {
SnapshotReaderBenchmarkLoop(state, io::compression::kGzip, 2);
}
BENCHMARK(SnapshotCustomReaderNoneBenchmark);
BENCHMARK(SnapshotCustomReaderGzipBenchmark);
BENCHMARK(SnapshotCustomReaderSnappyBenchmark);
BENCHMARK(SnapshotTFRecordReaderNoneBenchmark);
BENCHMARK(SnapshotTFRecordReaderGzipBenchmark);
void SnapshotWriterBenchmarkLoop(::testing::benchmark::State& state,
std::string compression_type, int version) {
tensorflow::DataTypeVector dtypes;
std::vector<Tensor> tensors;
GenerateTensorVector(dtypes, tensors);
std::string filename;
EXPECT_TRUE(Env::Default()->LocalTempFilename(&filename));
std::unique_ptr<Writer> writer;
TF_ASSERT_OK(Writer::Create(tensorflow::Env::Default(), filename,
compression_type, version, dtypes, &writer));
for (auto s : state) {
writer->WriteTensors(tensors).IgnoreError();
}
writer->Close().IgnoreError();
TF_ASSERT_OK(Env::Default()->DeleteFile(filename));
}
void SnapshotCustomWriterNoneBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 1);
}
void SnapshotCustomWriterGzipBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 1);
}
void SnapshotCustomWriterSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 1);
}
void SnapshotTFRecordWriterNoneBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kNone, 2);
}
void SnapshotTFRecordWriterGzipBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kGzip, 2);
}
void SnapshotTFRecordWriterSnappyBenchmark(::testing::benchmark::State& state) {
SnapshotWriterBenchmarkLoop(state, io::compression::kSnappy, 2);
}
BENCHMARK(SnapshotCustomWriterNoneBenchmark);
BENCHMARK(SnapshotCustomWriterGzipBenchmark);
BENCHMARK(SnapshotCustomWriterSnappyBenchmark);
BENCHMARK(SnapshotTFRecordWriterNoneBenchmark);
BENCHMARK(SnapshotTFRecordWriterGzipBenchmark);
BENCHMARK(SnapshotTFRecordWriterSnappyBenchmark);
}
}
}
} |
1,732 | cpp | tensorflow/tensorflow | unbounded_thread_pool | tensorflow/core/data/unbounded_thread_pool.cc | tensorflow/core/data/unbounded_thread_pool_test.cc | #ifndef TENSORFLOW_CORE_DATA_UNBOUNDED_THREAD_POOL_H_
#define TENSORFLOW_CORE_DATA_UNBOUNDED_THREAD_POOL_H_
#include <deque>
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/thread_factory.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/threadpool_interface.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
namespace data {
class UnboundedThreadPool : public thread::ThreadPoolInterface {
public:
UnboundedThreadPool(Env* env, const string& thread_name)
: unbounded_work_queue_(env, thread_name) {}
UnboundedThreadPool(Env* env, const string& thread_name,
const ThreadOptions& thread_options)
: unbounded_work_queue_(env, thread_name, thread_options) {}
~UnboundedThreadPool() override = default;
std::shared_ptr<ThreadFactory> get_thread_factory();
void Schedule(std::function<void()> fn) override;
int NumThreads() const override;
int CurrentThreadId() const override;
private:
class LogicalThreadFactory;
class LogicalThreadWrapper;
void ScheduleOnWorkQueue(std::function<void()> fn,
std::shared_ptr<Notification> done);
UnboundedWorkQueue unbounded_work_queue_;
};
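// Usage sketch (names illustrative): the pool can be driven either through
// the Eigen-style Schedule() interface or through logical threads, which are
// joined when the returned Thread is destroyed:
//
//   UnboundedThreadPool pool(Env::Default(), "tf_data_pool");
//   pool.Schedule([] { /* runs on some pooled thread */ });
//   auto factory = pool.get_thread_factory();
//   std::unique_ptr<Thread> t = factory->StartThread("worker", [] {});
//   t.reset();  // blocks until the scheduled function has finished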
}
}
#endif
#include "tensorflow/core/data/unbounded_thread_pool.h"
#include <functional>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
namespace data {
class UnboundedThreadPool::LogicalThreadWrapper : public Thread {
public:
explicit LogicalThreadWrapper(std::shared_ptr<Notification> done)
: done_(std::move(done)) {}
~LogicalThreadWrapper() override {
done_->WaitForNotification();
}
private:
std::shared_ptr<Notification> done_;
};
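// The wrapper above gives logical threads join-on-destruction semantics: the
// scheduled function notifies `done_` when it completes (see WorkQueueFunc
// below), and the destructor blocks on that notification, so callers can
// treat the returned Thread like a joinable thread even though it may share
// a physical thread with other work items.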
class UnboundedThreadPool::LogicalThreadFactory : public ThreadFactory {
public:
explicit LogicalThreadFactory(UnboundedThreadPool* pool) : pool_(pool) {}
std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) override {
auto done = std::make_shared<Notification>();
pool_->ScheduleOnWorkQueue(std::move(fn), done);
return std::make_unique<LogicalThreadWrapper>(std::move(done));
}
private:
UnboundedThreadPool* const pool_;
};
std::shared_ptr<ThreadFactory> UnboundedThreadPool::get_thread_factory() {
return std::make_shared<LogicalThreadFactory>(this);
}
void UnboundedThreadPool::Schedule(std::function<void()> fn) {
auto tagged_fn = [fn = std::move(fn)]() {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "ThreadPool");
fn();
};
ScheduleOnWorkQueue(std::move(tagged_fn), nullptr);
}
// The pool grows on demand, so there is no fixed thread count and no stable
// per-thread id; both queries return -1 to signal "not applicable".
int UnboundedThreadPool::NumThreads() const { return -1; }
int UnboundedThreadPool::CurrentThreadId() const { return -1; }
namespace {
void WorkQueueFunc(const std::function<void()>& fn,
std::shared_ptr<Notification> done) {
fn();
if (done) {
done->Notify();
}
}
}
void UnboundedThreadPool::ScheduleOnWorkQueue(
std::function<void()> fn, std::shared_ptr<Notification> done) {
unbounded_work_queue_.Schedule(
std::bind(&WorkQueueFunc, std::move(fn), std::move(done)));
}
}
} | #include "tensorflow/core/data/unbounded_thread_pool.h"
#include <atomic>
#include <memory>
#include <vector>
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(UnboundedThreadPool, ConcurrentThreadCreation) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
const int kNumThreadsToCreate = 10;
std::atomic<int> i(0);
for (int j = 0; j < kNumThreadsToCreate; ++j) {
threads.push_back(thread_factory->StartThread("", [=, &i,
&thread_factory]() {
std::vector<std::unique_ptr<Thread>> nested_threads;
for (int k = 0; k < kNumThreadsToCreate; ++k) {
nested_threads.push_back(
thread_factory->StartThread("", [&i]() { ++i; }));
}
nested_threads.clear();
}));
}
threads.clear();
EXPECT_EQ(i, kNumThreadsToCreate * kNumThreadsToCreate);
}
TEST(UnboundedThreadPool, MultipleBlockingThreads) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
std::vector<int> round_sizes = {5, 10, 15, 20};
for (const int round_size : round_sizes) {
Notification n;
BlockingCounter bc(round_size);
for (int j = 0; j < round_size; ++j) {
threads.push_back(thread_factory->StartThread("", [&bc, &n]() {
bc.DecrementCount();
n.WaitForNotification();
}));
}
bc.Wait();
n.Notify();
threads.clear();
}
}
}
}
} |
1,733 | cpp | tensorflow/tensorflow | dispatcher_state | tensorflow/core/data/service/dispatcher_state.cc | tensorflow/core/data/service/dispatcher_state_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_STATE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_STATE_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/graph_rewriters.h"
#include "tensorflow/core/data/service/journal.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
class DispatcherState {
public:
DispatcherState();
explicit DispatcherState(
const experimental::DispatcherConfig& dispatcher_config);
DispatcherState(const DispatcherState&) = delete;
DispatcherState& operator=(const DispatcherState&) = delete;
Status Apply(const Update& update);
struct Dataset {
explicit Dataset(const std::string& dataset_id,
const DataServiceMetadata& metadata)
: dataset_id(dataset_id), metadata(metadata) {}
const std::string dataset_id;
const DataServiceMetadata metadata;
};
struct Worker {
explicit Worker(const RegisterWorkerUpdate& register_worker)
: address(register_worker.worker_address()),
transfer_servers({register_worker.transfer_servers().begin(),
register_worker.transfer_servers().end()}),
tags(register_worker.worker_tags().begin(),
register_worker.worker_tags().end()),
uid(register_worker.worker_uid()) {}
const std::string address;
const std::vector<DataTransferServerInfo> transfer_servers;
const std::vector<std::string> tags;
const int64_t uid;
};
struct IterationKey {
explicit IterationKey(absl::string_view name, int64_t repetition)
: name(name), repetition(repetition) {}
friend bool operator==(const IterationKey& lhs, const IterationKey& rhs) {
return lhs.name == rhs.name && lhs.repetition == rhs.repetition;
}
template <typename H>
friend H AbslHashValue(H h, const IterationKey& k) {
return H::combine(std::move(h), k.name, k.repetition);
}
std::string DebugString() const {
return absl::StrCat(name, "/", repetition);
}
const std::string name;
const int64_t repetition;
};
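// Defining AbslHashValue above lets IterationKey be used directly as an
// absl::flat_hash_map key (see iterations_by_key_ below); two keys are equal
// iff both the job name and the repetition number match.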
struct DistributedEpochState {
explicit DistributedEpochState(int64_t num_split_providers)
: repetitions(num_split_providers), indices(num_split_providers) {}
std::vector<int64_t> repetitions;
std::vector<int64_t> indices;
};
struct Task;
struct PendingTask {
explicit PendingTask(std::shared_ptr<Task> task, int64_t target_round)
: task(std::move(task)), target_round(target_round) {}
std::shared_ptr<Task> task;
int64_t target_round;
absl::flat_hash_set<int64_t> ready_consumers;
int64_t failures = 0;
};
struct Job {
explicit Job(int64_t id, const std::string& dataset_id,
const ProcessingModeDef& processing_mode, std::string job_name,
std::optional<int64_t> num_consumers,
bool use_cross_trainer_cache, TargetWorkers target_workers)
: id(id),
dataset_id(dataset_id),
processing_mode(processing_mode),
job_name(job_name),
num_consumers(num_consumers),
use_cross_trainer_cache(use_cross_trainer_cache),
target_workers(target_workers) {}
const int64_t id;
const std::string dataset_id;
const ProcessingModeDef processing_mode;
const std::string job_name;
const std::optional<int64_t> num_consumers;
const bool use_cross_trainer_cache;
const TargetWorkers target_workers;
};
struct Iteration {
explicit Iteration(int64_t iteration_id, IterationKey iteration_key,
int64_t num_split_providers, std::shared_ptr<Job> job)
: iteration_id(iteration_id), iteration_key(iteration_key), job(job) {
if (IsDynamicShard(job->processing_mode)) {
distributed_epoch_state = DistributedEpochState(num_split_providers);
}
}
bool IsRoundRobin() const { return job->num_consumers.has_value(); }
std::string DebugString() const {
return absl::StrCat(iteration_key.name, "_", iteration_key.repetition);
}
const int64_t iteration_id;
const IterationKey iteration_key;
const std::shared_ptr<Job> job;
std::optional<DistributedEpochState> distributed_epoch_state;
std::queue<PendingTask> pending_tasks;
int64_t num_clients = 0;
int64_t last_client_released_micros = -1;
bool finished = false;
bool garbage_collected = false;
};
struct Task {
template <class T>
explicit Task(const T& create_task_update,
const std::shared_ptr<Iteration>& iteration)
: task_id(create_task_update.task_id()),
iteration(iteration),
worker_address(create_task_update.worker_address()),
transfer_servers(create_task_update.transfer_servers().begin(),
create_task_update.transfer_servers().end()),
worker_tags(create_task_update.worker_tags().begin(),
create_task_update.worker_tags().end()),
worker_uid(create_task_update.worker_uid()) {}
const int64_t task_id;
const std::shared_ptr<Iteration> iteration;
const std::string worker_address;
const std::vector<DataTransferServerInfo> transfer_servers;
const std::vector<std::string> worker_tags;
const int64_t worker_uid;
int64_t starting_round = 0;
bool finished = false;
bool removed = false;
};
using TasksById = absl::flat_hash_map<int64_t, std::shared_ptr<Task>>;
std::string NextAvailableDatasetId() const;
Status DatasetFromId(const std::string& id,
std::shared_ptr<const Dataset>& dataset) const;
Status WorkerFromAddress(const std::string& address,
std::shared_ptr<const Worker>& worker) const;
std::vector<std::shared_ptr<const Worker>> ListWorkers() const;
int64_t NextAvailableJobId() const;
Status JobFromId(int64_t job_id, std::shared_ptr<const Job>& job) const;
Status JobByName(const std::string& job_name,
std::shared_ptr<const Job>& job) const;
int64_t NextAvailableIterationId() const;
std::vector<std::shared_ptr<const Iteration>> ListIterations() const;
Status IterationFromId(int64_t id,
std::shared_ptr<const Iteration>& iteration) const;
Status IterationByKey(IterationKey key,
std::shared_ptr<const Iteration>& iteration) const;
Status IterationForIterationClientId(
int64_t iteration_client_id, std::shared_ptr<const Iteration>& iteration);
std::vector<int64_t> ListActiveClientIds();
int64_t NextAvailableIterationClientId() const;
int64_t NextAvailableTaskId() const;
Status TaskFromId(int64_t id, std::shared_ptr<const Task>& task) const;
Status TasksForIteration(
int64_t iteration_id,
std::vector<std::shared_ptr<const Task>>& tasks) const;
Status TasksForWorker(const absl::string_view worker_address,
std::vector<std::shared_ptr<const Task>>& tasks) const;
Status ValidateWorker(absl::string_view worker_address) const;
absl::StatusOr<int64_t> GetWorkerIndex(
absl::string_view worker_address) const;
const absl::flat_hash_set<std::string>& ListSnapshotPaths() const {
return snapshot_paths_;
}
std::optional<bool> CompressionDisabledAtRuntime(
const std::string& dataset_id) const;
int64_t GetNumberOfRegisteredWorkers() const { return workers_.size(); }
private:
void RegisterDataset(const RegisterDatasetUpdate& register_dataset);
void RegisterWorker(const RegisterWorkerUpdate& register_worker);
void CreateJob(const CreateJobUpdate& create_job);
void CreateIteration(const CreateIterationUpdate& create_iteration);
void ProduceSplit(const ProduceSplitUpdate& produce_split);
void AcquireIterationClient(
const AcquireIterationClientUpdate& acquire_iteration_client);
void ReleaseIterationClient(
const ReleaseIterationClientUpdate& release_iteration_client);
void GarbageCollectIteration(
const GarbageCollectIterationUpdate& garbage_collect_iteration);
void RemoveTask(const RemoveTaskUpdate& remove_task);
void CreatePendingTask(const CreatePendingTaskUpdate& create_pending_task);
void ClientHeartbeat(const ClientHeartbeatUpdate& client_heartbeat);
void CreateTask(const CreateTaskUpdate& create_task);
void FinishTask(const FinishTaskUpdate& finish_task);
void Snapshot(const SnapshotUpdate& snapshot);
void CompressionDisabledAtRuntime(const CompressionDisabledAtRuntimeUpdate&
compression_disabled_at_runtime);
void UpdateNextAvailableDatasetId();
int64_t next_available_dataset_id_ = 1000;
absl::flat_hash_map<std::string, std::shared_ptr<Dataset>> datasets_by_id_;
absl::flat_hash_map<std::string, std::shared_ptr<Worker>> workers_;
WorkerIndexResolver worker_index_resolver_;
int64_t next_available_job_id_ = 5000;
absl::flat_hash_map<int64_t, std::shared_ptr<Job>> jobs_by_id_;
absl::flat_hash_map<std::string, std::shared_ptr<Job>> jobs_by_name_;
int64_t next_available_iteration_id_ = 2000;
absl::flat_hash_map<int64_t, std::shared_ptr<Iteration>> iterations_;
absl::flat_hash_map<IterationKey, std::shared_ptr<Iteration>>
iterations_by_key_;
int64_t next_available_iteration_client_id_ = 3000;
absl::flat_hash_map<int64_t, std::shared_ptr<Iteration>>
iterations_for_client_ids_;
int64_t next_available_task_id_ = 4000;
TasksById tasks_;
absl::flat_hash_map<int64_t, std::vector<std::shared_ptr<Task>>>
tasks_by_iteration_;
absl::flat_hash_map<std::string, TasksById> tasks_by_worker_;
absl::flat_hash_set<std::string> snapshot_paths_;
absl::flat_hash_map<std::string, bool> compression_disabled_at_runtime_;
};
}
}
#endif
#include "tensorflow/core/data/service/dispatcher_state.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/journal.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
DispatcherState::DispatcherState()
: worker_index_resolver_(std::vector<std::string>{}) {}
DispatcherState::DispatcherState(
const experimental::DispatcherConfig& dispatcher_config)
: worker_index_resolver_(dispatcher_config.worker_addresses()) {}
Status DispatcherState::Apply(const Update& update) {
switch (update.update_type_case()) {
case Update::kRegisterDataset:
RegisterDataset(update.register_dataset());
break;
case Update::kRegisterWorker:
RegisterWorker(update.register_worker());
break;
case Update::kCreateJob:
CreateJob(update.create_job());
break;
case Update::kCreateIteration:
CreateIteration(update.create_iteration());
break;
case Update::kProduceSplit:
ProduceSplit(update.produce_split());
break;
case Update::kAcquireIterationClient:
AcquireIterationClient(update.acquire_iteration_client());
break;
case Update::kReleaseIterationClient:
ReleaseIterationClient(update.release_iteration_client());
break;
case Update::kGarbageCollectIteration:
GarbageCollectIteration(update.garbage_collect_iteration());
break;
case Update::kRemoveTask:
RemoveTask(update.remove_task());
break;
case Update::kCreatePendingTask:
CreatePendingTask(update.create_pending_task());
break;
case Update::kClientHeartbeat:
ClientHeartbeat(update.client_heartbeat());
break;
case Update::kCreateTask:
CreateTask(update.create_task());
break;
case Update::kFinishTask:
FinishTask(update.finish_task());
break;
case Update::kSnapshot:
Snapshot(update.snapshot());
break;
case Update::kCompressionDisabledAtRuntime:
CompressionDisabledAtRuntime(update.compression_disabled_at_runtime());
break;
case Update::UPDATE_TYPE_NOT_SET:
return errors::Internal("Update type not set.");
}
return absl::OkStatus();
}
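// DispatcherState is reconstructed after a restart by replaying journaled
// updates through Apply(). A restore loop is therefore just (sketch; `reader`
// is an illustrative journal reader with a Read(update, end_of_journal)
// interface):
//
//   Update update;
//   bool end_of_journal = false;
//   while (!end_of_journal) {
//     TF_RETURN_IF_ERROR(reader.Read(update, end_of_journal));
//     if (!end_of_journal) {
//       TF_RETURN_IF_ERROR(state.Apply(update));
//     }
//   }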
void DispatcherState::RegisterDataset(
const RegisterDatasetUpdate& register_dataset) {
std::string dataset_id = register_dataset.dataset_id();
auto dataset =
std::make_shared<Dataset>(dataset_id, register_dataset.metadata());
DCHECK(!datasets_by_id_.contains(dataset_id));
datasets_by_id_[dataset_id] = dataset;
UpdateNextAvailableDatasetId();
}
void DispatcherState::RegisterWorker(
const RegisterWorkerUpdate& register_worker) {
std::string address = register_worker.worker_address();
DCHECK(!workers_.contains(address));
workers_[address] = std::make_shared<Worker>(register_worker);
tasks_by_worker_[address] =
absl::flat_hash_map<int64_t, std::shared_ptr<Task>>();
worker_index_resolver_.AddWorker(address);
}
void DispatcherState::CreateJob(const CreateJobUpdate& create_job) {
int64_t job_id = create_job.job_id();
std::string job_name = create_job.job_name();
std::optional<int64_t> num_consumers;
if (create_job.optional_num_consumers_case() ==
CreateJobUpdate::kNumConsumers) {
num_consumers = create_job.num_consumers();
}
auto job = std::make_shared<Job>(
job_id, create_job.dataset_id(), create_job.processing_mode_def(),
job_name, num_consumers, create_job.use_cross_trainer_cache(),
create_job.target_workers());
DCHECK(!jobs_by_id_.contains(job_id));
jobs_by_id_[job_id] = job;
DCHECK(!jobs_by_name_.contains(job_name));
jobs_by_name_[job_name] = job;
next_available_job_id_ = std::max(next_available_job_id_, job_id + 1);
}
Status DispatcherState::JobFromId(int64_t job_id,
std::shared_ptr<const Job>& job) const {
auto it = jobs_by_id_.find(job_id);
if (it == jobs_by_id_.end()) {
return errors::NotFound("Job with id ", job_id, " not found");
}
job = it->second;
return absl::OkStatus();
}
Status DispatcherState::JobByName(const std::string& job_name,
std::shared_ptr<const Job>& job) const {
auto it = jobs_by_name_.find(job_name);
if (it == jobs_by_name_.end()) {
return errors::NotFound("Job with name ", job_name, " not found");
}
job = it->second;
return absl::OkStatus();
}
void DispatcherState::CreateIteration(
const CreateIterationUpdate& create_iteration) {
int64_t iteration_id = create_iteration.iteration_id();
int64_t job_id = create_iteration.job_id();
DCHECK(jobs_by_id_.contains(job_id));
auto& job = jobs_by_id_[job_id];
DCHECK(job);
IterationKey iteration_key(job->job_name, create_iteration.repetition());
auto iteration = std::make_shared<Iteration>(
iteration_id, iteration_key, create_iteration.num_split_providers(), job);
DCHECK(!iterations_.contains(iteration_id));
iterations_[iteration_id] = iteration;
tasks_by_iteration_[iteration_id] = std::vector<std::shared_ptr<Task>>();
DCHECK(!iterations_by_key_.contains(iteration_key) ||
iterations_by_key_[iteration_key]->garbage_collected);
iterations_by_key_[iteration_key] = iteration;
next_available_iteration_id_ =
std::max(next_available_iteration_id_, iteration_id + 1);
}
void DispatcherState::ProduceSplit(const ProduceSplitUpdate& produce_split) {
std::shared_ptr<Iteration> iteration =
iterations_[produce_split.iteration_id()];
DCHECK(iteration->distributed_epoch_state.has_value());
DistributedEpochState& state = iteration->distributed_epoch_state.value();
int64_t provider_index = produce_split.split_provider_index();
DCHECK_GE(produce_split.repetition(), state.repetitions[provider_index]);
state.repetitions[provider_index] = produce_split.repetition();
if (produce_split.finished()) {
state.repetitions[provider_index]++;
state.indices[provider_index] = 0;
return;
}
state.indices[provider_index]++;
}
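// ProduceSplit records per-split-provider progress: finishing a repetition
// advances the provider's repetition counter and rewinds its split index to
// zero; otherwise only the index advances. On restart, this state lets the
// dispatcher fast-forward each split provider to where it left off.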
void DispatcherState::AcquireIterationClient(
const AcquireIterationClientUpdate& acquire_iteration_client) {
int64_t iteration_client_id = acquire_iteration_client.iteration_client_id();
std::shared_ptr<Iteration>& iteration =
iterations_for_client_ids_[iteration_client_id];
DCHECK(!iteration);
iteration = iterations_[acquire_iteration_client.iteration_id()];
DCHECK(iteration);
iteration->num_clients++;
next_available_iteration_client_id_ =
std::max(next_available_iteration_client_id_, iteration_client_id + 1);
}
void DispatcherState::ReleaseIterationClient(
const ReleaseIterationClientUpdate& release_iteration_client) {
int64_t iteration_client_id = release_iteration_client.iteration_client_id();
std::shared_ptr<Iteration>& iteration =
iterations_for_client_ids_[iteration_client_id];
DCHECK(iteration);
iteration->num_clients--;
DCHECK_GE(iteration->num_clients, 0);
iteration->last_client_released_micros =
release_iteration_client.time_micros();
iterations_for_client_ids_.erase(iteration_client_id);
}
void DispatcherState::GarbageCollectIteration(
const GarbageCollectIterationUpdate& garbage_collect_iteration) {
int64_t iteration_id = garbage_collect_iteration.iteration_id();
for (auto& task : tasks_by_iteration_[iteration_id]) {
task->finished = true;
tasks_by_worker_[task->worker_address].erase(task->task_id);
}
iterations_[iteration_id]->finished = true;
iterations_[iteration_id]->garbage_collected = true;
}
void DispatcherState::RemoveTask(const RemoveTaskUpdate& remove_task) {
std::shared_ptr<Task>& task = tasks_[remove_task.task_id()];
DCHECK(task);
task->removed = true;
auto& tasks_for_iteration =
tasks_by_iteration_[task->iteration->iteration_id];
for (auto it = tasks_for_iteration.begin(); it != tasks_for_iteration.end();
++it) {
if ((*it)->task_id == task->task_id) {
tasks_for_iteration.erase(it);
break;
}
}
tasks_by_worker_[task->worker_address].erase(task->task_id);
tasks_.erase(task->task_id);
VLOG(1) << "Removed task " << remove_task.task_id() << " from worker "
<< task->worker_address;
}
void DispatcherState::CreatePendingTask(
const CreatePendingTaskUpdate& create_pending_task) {
int64_t task_id = create_pending_task.task_id();
auto& task = tasks_[task_id];
DCHECK_EQ(task, nullptr);
auto& iteration = iterations_[create_pending_task.iteration_id()];
DCHECK_NE(iteration, nullptr);
task = std::make_shared<Task>(create_pending_task, iteration);
iteration->pending_tasks.emplace(task, create_pending_task.starting_round());
tasks_by_worker_[create_pending_task.worker_address()][task->task_id] = task;
next_available_task_id_ = std::max(next_available_task_id_, task_id + 1);
}
void DispatcherState::ClientHeartbeat(
const ClientHeartbeatUpdate& client_heartbeat) {
int64_t iteration_client_id = client_heartbeat.iteration_client_id();
auto& iteration = iterations_for_client_ids_[iteration_client_id];
DCHECK(!iteration->pending_tasks.empty());
auto& task = iteration->pending_tasks.front();
if (client_heartbeat.has_task_rejected()) {
task.failures++;
task.ready_consumers.clear();
task.target_round = client_heartbeat.task_rejected().new_target_round();
}
if (client_heartbeat.task_accepted()) {
task.ready_consumers.insert(iteration_client_id);
if (task.ready_consumers.size() == iteration->job->num_consumers.value()) {
VLOG(1) << "Promoting task " << task.task->task_id
<< " from pending to active";
task.task->starting_round = task.target_round;
tasks_by_iteration_[iteration->iteration_id].push_back(task.task);
iteration->pending_tasks.pop();
}
}
}
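// A pending round-robin task is promoted only once every consumer has
// accepted the proposed round: each acceptance adds the client to
// ready_consumers, and when that set reaches num_consumers the task becomes
// active starting at target_round. Any rejection clears the set and bumps
// target_round, forcing all consumers to re-vote.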
void DispatcherState::CreateTask(const CreateTaskUpdate& create_task) {
int64_t task_id = create_task.task_id();
auto& task = tasks_[task_id];
DCHECK_EQ(task, nullptr);
auto& iteration = iterations_[create_task.iteration_id()];
DCHECK_NE(iteration, nullptr);
task = std::make_shared<Task>(create_task, iteration);
tasks_by_iteration_[create_task.iteration_id()].push_back(task);
tasks_by_worker_[create_task.worker_address()][task->task_id] = task;
next_available_task_id_ = std::max(next_available_task_id_, task_id + 1);
}
void DispatcherState::FinishTask(const FinishTaskUpdate& finish_task) {
VLOG(2) << "Marking task " << finish_task.task_id() << " as finished";
int64_t task_id = finish_task.task_id();
auto& task = tasks_[task_id];
DCHECK(task != nullptr);
task->finished = true;
tasks_by_worker_[task->worker_address].erase(task->task_id);
bool all_finished = true;
for (const auto& task_for_iteration :
tasks_by_iteration_[task->iteration->iteration_id]) {
if (!task_for_iteration->finished) {
all_finished = false;
}
}
VLOG(3) << "Iteration " << task->iteration->iteration_id
<< " finished: " << all_finished;
iterations_[task->iteration->iteration_id]->finished = all_finished;
}
std::string DispatcherState::NextAvailableDatasetId() const {
return absl::StrCat(next_available_dataset_id_);
}
void DispatcherState::UpdateNextAvailableDatasetId() {
while (datasets_by_id_.contains(absl::StrCat(next_available_dataset_id_))) {
++next_available_dataset_id_;
}
}
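// Dataset ids may also be caller-supplied strings, so the counter cannot
// simply increment: the loop above skips past any numeric id that has
// already been claimed explicitly (e.g. registering "1000" directly pushes
// NextAvailableDatasetId() to "1001").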
Status DispatcherState::DatasetFromId(
const std::string& id, std::shared_ptr<const Dataset>& dataset) const {
auto it = datasets_by_id_.find(id);
if (it == datasets_by_id_.end()) {
return errors::NotFound("Dataset id ", id, " not found");
}
dataset = it->second;
return absl::OkStatus();
}
Status DispatcherState::WorkerFromAddress(
const std::string& address, std::shared_ptr<const Worker>& worker) const {
auto it = workers_.find(address);
if (it == workers_.end()) {
return errors::NotFound("Worker with address ", address, " not found.");
}
worker = it->second;
return absl::OkStatus();
}
std::vector<std::shared_ptr<const DispatcherState::Worker>>
DispatcherState::ListWorkers() const {
std::vector<std::shared_ptr<const Worker>> workers;
workers.reserve(workers_.size());
for (const auto& it : workers_) {
workers.push_back(it.second);
}
return workers;
}
std::vector<std::shared_ptr<const DispatcherState::Iteration>>
DispatcherState::ListIterations() const {
std::vector<std::shared_ptr<const DispatcherState::Iteration>> iterations;
iterations.reserve(iterations_.size());
for (const auto& it : iterations_) {
iterations.push_back(it.second);
}
return iterations;
}
Status DispatcherState::IterationFromId(
int64_t id, std::shared_ptr<const Iteration>& iteration) const {
auto it = iterations_.find(id);
if (it == iterations_.end()) {
return errors::NotFound("Iteration id ", id, " not found");
}
iteration = it->second;
return absl::OkStatus();
}
Status DispatcherState::IterationByKey(
IterationKey iteration_key,
std::shared_ptr<const Iteration>& iteration) const {
auto it = iterations_by_key_.find(iteration_key);
if (it == iterations_by_key_.end()) {
return errors::NotFound("Iteration key ", iteration_key.DebugString(),
" not found");
}
iteration = it->second;
return absl::OkStatus();
}
int64_t DispatcherState::NextAvailableJobId() const {
return next_available_job_id_;
}
int64_t DispatcherState::NextAvailableIterationId() const {
return next_available_iteration_id_;
}
Status DispatcherState::IterationForIterationClientId(
int64_t iteration_client_id, std::shared_ptr<const Iteration>& iteration) {
iteration = iterations_for_client_ids_[iteration_client_id];
if (!iteration) {
return errors::NotFound("Iteration client id not found: ",
iteration_client_id);
}
return absl::OkStatus();
}
std::vector<int64_t> DispatcherState::ListActiveClientIds() {
std::vector<int64_t> ids;
for (const auto& it : iterations_for_client_ids_) {
if (it.second && !it.second->finished) {
ids.push_back(it.first);
}
}
return ids;
}
int64_t DispatcherState::NextAvailableIterationClientId() const {
return next_available_iteration_client_id_;
}
Status DispatcherState::TaskFromId(int64_t id,
std::shared_ptr<const Task>& task) const {
auto it = tasks_.find(id);
if (it == tasks_.end()) {
return errors::NotFound("Task ", id, " not found");
}
task = it->second;
return absl::OkStatus();
}
Status DispatcherState::TasksForIteration(
int64_t iteration_id,
std::vector<std::shared_ptr<const Task>>& tasks) const {
auto it = tasks_by_iteration_.find(iteration_id);
if (it == tasks_by_iteration_.end()) {
return errors::NotFound("Iteration ", iteration_id, " not found");
}
tasks.clear();
tasks.reserve(it->second.size());
for (const auto& task : it->second) {
tasks.push_back(task);
}
return absl::OkStatus();
}
Status DispatcherState::TasksForWorker(
absl::string_view worker_address,
std::vector<std::shared_ptr<const Task>>& tasks) const {
tasks.clear();
auto it = tasks_by_worker_.find(worker_address);
if (it == tasks_by_worker_.end()) {
return errors::NotFound("Worker ", worker_address, " not found");
}
const absl::flat_hash_map<int64_t, std::shared_ptr<Task>>& worker_tasks =
it->second;
tasks.reserve(worker_tasks.size());
for (const auto& task : worker_tasks) {
tasks.push_back(task.second);
}
return absl::OkStatus();
}
int64_t DispatcherState::NextAvailableTaskId() const {
return next_available_task_id_;
}
Status DispatcherState::ValidateWorker(absl::string_view worker_address) const {
return worker_index_resolver_.ValidateWorker(worker_address);
}
absl::StatusOr<int64_t> DispatcherState::GetWorkerIndex(
absl::string_view worker_address) const {
return worker_index_resolver_.GetWorkerIndex(worker_address);
}
void DispatcherState::Snapshot(const SnapshotUpdate& snapshot) {
snapshot_paths_.insert(snapshot.path());
}
void DispatcherState::CompressionDisabledAtRuntime(
const Compr | #include "tensorflow/core/data/service/dispatcher_state.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using Dataset = DispatcherState::Dataset;
using Worker = DispatcherState::Worker;
using IterationKey = DispatcherState::IterationKey;
using Job = DispatcherState::Job;
using Iteration = DispatcherState::Iteration;
using Task = DispatcherState::Task;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::StatusIs;
Status RegisterDataset(const std::string& dataset_id, DispatcherState& state) {
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id(dataset_id);
return state.Apply(update);
}
Status RegisterWorker(std::string worker_address, DispatcherState& state) {
Update update;
update.mutable_register_worker()->set_worker_address(worker_address);
return state.Apply(update);
}
Status CreateJob(int64_t job_id, const std::string& dataset_id,
const std::string& job_name, DispatcherState& state) {
Update update;
CreateJobUpdate* create_job = update.mutable_create_job();
create_job->set_job_id(job_id);
create_job->set_dataset_id(dataset_id);
create_job->set_job_name(job_name);
return state.Apply(update);
}
Status CreateIteration(int64_t iteration_id, const std::string& dataset_id,
const IterationKey& named_iteration_key,
DispatcherState& state) {
int64_t job_id = state.NextAvailableJobId();
TF_RETURN_IF_ERROR(
CreateJob(job_id, dataset_id, named_iteration_key.name, state));
Update update;
CreateIterationUpdate* create_iteration = update.mutable_create_iteration();
create_iteration->set_job_id(job_id);
create_iteration->set_iteration_id(iteration_id);
create_iteration->set_repetition(named_iteration_key.repetition);
return state.Apply(update);
}
Status CreateIteration(int64_t iteration_id, const std::string& dataset_id,
DispatcherState& state) {
IterationKey key(absl::StrCat(random::New64()), 0);
return CreateIteration(iteration_id, dataset_id, key, state);
}
Status AcquireIterationClientId(int64_t iteration_id,
int64_t iteration_client_id,
DispatcherState& state) {
Update update;
AcquireIterationClientUpdate* acquire_iteration_client =
update.mutable_acquire_iteration_client();
acquire_iteration_client->set_iteration_id(iteration_id);
acquire_iteration_client->set_iteration_client_id(iteration_client_id);
return state.Apply(update);
}
Status ReleaseIterationClientId(int64_t iteration_client_id,
int64_t release_time, DispatcherState& state) {
Update update;
ReleaseIterationClientUpdate* release_iteration_client =
update.mutable_release_iteration_client();
release_iteration_client->set_iteration_client_id(iteration_client_id);
release_iteration_client->set_time_micros(release_time);
return state.Apply(update);
}
Status CreateTask(int64_t task_id, int64_t iteration_id,
const std::string& worker_address, DispatcherState& state) {
Update update;
CreateTaskUpdate* create_task = update.mutable_create_task();
create_task->set_task_id(task_id);
create_task->set_iteration_id(iteration_id);
create_task->set_worker_address(worker_address);
return state.Apply(update);
}
Status FinishTask(int64_t task_id, DispatcherState& state) {
Update update;
FinishTaskUpdate* finish_task = update.mutable_finish_task();
finish_task->set_task_id(task_id);
return state.Apply(update);
}
Status Snapshot(const std::string& path, DispatcherState& state) {
Update update;
SnapshotUpdate* snapshot = update.mutable_snapshot();
snapshot->set_path(path);
return state.Apply(update);
}
}
TEST(DispatcherState, RegisterDataset) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t dataset_id_int;
ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int));
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1));
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
EXPECT_TRUE(dataset->metadata.element_spec().empty());
EXPECT_EQ(dataset->metadata.compression(),
DataServiceMetadata::COMPRESSION_UNSPECIFIED);
}
TEST(DispatcherState, RegisterDatasetWithExplicitID) {
DispatcherState state;
TF_EXPECT_OK(RegisterDataset("dataset_id", state));
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId("dataset_id", dataset));
EXPECT_EQ(dataset->dataset_id, "dataset_id");
}
TEST(DispatcherState, RegisterDatasetsWithDifferentIDs) {
DispatcherState state;
TF_EXPECT_OK(RegisterDataset("dataset_id1", state));
TF_EXPECT_OK(RegisterDataset("dataset_id2", state));
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId("dataset_id1", dataset));
EXPECT_EQ(dataset->dataset_id, "dataset_id1");
TF_EXPECT_OK(state.DatasetFromId("dataset_id2", dataset));
EXPECT_EQ(dataset->dataset_id, "dataset_id2");
}
TEST(DispatcherState, RegisterDatasetCompression) {
DispatcherState state;
const std::string dataset_id = state.NextAvailableDatasetId();
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id(dataset_id);
register_dataset->mutable_metadata()->set_compression(
DataServiceMetadata::COMPRESSION_SNAPPY);
TF_ASSERT_OK(state.Apply(update));
{
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
EXPECT_EQ(dataset->metadata.compression(),
DataServiceMetadata::COMPRESSION_SNAPPY);
}
}
TEST(DispatcherState, RegisterDatasetElementSpec) {
DispatcherState state;
const std::string dataset_id = state.NextAvailableDatasetId();
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id(dataset_id);
register_dataset->mutable_metadata()->set_element_spec(
"encoded_element_spec");
TF_ASSERT_OK(state.Apply(update));
{
std::shared_ptr<const Dataset> dataset;
TF_EXPECT_OK(state.DatasetFromId(dataset_id, dataset));
EXPECT_EQ(dataset->metadata.element_spec(), "encoded_element_spec");
}
}
TEST(DispatcherState, MissingDatasetId) {
DispatcherState state;
std::shared_ptr<const Dataset> dataset;
Status s = state.DatasetFromId("missing_dataset_id", dataset);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
TEST(DispatcherState, NextAvailableDatasetId) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t dataset_id_int;
ASSERT_TRUE(absl::SimpleAtoi(dataset_id, &dataset_id_int));
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
EXPECT_NE(state.NextAvailableDatasetId(), dataset_id);
EXPECT_EQ(state.NextAvailableDatasetId(), absl::StrCat(dataset_id_int + 1));
EXPECT_EQ(state.NextAvailableDatasetId(), state.NextAvailableDatasetId());
}
TEST(DispatcherState, RegisterWorker) {
DispatcherState state;
std::string address = "test_worker_address";
TF_EXPECT_OK(RegisterWorker(address, state));
std::shared_ptr<const Worker> worker;
TF_EXPECT_OK(state.WorkerFromAddress(address, worker));
EXPECT_EQ(worker->address, address);
}
TEST(DispatcherState, RegisterWorkerInFixedWorkerSet) {
experimental::DispatcherConfig config;
config.add_worker_addresses("/worker/task/0");
config.add_worker_addresses("/worker/task/1");
config.add_worker_addresses("/worker/task/2");
DispatcherState state(config);
TF_EXPECT_OK(state.ValidateWorker("/worker/task/0:20000"));
TF_EXPECT_OK(state.ValidateWorker("/worker/task/1:20000"));
TF_EXPECT_OK(state.ValidateWorker("/worker/task/2:20000"));
TF_EXPECT_OK(RegisterWorker("/worker/task/0:20000", state));
TF_EXPECT_OK(RegisterWorker("/worker/task/1:20000", state));
TF_EXPECT_OK(RegisterWorker("/worker/task/2:20000", state));
std::shared_ptr<const Worker> worker;
TF_EXPECT_OK(state.WorkerFromAddress("/worker/task/0:20000", worker));
EXPECT_EQ(worker->address, "/worker/task/0:20000");
}
TEST(DispatcherState, RegisterInvalidWorkerInFixedWorkerSet) {
experimental::DispatcherConfig config;
config.add_worker_addresses("/worker/task/0");
config.add_worker_addresses("/worker/task/1");
config.add_worker_addresses("/worker/task/2");
DispatcherState state(config);
EXPECT_THAT(state.ValidateWorker("localhost:20000"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
TF_EXPECT_OK(RegisterWorker("localhost:20000", state));
std::shared_ptr<const Worker> worker;
EXPECT_THAT(state.WorkerFromAddress("/worker/task/0:20000", worker),
StatusIs(error::NOT_FOUND,
"Worker with address /worker/task/0:20000 not found."));
}
TEST(DispatcherState, ListWorkers) {
DispatcherState state;
std::string address_1 = "address_1";
std::string address_2 = "address_2";
{
std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
EXPECT_THAT(workers, IsEmpty());
}
TF_EXPECT_OK(RegisterWorker(address_1, state));
{
std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
EXPECT_THAT(workers, SizeIs(1));
}
TF_EXPECT_OK(RegisterWorker(address_2, state));
{
std::vector<std::shared_ptr<const Worker>> workers = state.ListWorkers();
EXPECT_THAT(workers, SizeIs(2));
}
}
TEST(DispatcherState, MissingWorker) {
DispatcherState state;
std::shared_ptr<const Worker> worker;
Status s = state.WorkerFromAddress("test_worker_address", worker);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
TEST(DispatcherState, UnknownUpdate) {
DispatcherState state;
Update update;
Status s = state.Apply(update);
EXPECT_EQ(s.code(), error::INTERNAL);
}
TEST(DispatcherState, JobName) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t job_id = state.NextAvailableJobId();
std::string job_name = "test_name";
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateJob(job_id, dataset_id, job_name, state));
std::shared_ptr<const Job> job;
TF_EXPECT_OK(state.JobByName(job_name, job));
EXPECT_EQ(state.NextAvailableJobId(), job_id + 1);
EXPECT_EQ(job->dataset_id, dataset_id);
EXPECT_FALSE(job->use_cross_trainer_cache);
}
TEST(DispatcherState, JobData) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
int64_t job_id = state.NextAvailableJobId();
int64_t num_consumers = 8;
bool use_cross_trainer_cache = true;
TF_ASSERT_OK(RegisterDataset(dataset_id, state));
Update update;
CreateJobUpdate* create_job = update.mutable_create_job();
create_job->set_job_id(job_id);
create_job->set_dataset_id(dataset_id);
create_job->set_num_consumers(num_consumers);
create_job->set_use_cross_trainer_cache(use_cross_trainer_cache);
TF_ASSERT_OK(state.Apply(update));
std::shared_ptr<const Job> job;
TF_ASSERT_OK(state.JobFromId(job_id, job));
EXPECT_EQ(job->num_consumers, num_consumers);
EXPECT_EQ(job->use_cross_trainer_cache, use_cross_trainer_cache);
}
TEST(DispatcherState, CrossTrainerCacheTask) {
DispatcherState state;
std::string dataset_id = state.NextAvailableDatasetId();
std::string worker_address = "test_worker_address";
TF_ASSERT_OK(RegisterDataset(dataset_id, state));
int64_t job_id = state.NextAvailableJobId();
Update job_update;
CreateJobUpdate* create_job = job_update.mutable_create_job();
create_job->set_job_id(job_id);
create_job->set_dataset_id(dataset_id);
create_job->set_use_cross_trainer_cache(true);
TF_ASSERT_OK(state.Apply(job_update));
int64_t iteration_id = state.NextAvailableIterationId();
Update iteration_update;
CreateIterationUpdate* create_iteration =
iteration_update.mutable_create_iteration();
create_iteration->set_job_id(job_id);
create_iteration->set_iteration_id(iteration_id);
TF_ASSERT_OK(state.Apply(iteration_update));
int64_t task_id = state.NextAvailableTaskId();
TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
std::shared_ptr<const Task> task;
TF_EXPECT_OK(state.TaskFromId(task_id, task));
EXPECT_EQ(task->iteration->iteration_id, iteration_id);
EXPECT_EQ(task->task_id, task_id);
EXPECT_EQ(task->worker_address, worker_address);
EXPECT_TRUE(task->iteration->job->use_cross_trainer_cache);
}
TEST(DispatcherState, CreateTask) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
std::string worker_address = "test_worker_address";
DispatcherState state;
int64_t task_id = state.NextAvailableTaskId();
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
EXPECT_EQ(state.NextAvailableTaskId(), task_id + 1);
{
std::shared_ptr<const Task> task;
TF_EXPECT_OK(state.TaskFromId(task_id, task));
EXPECT_EQ(task->iteration->iteration_id, iteration_id);
EXPECT_EQ(task->task_id, task_id);
EXPECT_EQ(task->worker_address, worker_address);
EXPECT_FALSE(task->iteration->job->use_cross_trainer_cache);
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks));
EXPECT_THAT(tasks, SizeIs(1));
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
EXPECT_EQ(1, tasks.size());
}
}
TEST(DispatcherState, CreateTasksForSameIteration) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id, tasks));
EXPECT_THAT(tasks, SizeIs(2));
}
}
TEST(DispatcherState, CreateTasksForDifferentIterations) {
std::string dataset_id = "dataset_id";
int64_t iteration_id_1 = 3;
int64_t iteration_id_2 = 4;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id_1, dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id_2, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id_1, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id_2, worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id_1, tasks));
EXPECT_THAT(tasks, SizeIs(1));
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForIteration(iteration_id_2, tasks));
EXPECT_THAT(tasks, SizeIs(1));
}
}
TEST(DispatcherState, CreateTasksForSameWorker) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
EXPECT_EQ(2, tasks.size());
}
}
TEST(DispatcherState, CreateTasksForDifferentWorkers) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 8;
int64_t task_id_2 = 9;
std::string worker_address_1 = "test_worker_address_1";
std::string worker_address_2 = "test_worker_address_2";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address_1, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address_2, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address_1, tasks));
EXPECT_EQ(1, tasks.size());
}
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address_2, tasks));
EXPECT_EQ(1, tasks.size());
}
}
TEST(DispatcherState, GetTasksForWorkerEmpty) {
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterWorker(worker_address, state));
{
std::vector<std::shared_ptr<const Task>> tasks;
TF_EXPECT_OK(state.TasksForWorker(worker_address, tasks));
EXPECT_EQ(0, tasks.size());
}
}
TEST(DispatcherState, FinishTask) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id = 4;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id, iteration_id, worker_address, state));
TF_EXPECT_OK(FinishTask(task_id, state));
std::shared_ptr<const Task> task;
TF_EXPECT_OK(state.TaskFromId(task_id, task));
EXPECT_TRUE(task->finished);
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_TRUE(iteration->finished);
}
TEST(DispatcherState, FinishMultiTaskIteration) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t task_id_1 = 4;
int64_t task_id_2 = 5;
std::string worker_address = "test_worker_address";
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(CreateTask(task_id_1, iteration_id, worker_address, state));
TF_EXPECT_OK(CreateTask(task_id_2, iteration_id, worker_address, state));
TF_EXPECT_OK(FinishTask(task_id_1, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_FALSE(iteration->finished);
}
TF_EXPECT_OK(FinishTask(task_id_2, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_TRUE(iteration->finished);
}
}
TEST(DispatcherState, AcquireIterationClientId) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id_1 = 1;
int64_t iteration_client_id_2 = 2;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_EQ(iteration->num_clients, 1);
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
EXPECT_EQ(iteration->num_clients, 2);
}
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(
state.IterationForIterationClientId(iteration_client_id_1, iteration));
EXPECT_EQ(iteration->iteration_id, iteration_id);
}
{
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(
state.IterationForIterationClientId(iteration_client_id_2, iteration));
EXPECT_EQ(iteration->iteration_id, iteration_id);
}
}
TEST(DispatcherState, ReleaseIterationClientId) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id = 6;
int64_t release_time = 100;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id, state));
TF_EXPECT_OK(
ReleaseIterationClientId(iteration_client_id, release_time, state));
std::shared_ptr<const Iteration> iteration;
TF_EXPECT_OK(state.IterationFromId(iteration_id, iteration));
EXPECT_EQ(iteration->num_clients, 0);
Status s =
state.IterationForIterationClientId(iteration_client_id, iteration);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
TEST(DispatcherState, ListActiveClientsEmpty) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id = 6;
int64_t release_time = 100;
DispatcherState state;
EXPECT_THAT(state.ListActiveClientIds(), IsEmpty());
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id, state));
TF_EXPECT_OK(
ReleaseIterationClientId(iteration_client_id, release_time, state));
EXPECT_THAT(state.ListActiveClientIds(), IsEmpty());
}
TEST(DispatcherState, ListActiveClients) {
std::string dataset_id = "dataset_id";
int64_t iteration_id = 3;
int64_t iteration_client_id_1 = 6;
int64_t iteration_client_id_2 = 7;
int64_t iteration_client_id_3 = 8;
int64_t release_time = 100;
DispatcherState state;
TF_EXPECT_OK(RegisterDataset(dataset_id, state));
TF_EXPECT_OK(CreateIteration(iteration_id, dataset_id, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_1, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_2, state));
TF_EXPECT_OK(
ReleaseIterationClientId(iteration_client_id_2, release_time, state));
TF_EXPECT_OK(
AcquireIterationClientId(iteration_id, iteration_client_id_3, state));
EXPECT_THAT(state.ListActiveClientIds(), UnorderedElementsAre(6, 8));
}
TEST(DispatcherState, ListSnapshotPaths) {
DispatcherState state;
absl::flat_hash_set<std::string> snapshot_paths = {"p1", "p2"};
for (const auto& snapshot_path : snapshot_paths) {
TF_EXPECT_OK(Snapshot(snapshot_path, state));
}
EXPECT_EQ(state.ListSnapshotPaths(), snapshot_paths);
}
TEST(DispatcherState, GetNumberOfRegisteredWorkers) {
DispatcherState state;
std::string address_1 = "address_1";
std::string address_2 = "address_2";
EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 0);
TF_EXPECT_OK(RegisterWorker(address_1, state));
EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 1);
TF_EXPECT_OK(RegisterWorker(address_2, state));
EXPECT_EQ(state.GetNumberOfRegisteredWorkers(), 2);
}
}
} |
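The DispatcherState tests in the row above funnel every state change through the journal: each helper (RegisterDataset, CreateJob, CreateTask, FinishTask, ...) wraps exactly one Update proto and applies it with state.Apply. Below is a minimal sketch of what one such helper plausibly looks like, inferred from the inline CreateJobUpdate construction in the JobData test; the signature and the set_job_name call are assumptions, not the test file's verbatim code.

// Assumed shape of a test helper: build one journal update, then apply it.
Status CreateJob(int64_t job_id, const std::string& dataset_id,
                 const std::string& job_name, DispatcherState& state) {
  Update update;
  CreateJobUpdate* create_job = update.mutable_create_job();
  create_job->set_job_id(job_id);
  create_job->set_dataset_id(dataset_id);
  create_job->set_job_name(job_name);  // assumed field; JobByName relies on it
  return state.Apply(update);
}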
1,734 | cpp | tensorflow/tensorflow | validate_utils | tensorflow/core/data/service/client/validate_utils.cc | tensorflow/core/data/service/client/validate_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_CLIENT_VALIDATE_UTILS_H_
#define TENSORFLOW_CORE_DATA_SERVICE_CLIENT_VALIDATE_UTILS_H_
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace data {
Status ValidateDataServiceParams(const DataServiceParams& data_service_params);
}
}
#endif
#include "tensorflow/core/data/service/client/validate_utils.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
Status ValidateLocalWorkers(const DataServiceParams& data_service_params) {
if (data_service_params.target_workers != TARGET_WORKERS_LOCAL) {
return absl::OkStatus();
}
if (LocalWorkers::Empty()) {
if (IsStaticShard(data_service_params.processing_mode)) {
return errors::InvalidArgument(
"Static sharding policy <",
ProcessingModeDef::ShardingPolicy_Name(
data_service_params.processing_mode.sharding_policy()),
"> requires local tf.data workers, but no local worker is found. "
"You need to run local tf.data service workers in your training "
"workers. Static sharding also requires a fixed worker pool and "
"a list of worker addresses in the DispatcherConfig. See the "
"\"Processing Modes\" section in the module doc for details.");
}
return errors::InvalidArgument(
"Local reads require local tf.data workers, but no local worker "
"is found. You need to run local tf.data service workers in your "
"training workers.");
}
if (data_service_params.num_consumers.has_value()) {
return errors::InvalidArgument(
"Coordinated reads require non-local workers, but `target_workers` "
"is \"LOCAL\".");
}
return absl::OkStatus();
}
Status ValidateCrossTrainerCache(const DataServiceParams& data_service_params) {
if (!data_service_params.cross_trainer_cache_options.has_value()) {
return absl::OkStatus();
}
if (data_service_params.job_name.empty()) {
return errors::InvalidArgument(
"Cross-trainer caching requires named jobs. Got empty `job_name`.");
}
if (data_service_params.metadata.cardinality() >= 0) {
return errors::InvalidArgument(
"Cross-trainer caching requires the input dataset to be infinite. "
"Got input with cardinality ",
data_service_params.metadata.cardinality());
}
if (data_service_params.repetition > 1) {
return errors::InvalidArgument(
"Cross-trainer caching requires infinite datasets and disallows "
"multiple repetitions of the same dataset. Got repetition ",
data_service_params.repetition);
}
if (data_service_params.num_consumers.has_value()) {
return errors::InvalidArgument(
"Cross-trainer caching does not support coordinated reads. "
"Got number of coordinated consumers: ",
data_service_params.num_consumers.value());
}
return absl::OkStatus();
}
}
Status ValidateDataServiceParams(const DataServiceParams& data_service_params) {
TF_RETURN_IF_ERROR(ValidateLocalWorkers(data_service_params));
TF_RETURN_IF_ERROR(ValidateCrossTrainerCache(data_service_params));
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/client/validate_utils.h"
#include <memory>
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
DataServiceParams GetDefaultParams() {
DataServiceParams params;
params.dataset_id = "dataset_id";
params.processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
params.address = "localhost";
params.protocol = "grpc";
params.data_transfer_protocol = "grpc";
params.metadata.set_cardinality(kUnknownCardinality);
return params;
}
std::shared_ptr<DataServiceWorkerImpl> GetLocalWorker() {
experimental::WorkerConfig config;
config.set_protocol("grpc");
config.set_dispatcher_address("localhost");
config.set_worker_address("localhost");
return std::make_shared<DataServiceWorkerImpl>(config);
}
TEST(ValidateUtilsTest, DefaultParams) {
TF_EXPECT_OK(ValidateDataServiceParams(GetDefaultParams()));
}
TEST(ValidateUtilsTest, LocalWorkerSuccess) {
DataServiceParams params = GetDefaultParams();
LocalWorkers::Add("localhost", GetLocalWorker());
params.target_workers = TARGET_WORKERS_LOCAL;
TF_EXPECT_OK(ValidateDataServiceParams(params));
LocalWorkers::Remove("localhost");
}
TEST(ValidateUtilsTest, NoLocalWorker) {
DataServiceParams params = GetDefaultParams();
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Local reads require local tf.data workers, but no local worker "
"is found.")));
}
TEST(ValidateUtilsTest, NoLocalWorkerStaticSharding) {
DataServiceParams params = GetDefaultParams();
params.processing_mode.set_sharding_policy(ProcessingModeDef::FILE_OR_DATA);
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Static sharding policy <FILE_OR_DATA> requires local tf.data "
"workers, but no local worker is found.")));
}
TEST(ValidateUtilsTest, LocalReadDisallowsCoordinatedRead) {
DataServiceParams params = GetDefaultParams();
LocalWorkers::Add("localhost", GetLocalWorker());
params.num_consumers = 1;
params.consumer_index = 0;
params.target_workers = TARGET_WORKERS_LOCAL;
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Coordinated reads require non-local workers, but "
"`target_workers` is \"LOCAL\".")));
LocalWorkers::Remove("localhost");
}
TEST(ValidateUtilsTest, CrossTrainerCacheSuccess) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
TF_EXPECT_OK(ValidateDataServiceParams(params));
}
TEST(ValidateUtilsTest, CrossTrainerCacheRequiresJobName) {
DataServiceParams params = GetDefaultParams();
params.repetition = 1;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
"Cross-trainer caching requires named jobs. Got empty `job_name`."));
}
TEST(ValidateUtilsTest, CrossTrainerCacheRequiresInfiniteDataset) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.metadata.set_cardinality(10);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(ValidateDataServiceParams(params),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
}
TEST(ValidateUtilsTest, CrossTrainerCacheDisallowsRepetition) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 5;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Cross-trainer caching requires infinite datasets and disallows "
"multiple repetitions of the same dataset.")));
}
TEST(ValidateUtilsTest, CrossTrainerCacheDisallowsCoordinatedRead) {
DataServiceParams params = GetDefaultParams();
params.job_name = "job_name";
params.repetition = 1;
params.num_consumers = 1;
params.consumer_index = 0;
params.metadata.set_cardinality(kInfiniteCardinality);
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer ID");
EXPECT_THAT(
ValidateDataServiceParams(params),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Cross-trainer caching does not support coordinated reads.")));
}
}
}
} |
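The validate_utils tests above enumerate the failure modes one at a time; for reference, the happy path that satisfies every cross-trainer cache check looks like the sketch below. The field values are illustrative, mirroring GetDefaultParams from the test rather than any required constants.

DataServiceParams params;
params.processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
params.metadata.set_cardinality(kInfiniteCardinality);  // must be infinite
params.job_name = "shared_job";                         // must be non-empty
params.repetition = 1;                                  // at most one repetition
params.cross_trainer_cache_options.emplace();
params.cross_trainer_cache_options->set_trainer_id("trainer_0");
Status s = ValidateDataServiceParams(params);  // expected to be OK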
1,735 | cpp | tensorflow/tensorflow | dispatcher_client | tensorflow/core/data/service/dispatcher_client.cc | tensorflow/core/data/service/dispatcher_client_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_CLIENT_H_
#define TENSORFLOW_CORE_DATA_SERVICE_DISPATCHER_CLIENT_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
namespace tensorflow {
namespace data {
class DataServiceDispatcherClient : public DataServiceClientBase {
public:
DataServiceDispatcherClient(const std::string& address,
const std::string& protocol)
: DataServiceClientBase(address, protocol) {}
Status Initialize() override;
absl::StatusOr<WorkerHeartbeatResponse> WorkerHeartbeat(
const WorkerHeartbeatRequest& request);
Status WorkerUpdate(const std::string& worker_address,
std::vector<TaskProgress>& task_progress);
Status GetDatasetDef(const std::string& dataset_id, DatasetDef& dataset_def);
Status GetSplit(int64_t iteration_id, int64_t repetition,
int64_t split_provider_index, Tensor& split,
bool& end_of_splits);
virtual Status GetSnapshotSplit(const std::string& worker_address,
const std::string& base_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index, Tensor& split,
int64_t& local_split_index,
bool& end_of_splits);
Status Snapshot(const DatasetDef& dataset, const std::string& path,
const experimental::DistributedSnapshotMetadata& metadata);
Status RegisterDataset(const DatasetDef& dataset,
const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id,
std::string& dataset_id);
Status GetOrCreateJob(const std::string& dataset_id,
const ProcessingModeDef& processing_mode,
const std::optional<std::string>& job_name,
std::optional<int64_t> num_consumers,
bool use_cross_trainer_cache,
TargetWorkers target_workers, int64_t& job_id);
Status GetOrCreateIteration(int64_t job_id, int64_t repetition,
int64_t& iteration_client_id);
Status ReleaseIterationClient(int64_t iteration_client_id);
Status MaybeRemoveTask(int64_t task_id, int64_t consumer_index, int64_t round,
bool& removed);
Status ClientHeartbeat(ClientHeartbeatRequest& req,
ClientHeartbeatResponse& resp);
Status GetWorkers(std::vector<WorkerInfo>& workers);
Status GetDataServiceMetadata(const std::string& dataset_id,
DataServiceMetadata& metadata);
Status GetDataServiceConfig(DataServiceConfig& config);
Status DisableCompressionAtRuntime(
const std::string& dataset_id, bool disable_compression_at_runtime,
DisableCompressionAtRuntimeResponse& response);
protected:
Status EnsureInitialized() override;
private:
mutex mu_;
std::unique_ptr<DispatcherService::Stub> stub_;
};
}
}
#endif
#include "tensorflow/core/data/service/dispatcher_client.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace data {
Status DataServiceDispatcherClient::Initialize() {
mutex_lock l(mu_);
if (stub_) {
return absl::OkStatus();
}
std::shared_ptr<grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(protocol_, &credentials));
grpc::ChannelArguments args;
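  // Lift gRPC's default 4 MB receive limit (serialized dataset graphs can be
  // large) and give this channel its own subchannel pool instead of sharing
  // connections from the process-wide pool.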
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
auto channel = grpc::CreateCustomChannel(address_, credentials, args);
stub_ = DispatcherService::NewStub(channel);
GetVersionRequest req;
GetVersionResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetVersion(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get dispatcher version from dispatcher "
"running at ",
address_),
s);
}
if (resp.version() != kDataServiceVersion) {
return errors::FailedPrecondition(
"Version mismatch with tf.data service server. The server is running "
"version ",
resp.version(), ", while the client is running version ",
kDataServiceVersion,
". Please ensure that the client and server side are running the "
"same version of TensorFlow. If you're running an MPM binary, make "
"sure the server is running an up-to-date MPM.");
}
return absl::OkStatus();
}
absl::StatusOr<WorkerHeartbeatResponse>
DataServiceDispatcherClient::WorkerHeartbeat(
const WorkerHeartbeatRequest& request) {
WorkerHeartbeatResponse response;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->WorkerHeartbeat(&client_ctx, request, &response);
if (!status.ok()) {
return grpc_util::WrapError("Failed to perform worker heartbeat", status);
}
return response;
}
Status DataServiceDispatcherClient::WorkerUpdate(
const std::string& worker_address,
std::vector<TaskProgress>& task_progress) {
WorkerUpdateRequest req;
req.set_worker_address(worker_address);
for (const auto& update : task_progress) {
*(req.add_updates()) = update;
}
WorkerUpdateResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->WorkerUpdate(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to send worker update", status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDatasetDef(const std::string& dataset_id,
DatasetDef& dataset_def) {
GetDatasetDefRequest req;
req.set_dataset_id(dataset_id);
GetDatasetDefResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetDatasetDef(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get dataset def", status);
}
dataset_def = resp.dataset_def();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetSplit(int64_t iteration_id,
int64_t repetition,
int64_t split_provider_index,
Tensor& split,
bool& end_of_splits) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetSplitRequest req;
req.set_iteration_id(iteration_id);
req.set_repetition(repetition);
req.set_split_provider_index(split_provider_index);
GetSplitResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetSplit(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get split", status);
}
end_of_splits = resp.end_of_splits();
if (!end_of_splits) {
if (!split.FromProto(resp.split())) {
return errors::Internal("Failed to parse split tensor proto");
}
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::Snapshot(
const DatasetDef& dataset, const std::string& path,
const experimental::DistributedSnapshotMetadata& metadata) {
TF_RETURN_IF_ERROR(EnsureInitialized());
SnapshotRequest req;
*req.mutable_dataset() = dataset;
req.set_path(path);
*req.mutable_metadata() = metadata;
SnapshotResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->Snapshot(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to snapshot", status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetSnapshotSplit(
const std::string& worker_address, const std::string& base_path,
int64_t stream_index, int64_t source_index, int64_t repetition_index,
Tensor& split, int64_t& local_split_index, bool& end_of_splits) {
GetSnapshotSplitRequest req;
req.set_worker_address(worker_address);
req.set_base_path(base_path);
req.set_stream_index(stream_index);
req.set_source_index(source_index);
req.set_repetition_index(repetition_index);
GetSnapshotSplitResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetSnapshotSplit(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get snapshot split", status);
}
local_split_index = resp.local_split_index();
end_of_splits = resp.end_of_splits();
if (end_of_splits) {
return absl::OkStatus();
}
if (!split.FromProto(resp.split())) {
return errors::Internal("Failed to parse split tensor proto: ",
resp.split().DebugString());
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::RegisterDataset(
const DatasetDef& dataset, const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id,
std::string& dataset_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrRegisterDatasetRequest req;
*req.mutable_dataset() = dataset;
*req.mutable_metadata() = metadata;
if (requested_dataset_id.has_value()) {
req.set_dataset_id(*requested_dataset_id);
}
GetOrRegisterDatasetResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrRegisterDataset(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to register dataset", status);
}
dataset_id = resp.dataset_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetOrCreateJob(
const std::string& dataset_id, const ProcessingModeDef& processing_mode,
const std::optional<std::string>& job_name,
std::optional<int64_t> num_consumers, bool use_cross_trainer_cache,
TargetWorkers target_workers, int64_t& job_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateJobRequest req;
req.set_dataset_id(dataset_id);
*req.mutable_processing_mode_def() = processing_mode;
if (job_name.has_value()) {
req.set_job_name(job_name.value());
}
if (num_consumers.has_value()) {
req.set_num_consumers(num_consumers.value());
}
req.set_target_workers(target_workers);
req.set_use_cross_trainer_cache(use_cross_trainer_cache);
GetOrCreateJobResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrCreateJob(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get or create job for dataset with id ",
dataset_id),
status);
}
job_id = resp.job_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetOrCreateIteration(
int64_t job_id, int64_t repetition, int64_t& iteration_client_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateIterationRequest req;
req.set_job_id(job_id);
req.set_repetition(repetition);
GetOrCreateIterationResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrCreateIteration(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get or create iteration for job with id ",
job_id),
status);
}
iteration_client_id = resp.iteration_client_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::ReleaseIterationClient(
int64_t iteration_client_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
ReleaseIterationClientRequest req;
req.set_iteration_client_id(iteration_client_id);
ReleaseIterationClientResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->ReleaseIterationClient(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to release iteration client with id ",
iteration_client_id),
status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::MaybeRemoveTask(int64_t task_id,
int64_t consumer_index,
int64_t round,
bool& removed) {
TF_RETURN_IF_ERROR(EnsureInitialized());
MaybeRemoveTaskRequest req;
req.set_task_id(task_id);
req.set_consumer_index(consumer_index);
req.set_round(round);
MaybeRemoveTaskResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->MaybeRemoveTask(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to call MaybeRemoveTask", status);
}
removed = resp.removed();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::ClientHeartbeat(
ClientHeartbeatRequest& req, ClientHeartbeatResponse& resp) {
TF_RETURN_IF_ERROR(EnsureInitialized());
grpc::ClientContext ctx;
grpc::Status s = stub_->ClientHeartbeat(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get tasks", s);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetWorkers(
std::vector<WorkerInfo>& workers) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetWorkersRequest req;
GetWorkersResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetWorkers(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get workers", s);
}
workers.clear();
for (auto& worker : resp.workers()) {
workers.push_back(worker);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDataServiceMetadata(
const std::string& dataset_id, DataServiceMetadata& metadata) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetDataServiceMetadataRequest req;
req.set_dataset_id(dataset_id);
GetDataServiceMetadataResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetDataServiceMetadata(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get data service metadata", s);
}
metadata = resp.metadata();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDataServiceConfig(
DataServiceConfig& config) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetDataServiceConfigRequest request;
GetDataServiceConfigResponse response;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetDataServiceConfig(&ctx, request, &response);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get data service config", s);
}
config = response.config();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::DisableCompressionAtRuntime(
const std::string& dataset_id, bool disable_compression_at_runtime,
DisableCompressionAtRuntimeResponse& response) {
TF_RETURN_IF_ERROR(EnsureInitialized());
grpc::ClientContext ctx;
DisableCompressionAtRuntimeRequest request;
request.set_dataset_id(dataset_id);
request.set_disable_compression_at_runtime(disable_compression_at_runtime);
grpc::Status s = stub_->DisableCompressionAtRuntime(&ctx, request, &response);
if (!s.ok()) {
return grpc_util::WrapError(
"Failed to get runtime compression disabling decision", s);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::EnsureInitialized() {
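  // Retry Initialize() until it succeeds; kint64max as the deadline makes the
  // retry loop effectively unbounded, so callers block until the dispatcher
  // becomes reachable.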
return grpc_util::Retry([this] { return Initialize(); },
"Initialize dispatcher client",
kint64max);
}
}
} | #include "tensorflow/core/data/service/dispatcher_client.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dataset_store.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tensorflow/core/protobuf/struct.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::experimental::DistributedSnapshotMetadata;
using ::tensorflow::data::testing::CreateDummyDistributedSnapshotMetadata;
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::InfiniteDataset;
using ::tensorflow::data::testing::LocalTempFilename;
using ::tensorflow::data::testing::RangeDataset;
using ::tensorflow::testing::StatusIs;
using ::testing::AllOf;
using ::testing::ContainsRegex;
using ::testing::HasSubstr;
constexpr const char kProtocol[] = "grpc";
DataServiceMetadata GetDefaultMetadata() {
StructuredValue decoded_spec;
TensorShapeProto::Dim* dim =
decoded_spec.mutable_tensor_shape_value()->add_dim();
dim->set_size(1);
dim->set_name(absl::StrCat("dim"));
DataServiceMetadata metadata;
metadata.set_element_spec(decoded_spec.SerializeAsString());
metadata.set_compression(DataServiceMetadata::COMPRESSION_SNAPPY);
metadata.set_cardinality(kUnknownCardinality);
return metadata;
}
class DispatcherClientTest : public ::testing::Test {
protected:
absl::Status SetUpTfDataService(int64_t num_workers,
int64_t worker_max_concurrent_snapshots = 0) {
TestCluster::Config config;
config.num_workers = num_workers;
config.work_dir = tsl::io::JoinPath(tsl::testing::TmpDir(), "work_dir");
config.worker_max_concurrent_snapshots = worker_max_concurrent_snapshots;
test_cluster_ = std::make_unique<TestCluster>(config);
TF_RETURN_IF_ERROR(test_cluster_->Initialize());
dispatcher_client_ = std::make_unique<DataServiceDispatcherClient>(
test_cluster_->DispatcherAddress(), kProtocol);
return absl::OkStatus();
}
absl::StatusOr<std::string> RegisterDataset(
const DatasetDef& dataset, const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id = std::nullopt) {
std::string dataset_id;
TF_RETURN_IF_ERROR(dispatcher_client_->RegisterDataset(
dataset, metadata, requested_dataset_id, dataset_id));
return dataset_id;
}
absl::StatusOr<absl::flat_hash_set<std::string>> StartDummySnapshots(
int64_t num_snapshots) {
DistributedSnapshotMetadata metadata =
CreateDummyDistributedSnapshotMetadata();
absl::flat_hash_set<std::string> directories;
for (int64_t i = 0; i < num_snapshots; ++i) {
directories.insert(LocalTempFilename());
}
for (const auto& directory : directories) {
TF_RETURN_IF_ERROR(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata));
}
return directories;
}
std::unique_ptr<TestCluster> test_cluster_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_client_;
};
TEST_F(DispatcherClientTest, GetDataServiceMetadata) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
DataServiceMetadata result;
TF_ASSERT_OK(dispatcher_client_->GetDataServiceMetadata(dataset_id, result));
EXPECT_THAT(result, EqualsProto(metadata));
}
TEST_F(DispatcherClientTest, DatasetDoesNotExist) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
EXPECT_THAT(
dispatcher_client_->GetDataServiceMetadata(
"not-found", metadata),
StatusIs(error::NOT_FOUND, HasSubstr("Dataset id not-found not found")));
}
TEST_F(DispatcherClientTest, SnapshotAlreadyStarted) {
TF_ASSERT_OK(SetUpTfDataService(1));
DistributedSnapshotMetadata metadata =
CreateDummyDistributedSnapshotMetadata();
std::string directory = LocalTempFilename();
TF_ASSERT_OK(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata));
EXPECT_THAT(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata),
StatusIs(error::ALREADY_EXISTS, HasSubstr("already started")));
}
TEST_F(DispatcherClientTest, GetDataServiceConfig) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceConfig config;
TF_ASSERT_OK(dispatcher_client_->GetDataServiceConfig(config));
EXPECT_EQ(config.deployment_mode(), DEPLOYMENT_MODE_COLOCATED);
}
TEST_F(DispatcherClientTest, SnapshotSkeletonWritten) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
for (const auto& path : paths) {
TF_ASSERT_OK(Env::Default()->FileExists(CommittedChunksDirectory(path)));
TF_ASSERT_OK(Env::Default()->FileExists(StreamsDirectory(path)));
}
}
TEST_F(DispatcherClientTest, SnapshotMetadataAndDatasetDefWritten) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
for (const auto& path : paths) {
TF_ASSERT_OK(
Env::Default()->FileExists(io::JoinPath(path, "snapshot.metadata")));
TF_ASSERT_OK(
Env::Default()->FileExists(io::JoinPath(path, "dataset_def.proto")));
}
}
TEST_F(DispatcherClientTest, SnapshotsInHeartbeat) {
  TF_ASSERT_OK(SetUpTfDataService(/*num_workers=*/1,
                                  /*worker_max_concurrent_snapshots=*/3));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
for (int64_t i = 1; i <= 3; ++i) {
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
ASSERT_EQ(worker_heartbeat_response.snapshot_tasks_size(), i);
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
ASSERT_TRUE(paths.count(snapshot_task.base_path()));
ASSERT_EQ(snapshot_task.stream_index(), 0);
}
}
}
TEST_F(DispatcherClientTest, GetSnapshotSplit) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
for (int64_t i = 0; i < 5; ++i) {
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
GetSnapshotSplitRequest get_snapshot_split_request;
Tensor split;
int64_t local_split_index = 0;
bool end_of_splits = false;
TF_ASSERT_OK(dispatcher_client_->GetSnapshotSplit(
test_cluster_->WorkerAddress(0), snapshot_task.base_path(),
snapshot_task.stream_index(),
          /*source_index=*/0, /*repetition_index=*/0, split, local_split_index,
end_of_splits));
EXPECT_EQ(local_split_index, i);
EXPECT_FALSE(end_of_splits);
}
}
}
TEST_F(DispatcherClientTest, GetSnapshotSplitMultipleStreams) {
  TF_ASSERT_OK(SetUpTfDataService(/*num_workers=*/3,
                                  /*worker_max_concurrent_snapshots=*/1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
absl::flat_hash_set<std::string> snapshots_in_progress;
for (int64_t i = 0; i < 3; ++i) {
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(
test_cluster_->WorkerAddress(i));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
EXPECT_EQ(worker_heartbeat_response.snapshot_tasks().size(), 1);
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
snapshots_in_progress.insert(snapshot_task.base_path());
GetSnapshotSplitRequest get_snapshot_split_request;
Tensor split;
int64_t local_split_index = 0;
bool end_of_splits = false;
TF_ASSERT_OK(dispatcher_client_->GetSnapshotSplit(
test_cluster_->WorkerAddress(i), snapshot_task.base_path(),
snapshot_task.stream_index(),
          /*source_index=*/0, /*repetition_index=*/0, split, local_split_index,
end_of_splits));
EXPECT_EQ(local_split_index, 0);
EXPECT_FALSE(end_of_splits);
}
}
EXPECT_EQ(snapshots_in_progress, paths);
}
TEST_F(DispatcherClientTest, RegisterDatasetWithExplicitId) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id1,
RegisterDataset(RangeDataset(10), metadata,
"dataset_id"));
EXPECT_EQ(dataset_id1, "dataset_id");
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id2,
RegisterDataset(RangeDataset(10), metadata,
"dataset_id"));
EXPECT_EQ(dataset_id1, dataset_id2);
}
TEST_F(DispatcherClientTest, DatasetsDoNotMatch) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id1,
RegisterDataset(RangeDataset(10), metadata,
"dataset_id"));
EXPECT_EQ(dataset_id1, "dataset_id");
metadata.set_cardinality(kInfiniteCardinality);
EXPECT_THAT(
RegisterDataset(InfiniteDataset(), metadata,
"dataset_id"),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Datasets with the same ID should have the same structure")));
}
TEST_F(DispatcherClientTest, EnableCrossTrainerCache) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(kInfiniteCardinality);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(InfiniteDataset(), metadata));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
int64_t job_id;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id));
int64_t iteration_client_id;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateIteration(
      job_id, /*repetition=*/0, iteration_client_id));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
ASSERT_EQ(worker_heartbeat_response.new_tasks_size(), 1);
EXPECT_TRUE(worker_heartbeat_response.new_tasks(0).use_cross_trainer_cache());
}
TEST_F(DispatcherClientTest, CreateNamedJob) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
int64_t job_id_1 = -1;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id_1));
int64_t job_id_2 = -2;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id_2));
ASSERT_EQ(job_id_1, job_id_2);
}
TEST_F(DispatcherClientTest, NamedJobsDoNotMatch) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
int64_t job_id = 0;
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/false, TARGET_WORKERS_AUTO, job_id));
processing_mode.set_sharding_policy(ProcessingModeDef::DYNAMIC);
EXPECT_THAT(
dispatcher_client_->GetOrCreateJob(dataset_id, processing_mode, job_name,
                                         /*num_consumers=*/std::nullopt,
                                         /*use_cross_trainer_cache=*/true,
TARGET_WORKERS_AUTO, job_id),
StatusIs(error::INVALID_ARGUMENT,
AllOf(HasSubstr("but found an existing job with different "
"parameters: "),
ContainsRegex("Existing processing mode: <\\w*std::nullopt, dataset_id));
EXPECT_EQ(dataset_id, "1000");
std::string datasets_dir = tsl::io::JoinPath(config.work_dir, "datasets");
FileSystemDatasetStore dataset_store(datasets_dir);
TF_ASSERT_OK(dataset_store.Put("1001", dataset_def));
if (requested_dataset_id.has_value()) {
TF_ASSERT_OK(dataset_store.Put(*requested_dataset_id, dataset_def));
}
TF_ASSERT_OK(dispatcher_client_->RegisterDataset(
dataset_def, GetDefaultMetadata(),
requested_dataset_id, dataset_id));
if (requested_dataset_id.has_value()) {
EXPECT_EQ(dataset_id, *requested_dataset_id);
} else {
EXPECT_EQ(dataset_id, "1001");
}
}
INSTANTIATE_TEST_SUITE_P(DatasetId, DispatcherClientTest_DatasetId,
::testing::Values(std::nullopt, "dataset_id"));
}
}
} |
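Taken together, the dispatcher client methods above are typically used in a register / get-or-create-job / get-or-create-iteration sequence, with EnsureInitialized connecting lazily on first use. A sketch under assumed inputs — the address is a placeholder, and dataset_def and metadata are presumed to be built elsewhere:

DataServiceDispatcherClient client("localhost:5050", "grpc");
std::string dataset_id;
TF_RETURN_IF_ERROR(client.RegisterDataset(
    dataset_def, metadata, /*requested_dataset_id=*/std::nullopt, dataset_id));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
int64_t job_id = 0;
TF_RETURN_IF_ERROR(client.GetOrCreateJob(
    dataset_id, processing_mode, /*job_name=*/"job",
    /*num_consumers=*/std::nullopt, /*use_cross_trainer_cache=*/false,
    TARGET_WORKERS_AUTO, job_id));
int64_t iteration_client_id = 0;
TF_RETURN_IF_ERROR(
    client.GetOrCreateIteration(job_id, /*repetition=*/0, iteration_client_id));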
1,736 | cpp | tensorflow/tensorflow | url | tensorflow/core/data/service/url.cc | tensorflow/core/data/service/url_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_URL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_URL_H_
#include <string>
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace data {
class URL {
public:
explicit URL(absl::string_view url);
absl::string_view host() const { return host_; }
bool has_port() const { return !port_.empty(); }
absl::string_view port() const { return port_; }
private:
void Parse(absl::string_view url);
std::string host_;
std::string port_;
};
}
}
#endif
#include "tensorflow/core/data/service/url.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace data {
URL::URL(absl::string_view url) { Parse(url); }
void URL::Parse(absl::string_view url) {
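  // Split "<host>:<port>" on the last ':'. A port is either alphanumeric
  // (a number or a named port) or a "%port%" / "%port_<name>%" placeholder;
  // any other suffix (e.g. the "]" of a bare IPv6 literal) fails the match,
  // and the entire input is then kept as the host with no port.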
absl::string_view regexp = "(.*):([a-zA-Z0-9_]+|%port(_[a-zA-Z0-9_]+)?%)";
if (!RE2::FullMatch(url, regexp, &host_, &port_)) {
host_ = std::string(url);
port_ = "";
}
}
}
} | #include "tensorflow/core/data/service/url.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(URLTest, ParseUrl) {
URL url("localhost");
EXPECT_EQ(url.host(), "localhost");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseUrlWithProtocol) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseUrlWithPort) {
URL url("localhost:1234");
EXPECT_EQ(url.host(), "localhost");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "1234");
}
TEST(URLTest, ParseUrlWithProtocolAndPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "1234");
}
TEST(URLTest, ParseUrlWithProtocolAndDynamicPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port%");
}
TEST(URLTest, ParseBorgAddress) {
URL url("/worker/task/0");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseBorgAddressWithCustomProtocol) {
URL url("worker:/worker/task/0");
EXPECT_EQ(url.host(), "worker:/worker/task/0");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseBorgAddressWithNamedPort) {
URL url("/worker/task/0:worker");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "worker");
}
TEST(URLTest, ParseBorgAddressWithDynamicPort) {
URL url("/worker/task/0:%port%");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port%");
}
TEST(URLTest, ParseBorgAddressWithDynamicNamedPort) {
URL url("/worker/task/0:%port_worker%");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port_worker%");
}
TEST(URLTest, ParseIPv4Address) {
URL url("127.0.0.1");
EXPECT_EQ(url.host(), "127.0.0.1");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv4AddressWithPort) {
URL url("127.0.0.1:8000");
EXPECT_EQ(url.host(), "127.0.0.1");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "8000");
}
TEST(URLTest, ParseIPv6Address) {
URL url("[::1]");
EXPECT_EQ(url.host(), "[::1]");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv6AddressWithProtocol) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv6AddressWithPort) {
URL url("[::1]:23456");
EXPECT_EQ(url.host(), "[::1]");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "23456");
}
TEST(URLTest, ParseIPv6AddressWithProtocolAndPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "23456");
}
TEST(URLTest, ParseIPv6AddressWithProtocolAndDynamicPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port_name%");
}
TEST(URLTest, ParseNonLocalIPv6Address) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseNonLocalIPv6AddressWithNamedPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "worker");
}
TEST(URLTest, ParseEmptyIPv6Address) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseEmptyAddress) {
URL url("");
EXPECT_EQ(url.host(), "");
EXPECT_FALSE(url.has_port());
}
}
}
} |
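As the tests above show, URL never rejects its input; it only decides whether a port suffix is present, which makes it safe on bare hosts, Borg paths, and IPv6 literals alike. A small usage sketch (the address is an arbitrary example):

URL url("/worker/task/0:%port_worker%");
std::string address(url.host());  // "/worker/task/0"
if (url.has_port()) {
  // The port may be a literal number, a named port, or a %port...%
  // placeholder that a launcher substitutes later.
  absl::StrAppend(&address, ":", url.port());
}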
1,737 | cpp | tensorflow/tensorflow | dataset_store | tensorflow/core/data/service/dataset_store.cc | tensorflow/core/data/service/dataset_store_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_DATASET_STORE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_DATASET_STORE_H_
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/data/service/dispatcher_state.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace data {
class DatasetStore {
public:
virtual ~DatasetStore() = default;
virtual Status Put(const std::string& key, const DatasetDef& dataset) = 0;
virtual Status Get(const std::string& key,
std::shared_ptr<const DatasetDef>& dataset_def) = 0;
};
class FileSystemDatasetStore : public DatasetStore {
public:
explicit FileSystemDatasetStore(const std::string& datasets_dir);
FileSystemDatasetStore(const FileSystemDatasetStore&) = delete;
FileSystemDatasetStore& operator=(const FileSystemDatasetStore&) = delete;
Status Put(const std::string& key, const DatasetDef& dataset) override;
Status Get(const std::string& key,
std::shared_ptr<const DatasetDef>& dataset_def) override;
private:
const std::string datasets_dir_;
};
class MemoryDatasetStore : public DatasetStore {
public:
MemoryDatasetStore() = default;
MemoryDatasetStore(const MemoryDatasetStore&) = delete;
MemoryDatasetStore& operator=(const MemoryDatasetStore&) = delete;
Status Put(const std::string& key, const DatasetDef& dataset) override;
Status Get(const std::string& key,
std::shared_ptr<const DatasetDef>& dataset_def) override;
private:
absl::flat_hash_map<std::string, std::shared_ptr<const DatasetDef>> datasets_;
};
}
}
#endif
#include "tensorflow/core/data/service/dataset_store.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/utils.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
namespace tensorflow {
namespace data {
FileSystemDatasetStore::FileSystemDatasetStore(const std::string& datasets_dir)
: datasets_dir_(datasets_dir) {}
Status FileSystemDatasetStore::Put(const std::string& key,
const DatasetDef& dataset) {
std::string path_to_write = io::JoinPath(datasets_dir_, key);
TF_RETURN_IF_ERROR(WriteDatasetDef(path_to_write, dataset));
return absl::OkStatus();
}
Status FileSystemDatasetStore::Get(
const std::string& key, std::shared_ptr<const DatasetDef>& dataset_def) {
std::string path = io::JoinPath(datasets_dir_, key);
TF_RETURN_IF_ERROR(Env::Default()->FileExists(path));
DatasetDef def;
TF_RETURN_IF_ERROR(ReadDatasetDef(path, def));
dataset_def = std::make_shared<const DatasetDef>(def);
return absl::OkStatus();
}
Status MemoryDatasetStore::Put(const std::string& key,
const DatasetDef& dataset) {
auto& stored_dataset = datasets_[key];
stored_dataset = std::make_shared<const DatasetDef>(dataset);
return absl::OkStatus();
}
Status MemoryDatasetStore::Get(const std::string& key,
std::shared_ptr<const DatasetDef>& dataset_def) {
auto& stored_dataset = datasets_[key];
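  // Note: operator[] default-constructs a null shared_ptr for unseen keys, so
  // a failed Get leaves an empty placeholder entry behind in the map.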
if (!stored_dataset) {
return errors::NotFound("Dataset with key ", key, " not found");
}
dataset_def = stored_dataset;
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/dataset_store.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
const char kFileSystem[] = "file_system";
const char kMemory[] = "memory";
std::string NewDatasetsDir() {
std::string dir = io::JoinPath(testing::TmpDir(), "datasets");
if (Env::Default()->FileExists(dir).ok()) {
int64_t undeleted_files;
int64_t undeleted_dirs;
CHECK(Env::Default()
->DeleteRecursively(dir, &undeleted_files, &undeleted_dirs)
.ok());
}
CHECK(Env::Default()->RecursivelyCreateDir(dir).ok());
return dir;
}
std::unique_ptr<DatasetStore> MakeStore(const std::string& type) {
if (type == kFileSystem) {
return std::make_unique<FileSystemDatasetStore>(NewDatasetsDir());
} else if (type == kMemory) {
return std::make_unique<MemoryDatasetStore>();
} else {
CHECK(false) << "unexpected type: " << type;
}
}
DatasetDef DatasetDefWithVersion(int32_t version) {
DatasetDef def;
def.mutable_graph()->set_version(version);
return def;
}
}
class DatasetStoreTest : public ::testing::Test,
public ::testing::WithParamInterface<std::string> {};
TEST_P(DatasetStoreTest, StoreAndGet) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
std::string key = "key";
DatasetDef dataset_def = DatasetDefWithVersion(1);
TF_ASSERT_OK(store->Put(key, dataset_def));
std::shared_ptr<const DatasetDef> result;
TF_ASSERT_OK(store->Get(key, result));
EXPECT_EQ(result->graph().version(), dataset_def.graph().version());
}
TEST_P(DatasetStoreTest, StoreAndGetMultiple) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
int64_t num_datasets = 10;
std::vector<std::string> keys;
for (int i = 0; i < num_datasets; ++i) {
std::string key = absl::StrCat("key", i);
DatasetDef dataset_def = DatasetDefWithVersion(i);
TF_ASSERT_OK(store->Put(key, dataset_def));
keys.push_back(key);
}
for (int i = 0; i < num_datasets; ++i) {
std::shared_ptr<const DatasetDef> result;
TF_ASSERT_OK(store->Get(keys[i], result));
EXPECT_EQ(result->graph().version(), i);
}
}
TEST_P(DatasetStoreTest, StoreAlreadyExists) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
int32_t version = 1;
DatasetDef dataset_def = DatasetDefWithVersion(version);
std::string key = "key";
TF_ASSERT_OK(store->Put(key, dataset_def));
TF_EXPECT_OK(store->Put(key, dataset_def));
std::shared_ptr<const DatasetDef> result;
TF_ASSERT_OK(store->Get(key, result));
EXPECT_EQ(result->graph().version(), version);
}
TEST_P(DatasetStoreTest, GetMissing) {
std::unique_ptr<DatasetStore> store = MakeStore(GetParam());
std::shared_ptr<const DatasetDef> result;
Status s = store->Get("missing", result);
EXPECT_EQ(s.code(), error::NOT_FOUND);
}
INSTANTIATE_TEST_SUITE_P(DatasetStoreTests, DatasetStoreTest,
::testing::Values(kFileSystem, kMemory));
}
} |
1,738 | cpp | tensorflow/tensorflow | grpc_dispatcher_impl | tensorflow/core/data/service/grpc_dispatcher_impl.cc | tensorflow/core/data/service/grpc_dispatcher_impl_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_GRPC_DISPATCHER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_GRPC_DISPATCHER_IMPL_H_
#include "grpcpp/server_builder.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher_impl.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
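// gRPC adapter for the dispatcher: registers itself with the given
// ServerBuilder and forwards every RPC to the wrapped
// DataServiceDispatcherImpl.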
class GrpcDispatcherImpl : public DispatcherService::Service {
public:
explicit GrpcDispatcherImpl(const experimental::DispatcherConfig& config,
::grpc::ServerBuilder& server_builder);
~GrpcDispatcherImpl() override { Stop(); }
Status Start();
void Stop();
size_t NumActiveIterations();
DispatcherStateExport ExportState() const;
#define HANDLER(method) \
::grpc::Status method(::grpc::ServerContext* context, \
const method##Request* request, \
method##Response* response) override;
HANDLER(WorkerHeartbeat);
HANDLER(WorkerUpdate);
HANDLER(GetDatasetDef);
HANDLER(GetSplit);
HANDLER(GetVersion);
HANDLER(GetOrRegisterDataset);
HANDLER(ReleaseIterationClient);
HANDLER(MaybeRemoveTask);
HANDLER(GetOrCreateJob);
HANDLER(GetOrCreateIteration);
HANDLER(ClientHeartbeat);
HANDLER(GetWorkers);
HANDLER(GetDataServiceMetadata);
HANDLER(GetDataServiceConfig);
HANDLER(Snapshot);
HANDLER(GetSnapshotSplit);
HANDLER(GetSnapshotStreams);
HANDLER(DisableCompressionAtRuntime);
#undef HANDLER
private:
DataServiceDispatcherImpl impl_;
GrpcDispatcherImpl(const GrpcDispatcherImpl&) = delete;
void operator=(const GrpcDispatcherImpl&) = delete;
};
}
}
#endif
#include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include "grpcpp/server_context.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
using ::grpc::ServerBuilder;
using ::grpc::ServerContext;
GrpcDispatcherImpl::GrpcDispatcherImpl(
const experimental::DispatcherConfig& config, ServerBuilder& server_builder)
: impl_(config) {
server_builder.RegisterService(this);
VLOG(1) << "Registered data service dispatcher";
}
Status GrpcDispatcherImpl::Start() { return impl_.Start(); }
void GrpcDispatcherImpl::Stop() { impl_.Stop(); }
size_t GrpcDispatcherImpl::NumActiveIterations() {
return impl_.NumActiveIterations();
}
DispatcherStateExport GrpcDispatcherImpl::ExportState() const {
return impl_.ExportState();
}
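// Each RPC handler delegates to the wrapped implementation and converts the
// resulting tensorflow::Status into a grpc::Status.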
#define HANDLER(method) \
grpc::Status GrpcDispatcherImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_.method(request, response)); \
}
HANDLER(WorkerHeartbeat);
HANDLER(WorkerUpdate);
HANDLER(GetDatasetDef);
HANDLER(GetSplit);
HANDLER(GetVersion);
HANDLER(GetOrRegisterDataset);
HANDLER(ReleaseIterationClient);
HANDLER(MaybeRemoveTask);
HANDLER(GetOrCreateJob);
HANDLER(GetOrCreateIteration);
HANDLER(ClientHeartbeat);
HANDLER(GetWorkers);
HANDLER(GetDataServiceMetadata);
HANDLER(GetDataServiceConfig);
HANDLER(Snapshot);
HANDLER(GetSnapshotSplit);
HANDLER(GetSnapshotStreams);
HANDLER(DisableCompressionAtRuntime);
#undef HANDLER
}
} | #include "tensorflow/core/data/service/grpc_dispatcher_impl.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::grpc::Channel;
using ::grpc::ChannelArguments;
using ::grpc::ChannelCredentials;
using ::grpc::ClientContext;
constexpr const char kHostAddress[] = "localhost";
constexpr const char kProtocol[] = "grpc";
class GrpcDispatcherImplTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(SetUpDispatcherServer());
TF_ASSERT_OK(SetUpDispatcherClientStub());
}
Status SetUpDispatcherServer() {
experimental::DispatcherConfig config;
config.set_protocol(kProtocol);
TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_));
return dispatcher_server_->Start();
}
Status SetUpDispatcherClientStub() {
std::shared_ptr<ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(kProtocol, &credentials));
ChannelArguments args;
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
std::shared_ptr<Channel> channel =
::grpc::CreateCustomChannel(GetDispatcherAddress(), credentials, args);
dispatcher_client_stub_ = DispatcherService::NewStub(channel);
return absl::OkStatus();
}
std::string GetDispatcherAddress() const {
return absl::StrCat(kHostAddress, ":", dispatcher_server_->BoundPort());
}
std::unique_ptr<DispatchGrpcDataServer> dispatcher_server_;
std::unique_ptr<DispatcherService::Stub> dispatcher_client_stub_;
};
TEST_F(GrpcDispatcherImplTest, GrpcTest) {
ClientContext ctx;
GetVersionRequest req;
GetVersionResponse resp;
TF_ASSERT_OK(
FromGrpcStatus(dispatcher_client_stub_->GetVersion(&ctx, req, &resp)));
EXPECT_EQ(resp.version(), kDataServiceVersion);
}
}
}
} |
1,739 | cpp | tensorflow/tensorflow | credentials_factory | tensorflow/core/data/service/credentials_factory.cc | tensorflow/core/data/service/credentials_factory_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_CREDENTIALS_FACTORY_H_
#define TENSORFLOW_CORE_DATA_SERVICE_CREDENTIALS_FACTORY_H_
#include <memory>
#include <string>
#include "grpcpp/grpcpp.h"
#include "grpcpp/security/credentials.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace data {
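// Registry of credentials factories, keyed by protocol name. Factories
// register themselves at static-initialization time and are looked up when a
// server or client needs credentials for a given protocol.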
class CredentialsFactory {
public:
virtual ~CredentialsFactory() = default;
virtual std::string Protocol() = 0;
virtual Status CreateServerCredentials(
std::shared_ptr<::grpc::ServerCredentials>* out) = 0;
virtual Status CreateClientCredentials(
std::shared_ptr<::grpc::ChannelCredentials>* out) = 0;
static void Register(CredentialsFactory* factory);
static Status CreateServerCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ServerCredentials>* out);
static Status CreateClientCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ChannelCredentials>* out);
static bool Exists(absl::string_view protocol);
private:
static Status Get(const absl::string_view protocol, CredentialsFactory** out);
};
}
}
#endif
#include "tensorflow/core/data/service/credentials_factory.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace data {
namespace {
mutex* get_lock() {
static mutex lock(LINKER_INITIALIZED);
return &lock;
}
using CredentialsFactories =
std::unordered_map<std::string, CredentialsFactory*>;
CredentialsFactories& credentials_factories() {
static auto& factories = *new CredentialsFactories();
return factories;
}
}
void CredentialsFactory::Register(CredentialsFactory* factory) {
mutex_lock l(*get_lock());
if (!credentials_factories().insert({factory->Protocol(), factory}).second) {
LOG(ERROR)
<< "Two credentials factories are being registered with protocol "
<< factory->Protocol() << ". Which one gets used is undefined.";
}
}
Status CredentialsFactory::Get(absl::string_view protocol,
CredentialsFactory** out) {
mutex_lock l(*get_lock());
auto it = credentials_factories().find(std::string(protocol));
if (it != credentials_factories().end()) {
*out = it->second;
return absl::OkStatus();
}
std::vector<string> available_types;
for (const auto& factory : credentials_factories()) {
available_types.push_back(factory.first);
}
return errors::NotFound("No credentials factory has been registered for ",
"protocol ", protocol,
". The available types are: [ ",
absl::StrJoin(available_types, ", "), " ]");
}
Status CredentialsFactory::CreateServerCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ServerCredentials>* out) {
CredentialsFactory* factory;
TF_RETURN_IF_ERROR(CredentialsFactory::Get(protocol, &factory));
TF_RETURN_IF_ERROR(factory->CreateServerCredentials(out));
return absl::OkStatus();
}
Status CredentialsFactory::CreateClientCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ChannelCredentials>* out) {
CredentialsFactory* factory;
TF_RETURN_IF_ERROR(CredentialsFactory::Get(protocol, &factory));
TF_RETURN_IF_ERROR(factory->CreateClientCredentials(out));
return absl::OkStatus();
}
bool CredentialsFactory::Exists(absl::string_view protocol) {
mutex_lock l(*get_lock());
return credentials_factories().find(std::string(protocol)) !=
credentials_factories().end();
}
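// Default factory: the "grpc" protocol maps to insecure server and channel
// credentials.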
class InsecureCredentialsFactory : public CredentialsFactory {
public:
std::string Protocol() override { return "grpc"; }
Status CreateServerCredentials(
std::shared_ptr<::grpc::ServerCredentials>* out) override {
*out = ::grpc::InsecureServerCredentials();
return absl::OkStatus();
}
Status CreateClientCredentials(
std::shared_ptr<::grpc::ChannelCredentials>* out) override {
*out = ::grpc::InsecureChannelCredentials();
return absl::OkStatus();
}
};
class InsecureCredentialsRegistrar {
public:
InsecureCredentialsRegistrar() {
auto factory = new InsecureCredentialsFactory();
CredentialsFactory::Register(factory);
}
};
static InsecureCredentialsRegistrar registrar;
}
} | #include "tensorflow/core/data/service/credentials_factory.h"
#include <memory>
#include <string>
#include "absl/strings/match.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kFailedToCreateServerCredentials[] =
"Failed to create server credentials.";
constexpr char kFailedToCreateClientCredentials[] =
"Failed to create client credentials.";
class TestCredentialsFactory : public CredentialsFactory {
public:
std::string Protocol() override { return "test"; }
Status CreateServerCredentials(
std::shared_ptr<grpc::ServerCredentials>* out) override {
return errors::Internal(kFailedToCreateServerCredentials);
}
Status CreateClientCredentials(
std::shared_ptr<grpc::ChannelCredentials>* out) override {
return errors::Internal(kFailedToCreateClientCredentials);
}
};
}
TEST(CredentialsFactory, Register) {
TestCredentialsFactory test_factory;
CredentialsFactory::Register(&test_factory);
std::shared_ptr<grpc::ServerCredentials> server_credentials;
ASSERT_EQ(errors::Internal(kFailedToCreateServerCredentials),
CredentialsFactory::CreateServerCredentials(test_factory.Protocol(),
&server_credentials));
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
ASSERT_EQ(errors::Internal(kFailedToCreateClientCredentials),
CredentialsFactory::CreateClientCredentials(test_factory.Protocol(),
&client_credentials));
}
TEST(CredentialsFactory, DefaultGrpcProtocol) {
std::shared_ptr<grpc::ServerCredentials> server_credentials;
TF_ASSERT_OK(
CredentialsFactory::CreateServerCredentials("grpc", &server_credentials));
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
TF_ASSERT_OK(
CredentialsFactory::CreateClientCredentials("grpc", &client_credentials));
}
TEST(CredentialsFactory, MissingServerProtocol) {
std::shared_ptr<grpc::ServerCredentials> server_credentials;
Status s = CredentialsFactory::CreateServerCredentials("unknown_protocol",
&server_credentials);
ASSERT_EQ(error::Code::NOT_FOUND, s.code());
ASSERT_TRUE(
absl::StrContains(s.ToString(),
"No credentials factory has been registered for "
"protocol unknown_protocol"));
}
TEST(CredentialsFactory, MissingClientProtocol) {
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
Status s = CredentialsFactory::CreateClientCredentials("unknown_protocol",
&client_credentials);
ASSERT_EQ(error::Code::NOT_FOUND, s.code());
ASSERT_TRUE(
absl::StrContains(s.ToString(),
"No credentials factory has been registered for "
"protocol unknown_protocol"));
}
}
} |
1,740 | cpp | tensorflow/tensorflow | data_transfer | tensorflow/core/data/service/data_transfer.cc | tensorflow/core/data/service/data_transfer_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_DATA_TRANSFER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_DATA_TRANSFER_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
struct GetElementResult {
GetElementResult() = default;
GetElementResult(const GetElementResult&) = delete;
GetElementResult& operator=(const GetElementResult&) = delete;
GetElementResult(GetElementResult&&) = default;
GetElementResult& operator=(GetElementResult&&) = default;
GetElementResult Copy() const;
size_t EstimatedMemoryUsageBytes() const;
std::vector<Tensor> components;
int64_t element_index = 0;
bool end_of_sequence = false;
bool skip = false;
};
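// Client side of a pluggable data transfer protocol. Implementations are
// registered under a name and instantiated via Build().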
class DataTransferClient {
public:
struct Config {
absl::string_view protocol;
std::string address;
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info;
Allocator* allocator;
};
using ClientFactoryT =
std::function<Status(Config, std::unique_ptr<DataTransferClient>*)>;
virtual ~DataTransferClient() = default;
virtual Status GetElement(const GetElementRequest& req,
GetElementResult& result) = 0;
virtual void TryCancel() = 0;
static void Register(std::string name, ClientFactoryT factory);
static Status Build(std::string name, Config config,
std::unique_ptr<DataTransferClient>* out);
virtual absl::StatusOr<std::string> GetCompatibilityInfo() const {
return std::string();
}
virtual Status CheckCompatibility(
const std::string& server_compatibility_info) const {
return absl::OkStatus();
}
protected:
Env* const env_ = Env::Default();
};
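// Server side of a pluggable data transfer protocol; mirrors the client-side
// registry.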
class DataTransferServer {
public:
using GetElementT =
std::function<Status(const GetElementRequest*, GetElementResult*)>;
using ServerFactoryT =
std::function<Status(GetElementT, std::shared_ptr<DataTransferServer>*)>;
virtual ~DataTransferServer() = default;
virtual Status Start(const experimental::WorkerConfig& config) = 0;
virtual int Port() const = 0;
static void Register(std::string name, ServerFactoryT factory);
static Status Build(std::string name, GetElementT get_element,
std::shared_ptr<DataTransferServer>* out);
virtual absl::StatusOr<std::string> GetCompatibilityInfo() const {
return std::string();
}
};
}
}
#endif
#include "tensorflow/core/data/service/data_transfer.h"
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/strings/str_join.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace data {
namespace {
mutex* get_lock() {
static mutex lock(LINKER_INITIALIZED);
return &lock;
}
using DataTransferServerFactories =
std::unordered_map<std::string, DataTransferServer::ServerFactoryT>;
DataTransferServerFactories& transfer_server_factories() {
static auto& factories = *new DataTransferServerFactories();
return factories;
}
using DataTransferClientFactories =
std::unordered_map<std::string, DataTransferClient::ClientFactoryT>;
DataTransferClientFactories& transfer_client_factories() {
static auto& factories = *new DataTransferClientFactories();
return factories;
}
}
GetElementResult GetElementResult::Copy() const {
GetElementResult copy;
copy.components = components;
copy.element_index = element_index;
copy.end_of_sequence = end_of_sequence;
copy.skip = skip;
return copy;
}
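// Approximates the memory footprint: the struct itself, each tensor's buffer,
// and, for DT_VARIANT tensors wrapping a CompressedElement, the space used by
// the compressed proto.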
size_t GetElementResult::EstimatedMemoryUsageBytes() const {
size_t size_bytes = components.size() * sizeof(Tensor) +
sizeof(element_index) + sizeof(end_of_sequence) +
sizeof(skip);
for (const Tensor& tensor : components) {
size_bytes += tensor.TotalBytes();
if (tensor.dtype() != DT_VARIANT) {
continue;
}
const Variant& variant = tensor.scalar<Variant>()();
const CompressedElement* compressed = variant.get<CompressedElement>();
if (compressed) {
size_bytes += compressed->SpaceUsedLong();
}
}
return size_bytes;
}
void DataTransferServer::Register(std::string name, ServerFactoryT factory) {
mutex_lock l(*get_lock());
if (!transfer_server_factories().insert({name, factory}).second) {
LOG(ERROR)
<< "Two data transfer server factories are being registered with name "
<< name << ". Which one gets used is undefined.";
}
}
Status DataTransferServer::Build(std::string name, GetElementT get_element,
std::shared_ptr<DataTransferServer>* out) {
mutex_lock l(*get_lock());
auto it = transfer_server_factories().find(name);
if (it != transfer_server_factories().end()) {
return it->second(get_element, out);
}
std::vector<std::string> available_names;
for (const auto& factory : transfer_server_factories()) {
available_names.push_back(factory.first);
}
return errors::NotFound(
"No data transfer server factory has been registered for name ", name,
". The available names are: [ ", absl::StrJoin(available_names, ", "),
" ]");
}
void DataTransferClient::Register(std::string name, ClientFactoryT factory) {
mutex_lock l(*get_lock());
if (!transfer_client_factories().insert({name, factory}).second) {
LOG(ERROR)
<< "Two data transfer client factories are being registered with name "
<< name << ". Which one gets used is undefined.";
}
}
Status DataTransferClient::Build(std::string name, Config config,
std::unique_ptr<DataTransferClient>* out) {
mutex_lock l(*get_lock());
auto it = transfer_client_factories().find(name);
if (it != transfer_client_factories().end()) {
return it->second(config, out);
}
std::vector<string> available_names;
for (const auto& factory : transfer_client_factories()) {
available_names.push_back(factory.first);
}
return errors::NotFound(
"No data transfer client factory has been registered for name ", name,
". The available names are: [ ", absl::StrJoin(available_names, ", "),
" ]");
}
}
} | #include "tensorflow/core/data/service/data_transfer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
class TestDataTransferServer : public DataTransferServer {
public:
explicit TestDataTransferServer(bool* called) : called_(called) {}
Status Start(const experimental::WorkerConfig& unused_config) override {
*called_ = true;
return absl::OkStatus();
}
int Port() const override { return 0; }
private:
bool* called_;
};
template <class T>
GetElementResult MakeElementResult(T value) {
GetElementResult result;
result.components.push_back(Tensor(std::move(value)));
result.element_index = 0;
result.end_of_sequence = false;
return result;
}
TEST(DataTransferTest, RegisterDataTransferServerBuilder) {
bool called = false;
DataTransferServer::Register("test", [&called](auto ignore, auto* server) {
*server = std::make_shared<TestDataTransferServer>(&called);
return absl::OkStatus();
});
std::shared_ptr<DataTransferServer> server;
TF_ASSERT_OK(DataTransferServer::Build("test", {}, &server));
EXPECT_FALSE(called);
TF_ASSERT_OK(server->Start({}));
EXPECT_TRUE(called);
}
TEST(DataTransferTest, EstimateMemoryUsageBytes) {
GetElementResult empty;
EXPECT_GT(empty.EstimatedMemoryUsageBytes(), 0);
Tensor tensor(DT_INT64, TensorShape({10, 100}));
GetElementResult int64_result = MakeElementResult(tensor);
EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(), 1000 * sizeof(int64_t));
EXPECT_GT(int64_result.EstimatedMemoryUsageBytes(),
int64_result.components[0].AllocatedBytes());
EXPECT_GE(int64_result.EstimatedMemoryUsageBytes(), sizeof(int64_result));
}
TEST(DataTransferTest, EstimateVariantMemoryUsageBytes) {
const size_t data_size = 1000;
std::unique_ptr<CompressedElement> compressed{
protobuf::Arena::Create<CompressedElement>(nullptr)};
compressed->set_data(std::string(data_size, 'a'));
Tensor tensor(DT_VARIANT, TensorShape({}));
tensor.scalar<Variant>()() = *compressed;
GetElementResult variant_result = MakeElementResult(tensor);
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(), data_size);
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),
compressed->ByteSizeLong());
EXPECT_GT(variant_result.EstimatedMemoryUsageBytes(),
compressed->SpaceUsedLong());
}
TEST(DataTransferTest, CopyGetElementResult) {
std::string hello_world = "hello, world!";
GetElementResult result = MakeElementResult(hello_world);
ASSERT_EQ(result.components.size(), 1);
EXPECT_GT(result.EstimatedMemoryUsageBytes(), hello_world.size());
GetElementResult copy = result.Copy();
ASSERT_EQ(copy.components.size(), 1);
test::ExpectEqual(result.components[0], copy.components[0]);
EXPECT_EQ(copy.EstimatedMemoryUsageBytes(),
result.EstimatedMemoryUsageBytes());
}
}
}
} |
1,741 | cpp | tensorflow/tensorflow | byte_size | tensorflow/core/data/service/byte_size.cc | tensorflow/core/data/service/byte_size_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_BYTE_SIZE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_BYTE_SIZE_H_
#include <cstddef>
#include <ostream>
#include <string>
namespace tensorflow {
namespace data {
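// A value type representing a non-negative number of bytes. Units are binary
// (1KB == 1024B), and subtraction saturates at zero instead of wrapping
// around.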
class ByteSize final {
public:
constexpr ByteSize() = default;
constexpr ByteSize(const ByteSize&) = default;
ByteSize& operator=(const ByteSize&) = default;
constexpr static ByteSize Bytes(size_t n);
template <class T>
constexpr static ByteSize KB(T n);
template <class T>
constexpr static ByteSize MB(T n);
template <class T>
constexpr static ByteSize GB(T n);
template <class T>
constexpr static ByteSize TB(T n);
ByteSize& operator+=(ByteSize rhs);
ByteSize& operator-=(ByteSize rhs);
template <class T>
ByteSize& operator*=(T rhs);
template <class T>
ByteSize& operator/=(T rhs);
size_t ToUnsignedBytes() const;
double ToDoubleBytes() const;
double ToDoubleKB() const;
double ToDoubleMB() const;
double ToDoubleGB() const;
double ToDoubleTB() const;
std::string DebugString() const;
private:
constexpr explicit ByteSize(double bytes) : bytes_(bytes) {}
size_t bytes_ = 0;
};
constexpr ByteSize ByteSize::Bytes(size_t n) { return ByteSize(n); }
template <class T>
constexpr ByteSize ByteSize::KB(T n) {
return ByteSize::Bytes(n * (size_t{1} << 10));
}
template <class T>
constexpr ByteSize ByteSize::MB(T n) {
return ByteSize::Bytes(n * (size_t{1} << 20));
}
template <class T>
constexpr ByteSize ByteSize::GB(T n) {
return ByteSize::Bytes(n * (size_t{1} << 30));
}
template <class T>
constexpr ByteSize ByteSize::TB(T n) {
return ByteSize::Bytes(n * (size_t{1} << 40));
}
inline ByteSize& ByteSize::operator+=(ByteSize rhs) {
bytes_ += rhs.ToUnsignedBytes();
return *this;
}
inline ByteSize& ByteSize::operator-=(ByteSize rhs) {
if (bytes_ < rhs.ToUnsignedBytes()) {
bytes_ = 0;
return *this;
}
bytes_ -= rhs.ToUnsignedBytes();
return *this;
}
template <class T>
inline ByteSize& ByteSize::operator*=(T rhs) {
bytes_ *= rhs;
return *this;
}
template <class T>
inline ByteSize& ByteSize::operator/=(T rhs) {
bytes_ /= rhs;
return *this;
}
inline ByteSize operator+(ByteSize lhs, ByteSize rhs) {
return lhs += rhs;
}
inline ByteSize operator-(ByteSize lhs, ByteSize rhs) {
return lhs -= rhs;
}
template <class T>
inline ByteSize operator*(ByteSize lhs, T rhs) { return lhs *= rhs; }
template <class T>
inline ByteSize operator*(T lhs, ByteSize rhs) { return rhs *= lhs; }
template <class T>
inline ByteSize operator/(ByteSize lhs, T rhs) { return lhs /= rhs; }
inline double operator/(ByteSize lhs, ByteSize rhs) {
return lhs.ToDoubleBytes() / rhs.ToDoubleBytes();
}
inline bool operator<(ByteSize lhs, ByteSize rhs) {
return lhs.ToUnsignedBytes() < rhs.ToUnsignedBytes();
}
inline bool operator>(ByteSize lhs, ByteSize rhs) {
return rhs < lhs;
}
inline bool operator>=(ByteSize lhs, ByteSize rhs) {
return !(lhs < rhs);
}
inline bool operator<=(ByteSize lhs, ByteSize rhs) {
return !(rhs < lhs);
}
inline bool operator==(ByteSize lhs, ByteSize rhs) {
return lhs.ToUnsignedBytes() == rhs.ToUnsignedBytes();
}
inline bool operator!=(ByteSize lhs, ByteSize rhs) {
return !(lhs == rhs);
}
inline std::ostream& operator<<(std::ostream& os, ByteSize byte_size) {
return os << byte_size.DebugString();
}
}
}
#endif
#include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include "absl/strings/str_cat.h"
namespace tensorflow {
namespace data {
size_t ByteSize::ToUnsignedBytes() const { return bytes_; }
double ByteSize::ToDoubleBytes() const { return static_cast<double>(bytes_); }
double ByteSize::ToDoubleKB() const { return *this / ByteSize::KB(1); }
double ByteSize::ToDoubleMB() const { return *this / ByteSize::MB(1); }
double ByteSize::ToDoubleGB() const { return *this / ByteSize::GB(1); }
double ByteSize::ToDoubleTB() const { return *this / ByteSize::TB(1); }
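// Formats the size using the largest unit in which the value is at least 1,
// e.g. 1536 bytes -> "1.5KB".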
std::string ByteSize::DebugString() const {
if (*this < ByteSize::KB(1)) {
return absl::StrCat(ToUnsignedBytes(), "B");
}
if (*this < ByteSize::MB(1)) {
return absl::StrCat(ToDoubleKB(), "KB");
}
if (*this < ByteSize::GB(1)) {
return absl::StrCat(ToDoubleMB(), "MB");
}
if (*this < ByteSize::TB(1)) {
return absl::StrCat(ToDoubleGB(), "GB");
}
return absl::StrCat(ToDoubleTB(), "TB");
}
}
} | #include "tensorflow/core/data/service/byte_size.h"
#include <cstddef>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::Eq;
using ::testing::Not;
TEST(ByteSizeTest, Constructors) {
EXPECT_EQ(ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(1), ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::Bytes(1024));
EXPECT_EQ(ByteSize::Bytes(1024), ByteSize::KB(1));
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 63), ByteSize::TB(size_t{1} << 23));
EXPECT_EQ(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1), ByteSize::Bytes(size_t{1} << 10));
EXPECT_EQ(ByteSize::KB(0.9), ByteSize::Bytes(1024 * 0.9));
EXPECT_EQ(ByteSize::KB(1.5), ByteSize::Bytes(1024 * 1.5));
EXPECT_EQ(ByteSize::KB(1.5), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::KB(1024), ByteSize::MB(1));
EXPECT_EQ(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1), ByteSize::Bytes(size_t{1} << 20));
EXPECT_EQ(ByteSize::MB(0.9), ByteSize::Bytes(size_t{1} << 20) * 0.9);
EXPECT_EQ(ByteSize::MB(1.5), ByteSize::Bytes(size_t{1} << 20) * 1.5);
EXPECT_EQ(ByteSize::MB(1.5), ByteSize::MB(1.5));
EXPECT_EQ(ByteSize::MB(1024), ByteSize::GB(1));
EXPECT_EQ(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::GB(1), ByteSize::Bytes(size_t{1} << 30));
EXPECT_EQ(ByteSize::GB(0.9), ByteSize::Bytes(size_t{1} << 30) * 0.9);
EXPECT_EQ(ByteSize::GB(1.5), ByteSize::Bytes(size_t{1} << 30) * 1.5);
EXPECT_EQ(ByteSize::GB(1.5), ByteSize::GB(1.5));
EXPECT_EQ(ByteSize::GB(1024), ByteSize::TB(1));
EXPECT_EQ(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::TB(1), ByteSize::Bytes(size_t{1} << 40));
EXPECT_EQ(ByteSize::TB(0.9), ByteSize::Bytes(size_t{1} << 40) * 0.9);
EXPECT_EQ(ByteSize::TB(1.5), ByteSize::Bytes(size_t{1} << 40) * 1.5);
EXPECT_EQ(ByteSize::TB(1.5), ByteSize::TB(1.5));
EXPECT_EQ(ByteSize::TB(1024), ByteSize::TB(1024));
EXPECT_EQ(ByteSize::TB(size_t{1} << 23), ByteSize::TB(size_t{1} << 23));
EXPECT_THAT(ByteSize::Bytes(0), Not(Eq(ByteSize::Bytes(1))));
EXPECT_THAT(ByteSize::Bytes(1025), Not(Eq(ByteSize::KB(1))));
EXPECT_THAT(ByteSize::KB(1), Not(Eq(ByteSize::MB(1))));
EXPECT_THAT(ByteSize::MB(1), Not(Eq(ByteSize::GB(1))));
EXPECT_THAT(ByteSize::GB(1), Not(Eq(ByteSize::TB(1))));
EXPECT_THAT(ByteSize::TB(1), Not(Eq(ByteSize::TB(2))));
}
TEST(ByteSizeTest, ConstexprConstruction) {
constexpr ByteSize default_byte_size;
EXPECT_EQ(default_byte_size, ByteSize::Bytes(0));
constexpr ByteSize bytes = ByteSize::Bytes(1);
EXPECT_EQ(bytes, ByteSize::Bytes(1));
constexpr ByteSize kb = ByteSize::KB(1);
EXPECT_EQ(kb, ByteSize::KB(1));
constexpr ByteSize mb = ByteSize::MB(1);
EXPECT_EQ(mb, ByteSize::MB(1));
constexpr ByteSize gb = ByteSize::GB(1);
EXPECT_EQ(gb, ByteSize::GB(1));
constexpr ByteSize tb = ByteSize::TB(1);
EXPECT_EQ(tb, ByteSize::TB(1));
constexpr ByteSize tb_copy(tb);
EXPECT_EQ(tb_copy, tb);
}
TEST(ByteSizeTest, ConvertToBytes) {
EXPECT_EQ(ByteSize::Bytes(0).ToUnsignedBytes(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleBytes(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleKB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleMB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleGB(), 0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0).ToDoubleTB(), 0);
EXPECT_EQ(ByteSize::Bytes(1).ToUnsignedBytes(), 1);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleBytes(), 1.0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleKB(), 1.0 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleMB(), 1.0 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleGB(), 1.0 / 1024 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1).ToDoubleTB(),
1.0 / 1024 / 1024 / 1024 / 1024);
EXPECT_EQ(ByteSize::KB(0.25).ToUnsignedBytes(), 0.25 * (size_t{1} << 10));
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleBytes(), 0.25 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleKB(), 0.25);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleMB(), 0.25 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleGB(), 0.25 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(0.25).ToDoubleTB(), 0.25 / 1024 / 1024 / 1024);
EXPECT_EQ(ByteSize::MB(0.5).ToUnsignedBytes(), 0.5 * (size_t{1} << 20));
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleBytes(), 0.5 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleKB(), 0.5 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleMB(), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleGB(), 0.5 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::MB(0.5).ToDoubleTB(), 0.5 / 1024 / 1024);
EXPECT_EQ(ByteSize::GB(10).ToUnsignedBytes(), 10.0 * (size_t{1} << 30));
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleBytes(), 10.0 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleKB(), 10.0 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleMB(), 10.0 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleGB(), 10.0);
EXPECT_DOUBLE_EQ(ByteSize::GB(10).ToDoubleTB(), 10.0 / 1024);
EXPECT_EQ(ByteSize::TB(1024).ToUnsignedBytes(), 1024 * (size_t{1} << 40));
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleBytes(),
1024.0 * 1024 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleKB(),
1024.0 * 1024 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleMB(), 1024.0 * 1024 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleGB(), 1024.0 * 1024);
EXPECT_DOUBLE_EQ(ByteSize::TB(1024).ToDoubleTB(), 1024.0);
}
TEST(ByteSizeTest, Arithmetics) {
EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) + ByteSize::Bytes(1), ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::Bytes(512), ByteSize::KB(1));
EXPECT_EQ(ByteSize::Bytes(512) + ByteSize::KB(1), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::KB(0.5) + ByteSize::KB(1), ByteSize::KB(1.5));
EXPECT_EQ(ByteSize::MB(1) + ByteSize::KB(512), ByteSize::MB(1.5));
EXPECT_EQ(ByteSize::MB(1) + ByteSize::Bytes(512), ByteSize::Bytes(1049088));
EXPECT_EQ(ByteSize::GB(0.5) + ByteSize::MB(256) + ByteSize::MB(256),
ByteSize::GB(1));
std::vector<ByteSize> GBs(1024, ByteSize::GB(1));
EXPECT_EQ(absl::c_accumulate(GBs, ByteSize::Bytes(0)), ByteSize::TB(1));
EXPECT_EQ(ByteSize::TB(1) + ByteSize::TB(0.5) + ByteSize::GB(512),
ByteSize::TB(2));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) - ByteSize::Bytes(512), ByteSize::KB(0.5));
EXPECT_EQ(ByteSize::MB(1) - ByteSize::KB(512) - ByteSize::KB(512),
ByteSize::MB(0));
EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512), ByteSize::GB(0.5));
EXPECT_EQ(ByteSize::GB(0.5) - ByteSize::MB(512), ByteSize::GB(0));
EXPECT_EQ(ByteSize::GB(1) - ByteSize::MB(512) - ByteSize::MB(512),
ByteSize::GB(0));
EXPECT_EQ(ByteSize::TB(1) - ByteSize::GB(512) - ByteSize::GB(512),
ByteSize::GB(0));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) - ByteSize::GB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1) - ByteSize::GB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::MB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::GB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::TB(1) * 0, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(1) * 1024, ByteSize::KB(1));
EXPECT_EQ(ByteSize::KB(1) * 1024, ByteSize::MB(1));
EXPECT_EQ(ByteSize::MB(1) * 1024, ByteSize::GB(1));
EXPECT_EQ(ByteSize::GB(1) * 1024, ByteSize::TB(1));
EXPECT_EQ(ByteSize::Bytes(1) * 1.1, ByteSize::Bytes(1));
EXPECT_EQ(ByteSize::KB(1) * 1.2, ByteSize::KB(1.2));
EXPECT_EQ(ByteSize::MB(1) * 1.3, ByteSize::MB(1.3));
EXPECT_EQ(ByteSize::GB(1) * 1.4, ByteSize::GB(1.4));
EXPECT_EQ(ByteSize::TB(1) * 1.5, ByteSize::TB(1.5));
EXPECT_EQ(ByteSize::KB(1) * 0.5, ByteSize::Bytes(512));
EXPECT_EQ(ByteSize::MB(1) * 0.5, ByteSize::KB(512));
EXPECT_EQ(ByteSize::GB(1) * 0.5, ByteSize::MB(512));
EXPECT_EQ(ByteSize::TB(1) * 0.25, ByteSize::GB(256));
EXPECT_EQ(1024 * ByteSize::Bytes(1), ByteSize::KB(1));
EXPECT_EQ(1024 * ByteSize::KB(1), ByteSize::MB(1));
EXPECT_EQ(1024 * ByteSize::MB(1), ByteSize::GB(1));
EXPECT_EQ(1024 * ByteSize::GB(1), ByteSize::TB(1));
EXPECT_EQ(0.9 * ByteSize::TB(1), ByteSize::GB(921.6));
EXPECT_EQ(0 * ByteSize::TB(1), ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::Bytes(0) / 1, ByteSize::Bytes(0));
EXPECT_EQ(ByteSize::KB(1) / 2, ByteSize::KB(0.5));
EXPECT_EQ(ByteSize::MB(1) / 2, ByteSize::KB(512));
EXPECT_EQ(ByteSize::GB(1) / 2, ByteSize::MB(512));
EXPECT_EQ(ByteSize::TB(1.5) / 2, ByteSize::GB(768));
EXPECT_EQ(ByteSize::KB(1) / 0.5, ByteSize::KB(2));
EXPECT_EQ(ByteSize::MB(1) / 0.5, ByteSize::MB(2));
EXPECT_EQ(ByteSize::GB(1) / 0.5, ByteSize::GB(2));
EXPECT_EQ(ByteSize::TB(1) / 0.25, ByteSize::TB(4));
EXPECT_DOUBLE_EQ(ByteSize::Bytes(0) / ByteSize::KB(1), 0.0);
EXPECT_DOUBLE_EQ(ByteSize::Bytes(1) / ByteSize::TB(1),
1.0 / 1024 / 1024 / 1024 / 1024);
EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::KB(2), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::KB(512) / ByteSize::MB(1), 0.5);
EXPECT_DOUBLE_EQ(ByteSize::KB(1) / ByteSize::MB(1), 1.0 / 1024.0);
EXPECT_DOUBLE_EQ(ByteSize::MB(1) / ByteSize::GB(1), 1.0 / 1024.0);
EXPECT_DOUBLE_EQ(ByteSize::GB(1) / ByteSize::TB(1), 1.0 / 1024.0);
}
TEST(ByteSizeTest, Assignments) {
ByteSize byte_size;
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
byte_size = ByteSize::Bytes(1);
EXPECT_EQ(byte_size, ByteSize::Bytes(1));
for (size_t i = 0; i < 1023; ++i) {
byte_size += ByteSize::Bytes(1);
}
EXPECT_EQ(byte_size, ByteSize::KB(1));
for (size_t i = 0; i < 10; ++i) {
byte_size *= 2;
}
EXPECT_EQ(byte_size, ByteSize::MB(1));
byte_size *= 1024 * 1024;
EXPECT_EQ(byte_size, ByteSize::TB(1));
for (size_t i = 0; i < 10; ++i) {
byte_size /= 2;
}
EXPECT_EQ(byte_size, ByteSize::GB(1));
for (size_t i = 0; i < 4; ++i) {
byte_size -= ByteSize::MB(256);
}
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
byte_size -= ByteSize::Bytes(1);
EXPECT_EQ(byte_size, ByteSize::Bytes(0));
}
TEST(ByteSizeTest, Comparisons) {
EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::Bytes(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::Bytes(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::Bytes(1), ByteSize::Bytes(1024));
EXPECT_LE(ByteSize::Bytes(1), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
EXPECT_LE(ByteSize::Bytes(1024), ByteSize::Bytes(1024 * 1024));
EXPECT_LT(ByteSize::Bytes(1024), ByteSize::KB(1.1));
EXPECT_LE(ByteSize::Bytes(1024), ByteSize::KB(1.1));
EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_LE(ByteSize::KB(1), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::KB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::KB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::KB(0.9), ByteSize::Bytes(1024));
EXPECT_LE(ByteSize::KB(0.9), ByteSize::Bytes(1024));
EXPECT_LT(ByteSize::KB(1), ByteSize::KB(1024));
EXPECT_LE(ByteSize::KB(1), ByteSize::KB(1024));
EXPECT_LT(ByteSize::KB(1), ByteSize::MB(1));
EXPECT_LE(ByteSize::KB(1), ByteSize::MB(1));
EXPECT_LT(ByteSize::KB(1024), ByteSize::MB(1.1));
EXPECT_LE(ByteSize::KB(1024), ByteSize::MB(1.1));
EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::MB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::MB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::MB(0.9), ByteSize::KB(1024));
EXPECT_LE(ByteSize::MB(0.9), ByteSize::KB(1024));
EXPECT_LT(ByteSize::MB(1), ByteSize::MB(1024));
EXPECT_LE(ByteSize::MB(1), ByteSize::MB(1024));
EXPECT_LT(ByteSize::MB(1), ByteSize::GB(1));
EXPECT_LE(ByteSize::MB(1), ByteSize::GB(1));
EXPECT_LT(ByteSize::MB(1024), ByteSize::GB(1.1));
EXPECT_LE(ByteSize::MB(1024), ByteSize::GB(1.1));
EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::GB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::GB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::GB(0.9), ByteSize::MB(1024));
EXPECT_LE(ByteSize::GB(0.9), ByteSize::MB(1024));
EXPECT_LT(ByteSize::GB(1), ByteSize::GB(1024));
EXPECT_LE(ByteSize::GB(1), ByteSize::GB(1024));
EXPECT_LT(ByteSize::GB(1), ByteSize::TB(1));
EXPECT_LE(ByteSize::GB(1), ByteSize::TB(1));
EXPECT_LT(ByteSize::GB(1024), ByteSize::TB(1.1));
EXPECT_LE(ByteSize::GB(1024), ByteSize::TB(1.1));
EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_LT(ByteSize::TB(0), ByteSize::Bytes(1));
EXPECT_LE(ByteSize::TB(0), ByteSize::Bytes(1));
EXPECT_LT(ByteSize::TB(0.9), ByteSize::GB(1024));
EXPECT_LE(ByteSize::TB(0.9), ByteSize::GB(1024));
EXPECT_LT(ByteSize::TB(1), ByteSize::TB(1024));
EXPECT_LE(ByteSize::TB(1), ByteSize::TB(1024));
EXPECT_LT(ByteSize::TB(1024), ByteSize::TB(1025));
EXPECT_LE(ByteSize::TB(1024), ByteSize::TB(1025));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
EXPECT_GT(ByteSize::GB(1), ByteSize::MB(1));
EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1));
EXPECT_GT(ByteSize::KB(1), ByteSize::Bytes(1));
EXPECT_GT(ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1));
EXPECT_GT(ByteSize::TB(1), ByteSize::GB(1) + ByteSize::MB(1) +
ByteSize::KB(1) + ByteSize::Bytes(1));
EXPECT_GT(ByteSize::GB(1), 0.0000001 * ByteSize::TB(1));
EXPECT_GT(ByteSize::MB(1), ByteSize::KB(1) * 1023);
EXPECT_GT(ByteSize::KB(1), ByteSize::KB(3) / 4);
EXPECT_GT(ByteSize::Bytes(1), ByteSize::TB(0));
EXPECT_GE(ByteSize::TB(0.5), ByteSize::GB(0.5));
EXPECT_GE(ByteSize::GB(0.5), ByteSize::MB(0.5));
EXPECT_GE(ByteSize::MB(0.5), ByteSize::KB(0.5));
EXPECT_GE(ByteSize::KB(0.5), ByteSize::Bytes(1));
EXPECT_GE(ByteSize::Bytes(1), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::TB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::GB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::MB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::KB(0), ByteSize::Bytes(0));
EXPECT_GE(ByteSize::Bytes(0), ByteSize::Bytes(0));
}
TEST(ByteSizeTest, DebugString) {
EXPECT_EQ(ByteSize::Bytes(0).DebugString(), "0B");
EXPECT_EQ(ByteSize::Bytes(1).DebugString(), "1B");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 10).DebugString(), "1KB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 20).DebugString(), "1MB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 30).DebugString(), "1GB");
EXPECT_EQ(ByteSize::Bytes(size_t{1} << 40).DebugString(), "1TB");
EXPECT_EQ(ByteSize::KB(0.5).DebugString(), "512B");
EXPECT_EQ(ByteSize::KB(1).DebugString(), "1KB");
EXPECT_EQ(ByteSize::KB(1.5).DebugString(), "1.5KB");
EXPECT_EQ(ByteSize::KB(1024).DebugString(), "1MB");
EXPECT_EQ(ByteSize::KB(1024 * 1024).DebugString(), "1GB");
EXPECT_EQ(ByteSize::KB(1024 * 1024 * 1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::MB(0.5).DebugString(), "512KB");
EXPECT_EQ(ByteSize::MB(1).DebugString(), "1MB");
EXPECT_EQ(ByteSize::MB(1.5).DebugString(), "1.5MB");
EXPECT_EQ(ByteSize::MB(1024).DebugString(), "1GB");
EXPECT_EQ(ByteSize::MB(1024 * 1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::GB(0.5).DebugString(), "512MB");
EXPECT_EQ(ByteSize::GB(1).DebugString(), "1GB");
EXPECT_EQ(ByteSize::GB(1.5).DebugString(), "1.5GB");
EXPECT_EQ(ByteSize::GB(1024).DebugString(), "1TB");
EXPECT_EQ(ByteSize::TB(0.5).DebugString(), "512GB");
EXPECT_EQ(ByteSize::TB(1).DebugString(), "1TB");
EXPECT_EQ(ByteSize::TB(1.5).DebugString(), "1.5TB");
EXPECT_EQ(ByteSize::TB(1024).DebugString(), "1024TB");
}
}
}
} |
1,742 | cpp | tensorflow/tensorflow | worker_client | tensorflow/core/data/service/worker_client.cc | tensorflow/core/data/service/worker_client_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_WORKER_CLIENT_H_
#define TENSORFLOW_CORE_DATA_SERVICE_WORKER_CLIENT_H_
#include <memory>
#include <string>
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
constexpr const char kLocalTransferProtocol[] = "local";
constexpr const char kGrpcTransferProtocol[] = "grpc";
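// Client for reading elements from a tf.data service worker over whichever
// data transfer protocol the worker supports.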
class DataServiceWorkerClient : public DataServiceClientBase {
public:
DataServiceWorkerClient(
const std::string& address, const std::string& protocol,
const std::string& transfer_protocol,
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator)
: DataServiceClientBase(address, protocol),
transfer_protocol_(transfer_protocol),
accelerator_device_info_(accelerator_device_info),
allocator_(allocator) {}
Status GetElement(const GetElementRequest& req, GetElementResult& result);
void TryCancel();
Status CheckCompatibility(
const std::string& server_compatibility_info) const {
return client_->CheckCompatibility(server_compatibility_info);
}
std::string GetDataTransferProtocol() const;
protected:
Status EnsureInitialized() override;
private:
std::string transfer_protocol_;
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info_;
Allocator* allocator_;
mutex mu_;
std::unique_ptr<DataTransferClient> client_;
};
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
CreateDataServiceWorkerClient(
const std::string& dispatcher_protocol, const DataTransferServerInfo& info,
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator);
}
}
#endif
#include "tensorflow/core/data/service/worker_client.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace data {
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
CreateDataServiceWorkerClient(
const std::string& dispatcher_protocol, const DataTransferServerInfo& info,
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator) {
auto client = std::make_unique<DataServiceWorkerClient>(
info.address(), dispatcher_protocol, info.protocol(),
accelerator_device_info, allocator);
TF_RETURN_IF_ERROR(client->Initialize());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
client->CheckCompatibility(info.compatibility_info()),
"for data transfer protocol '", client->GetDataTransferProtocol(),
"', the compatibility check between the trainer worker and the ",
"tf.data service worker at ", info.address(), "failed");
return client;
}
Status DataServiceWorkerClient::GetElement(const GetElementRequest& req,
GetElementResult& result) {
TF_RETURN_IF_ERROR(EnsureInitialized());
return client_->GetElement(req, result);
}
Status DataServiceWorkerClient::EnsureInitialized() {
mutex_lock l(mu_);
if (client_) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(DataTransferClient::Build(
GetDataTransferProtocol(),
{protocol_, address_, accelerator_device_info_, allocator_}, &client_));
return absl::OkStatus();
}
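// Prefers the in-process "local" protocol when the worker runs in the same
// process as the client; otherwise uses the configured transfer protocol.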
std::string DataServiceWorkerClient::GetDataTransferProtocol() const {
if (LocalWorkers::Get(address_) != nullptr) {
return kLocalTransferProtocol;
}
return transfer_protocol_;
}
void DataServiceWorkerClient::TryCancel() { client_->TryCancel(); }
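// gRPC-based transfer client. Active ClientContexts are tracked so that
// TryCancel() can abort in-flight GetElement calls.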
class GrpcDataTransferClient : public DataTransferClient {
public:
GrpcDataTransferClient(std::shared_ptr<grpc::ChannelCredentials> credentials,
std::string address) {
VLOG(2) << "Create GrpcDataTransferClient for worker " << address << ".";
grpc::ChannelArguments args;
args.SetMaxReceiveMessageSize(-1);
auto channel = grpc::CreateCustomChannel(address, credentials, args);
stub_ = WorkerService::NewStub(channel);
}
Status GetElement(const GetElementRequest& req,
GetElementResult& result) override {
VLOG(3) << "GetElement for task " << req.task_id() << " from gRPC worker "
<< "server.";
{
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled("Client was cancelled.");
}
}
grpc::ClientContext ctx;
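    // Track the context so a concurrent TryCancel() can abort this RPC; the
    // cleanup removes it once the call completes.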
gtl::Cleanup<std::function<void()>> cleanup;
{
mutex_lock l(mu_);
active_contexts_.insert(&ctx);
cleanup = gtl::MakeCleanup([this, &ctx] {
mutex_lock l(mu_);
active_contexts_.erase(&ctx);
});
}
GetElementResponse resp;
int64_t start_time_us = env_->NowMicros();
grpc::Status s = stub_->GetElement(&ctx, req, &resp);
int64_t end_time_us = env_->NowMicros();
if (!s.ok()) {
return grpc_util::WrapError("Failed to get element", s);
}
metrics::RecordTFDataServiceGetElementDuration(kGrpcTransferProtocol,
end_time_us - start_time_us);
result.end_of_sequence = resp.end_of_sequence();
result.skip = resp.skip_task();
switch (resp.element_case()) {
case GetElementResponse::kCompressed: {
Tensor tensor(DT_VARIANT, TensorShape{});
tensor.scalar<Variant>()() = std::move(resp.compressed());
result.components.push_back(tensor);
break;
}
case GetElementResponse::kUncompressed:
for (const auto& component : resp.uncompressed().components()) {
result.components.emplace_back();
if (!result.components.back().FromProto(component)) {
return errors::Internal("Failed to parse tensor.");
}
}
break;
case GetElementResponse::ELEMENT_NOT_SET:
break;
}
return absl::OkStatus();
}
void TryCancel() override {
VLOG(2) << "Cancel GrpcDataTransferClient.";
mutex_lock l(mu_);
cancelled_ = true;
for (const auto& ctx : active_contexts_) {
ctx->TryCancel();
}
}
private:
mutex mu_;
std::unique_ptr<WorkerService::Stub> stub_;
absl::flat_hash_set<::grpc::ClientContext*> active_contexts_
TF_GUARDED_BY(mu_);
bool cancelled_ TF_GUARDED_BY(mu_) = false;
};
class GrpcTransferClientRegistrar {
public:
GrpcTransferClientRegistrar() {
DataTransferClient::Register(
kGrpcTransferProtocol, [](DataTransferClient::Config config,
std::unique_ptr<DataTransferClient>* out) {
std::shared_ptr<grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(CredentialsFactory::CreateClientCredentials(
config.protocol, &credentials));
*out = std::make_unique<GrpcDataTransferClient>(credentials,
config.address);
return absl::OkStatus();
});
}
};
static GrpcTransferClientRegistrar grpc_client_registrar;
class LocalDataTransferClient : public DataTransferClient {
public:
explicit LocalDataTransferClient(absl::string_view worker_address)
: worker_address_(worker_address) {
VLOG(2) << "Create LocalDataTransferClient for worker " << worker_address_
<< ".";
}
Status GetElement(const GetElementRequest& req,
GetElementResult& result) override {
VLOG(3) << "GetElement for task " << req.task_id() << " from local worker.";
TF_RETURN_IF_ERROR(VerifyClientIsNotCancelled());
TF_ASSIGN_OR_RETURN(std::shared_ptr<DataServiceWorkerImpl> worker,
GetWorker(req));
int64_t start_time_us = env_->NowMicros();
Status s = worker->GetElementResult(&req, &result);
int64_t end_time_us = env_->NowMicros();
TF_RETURN_IF_ERROR(s);
metrics::RecordTFDataServiceGetElementDuration(kLocalTransferProtocol,
end_time_us - start_time_us);
return s;
}
void TryCancel() override {
VLOG(2) << "Cancel LocalDataTransferClient for worker " << worker_address_
<< ".";
mutex_lock l(mu_);
cancelled_ = true;
}
private:
Status VerifyClientIsNotCancelled() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled(absl::Substitute(
"Client for worker $0 has been cancelled.", worker_address_));
}
return absl::OkStatus();
}
absl::StatusOr<std::shared_ptr<DataServiceWorkerImpl>> GetWorker(
const GetElementRequest& req) const {
std::shared_ptr<DataServiceWorkerImpl> worker =
LocalWorkers::Get(worker_address_);
if (!worker) {
return errors::Cancelled(absl::Substitute(
"Local worker at address $0 is no longer available; cancel request "
"for task $1.",
worker_address_, req.task_id()));
}
return worker;
}
const std::string worker_address_;
mutex mu_;
bool cancelled_ TF_GUARDED_BY(mu_) = false;
};
class LocalTransferClientRegistrar {
public:
LocalTransferClientRegistrar() {
DataTransferClient::Register(
kLocalTransferProtocol, [](DataTransferClient::Config config,
std::unique_ptr<DataTransferClient>* out) {
*out = std::make_unique<LocalDataTransferClient>(config.address);
return absl::OkStatus();
});
}
};
static LocalTransferClientRegistrar local_client_registrar;
}
} | #include "tensorflow/core/data/service/worker_client.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/substitute.h"
#include "absl/types/optional.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::RangeSquareDataset;
using ::tensorflow::testing::StatusIs;
using ::testing::MatchesRegex;
constexpr const char kProtocol[] = "grpc";
constexpr const char kAltTransferProtocol[] = "alt";
class WorkerClientTest : public ::testing::TestWithParam<std::string> {
protected:
void SetUp() override { InitializeTestCluster(); }
void InitializeTestCluster(
std::optional<std::string> data_transfer_protocol = std::nullopt) {
test_cluster_ = std::make_unique<TestCluster>(1,
data_transfer_protocol);
TF_ASSERT_OK(test_cluster_->Initialize());
dispatcher_client_ = std::make_unique<DataServiceDispatcherClient>(
test_cluster_->DispatcherAddress(), kProtocol);
}
absl::StatusOr<std::string> RegisterDataset(const int64_t range) {
const auto dataset_def = RangeSquareDataset(range);
std::string dataset_id;
TF_RETURN_IF_ERROR(dispatcher_client_->RegisterDataset(
dataset_def, DataServiceMetadata(),
std::nullopt, dataset_id));
return dataset_id;
}
absl::StatusOr<int64_t> CreateIteration(const std::string& dataset_id) {
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
int64_t job_id = 0;
TF_RETURN_IF_ERROR(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, std::nullopt,
std::nullopt, false,
TARGET_WORKERS_AUTO, job_id));
int64_t iteration_client_id = 0;
TF_RETURN_IF_ERROR(dispatcher_client_->GetOrCreateIteration(
job_id, 0, iteration_client_id));
return iteration_client_id;
}
absl::StatusOr<int64_t> GetTaskToRead(const int64_t iteration_client_id) {
ClientHeartbeatRequest request;
ClientHeartbeatResponse response;
request.set_iteration_client_id(iteration_client_id);
TF_RETURN_IF_ERROR(dispatcher_client_->ClientHeartbeat(request, response));
if (response.task_info().empty()) {
return errors::NotFound(absl::Substitute(
"No task found for iteration $0.", iteration_client_id));
}
return response.task_info(0).task_id();
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> GetWorkerClient(
const std::string& data_transfer_protocol) {
DataTransferServerInfo info;
info.set_address(GetWorkerAddress());
info.set_protocol(data_transfer_protocol);
return CreateDataServiceWorkerClient(kProtocol, info,
nullptr,
nullptr);
}
absl::StatusOr<GetElementResult> GetElement(DataServiceWorkerClient& client,
const int64_t task_id) {
GetElementRequest request;
GetElementResult result;
request.set_task_id(task_id);
TF_RETURN_IF_ERROR(client.GetElement(request, result));
return result;
}
std::string GetDispatcherAddress() const {
return test_cluster_->DispatcherAddress();
}
std::string GetWorkerAddress() const {
return test_cluster_->WorkerAddress(0);
}
std::unique_ptr<TestCluster> test_cluster_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_client_;
};
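// A minimal alternative transfer protocol, implemented fully in-process, used
// to exercise the transfer-protocol registration path in tests.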
class AltDataTransferServer : public DataTransferServer {
public:
explicit AltDataTransferServer(DataTransferServer::GetElementT get_element)
: get_element_(get_element) {}
absl::Status GetElement(const GetElementRequest& req,
GetElementResult& result) {
return get_element_(&req, &result);
}
absl::Status Start(const experimental::WorkerConfig& config) override {
return absl::OkStatus();
}
int Port() const override { return -1; }
private:
DataTransferServer::GetElementT get_element_;
};
class AltDataTransferClient : public DataTransferClient {
public:
explicit AltDataTransferClient(std::shared_ptr<AltDataTransferServer> server)
: server_(server) {}
absl::Status GetElement(const GetElementRequest& req,
GetElementResult& result) override {
return server_->GetElement(req, result);
}
void TryCancel() override {}
private:
std::shared_ptr<AltDataTransferServer> server_;
};
class AltDataTransferRegistrar {
public:
AltDataTransferRegistrar() {
DataTransferServer::Register(
kAltTransferProtocol,
[this](DataTransferServer::GetElementT get_element,
std::shared_ptr<DataTransferServer>* server) {
server_ = std::make_shared<AltDataTransferServer>(get_element);
*server = server_;
return absl::OkStatus();
});
DataTransferClient::Register(
kAltTransferProtocol,
[this](DataTransferClient::Config config,
std::unique_ptr<DataTransferClient>* client) {
*client = std::make_unique<AltDataTransferClient>(server_);
return absl::OkStatus();
});
}
private:
std::shared_ptr<AltDataTransferServer> server_ = nullptr;
};
static AltDataTransferRegistrar alt_data_transfer_registrar;
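// The namespace-scope static above is constructed during static
// initialization, before any test runs, so the "alt" server and client
// factories are installed process-wide without explicit setup code.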
class DataTransferProtocolWorkerClientTest : public WorkerClientTest {
protected:
void SetUp() override {
std::string data_transfer_protocol = GetParam();
InitializeTestCluster(data_transfer_protocol);
}
};
TEST_F(WorkerClientTest, LocalRead) {
const int64_t range = 5;
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id, RegisterDataset(range));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
for (int64_t i = 0; i < range; ++i) {
TF_ASSERT_OK_AND_ASSIGN(GetElementResult result,
GetElement(*client, task_id));
test::ExpectEqual(result.components[0], Tensor(int64_t{i * i}));
EXPECT_FALSE(result.end_of_sequence);
}
LocalWorkers::Remove(GetWorkerAddress());
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Local worker.*is no longer available.*")));
}
TEST_F(WorkerClientTest, LocalReadEmptyDataset) {
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(0));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
TF_ASSERT_OK_AND_ASSIGN(GetElementResult result,
GetElement(*client, task_id));
EXPECT_TRUE(result.end_of_sequence);
LocalWorkers::Remove(GetWorkerAddress());
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Local worker.*is no longer available.*")));
}
TEST_P(DataTransferProtocolWorkerClientTest, NetworkRead) {
std::string data_transfer_protocol = GetParam();
LocalWorkers::Remove(GetWorkerAddress());
const int64_t range = 5;
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id, RegisterDataset(range));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(data_transfer_protocol));
for (int64_t i = 0; i < range; ++i) {
TF_ASSERT_OK_AND_ASSIGN(GetElementResult result,
GetElement(*client, task_id));
test::ExpectEqual(result.components[0], Tensor(int64_t{i * i}));
EXPECT_FALSE(result.end_of_sequence);
}
}
INSTANTIATE_TEST_SUITE_P(
NetworkProtocols, DataTransferProtocolWorkerClientTest,
::testing::Values(kGrpcTransferProtocol, kAltTransferProtocol),
[](const ::testing::TestParamInfo<
DataTransferProtocolWorkerClientTest::ParamType>& info) {
return info.param;
});
TEST_F(WorkerClientTest, LocalServerShutsDown) {
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(5));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
test_cluster_->StopWorkers();
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Local worker.*is no longer available.*")));
}
TEST_F(WorkerClientTest, CancelClient) {
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(5));
TF_ASSERT_OK_AND_ASSIGN(const int64_t iteration_client_id,
CreateIteration(dataset_id));
TF_ASSERT_OK_AND_ASSIGN(const int64_t task_id,
GetTaskToRead(iteration_client_id));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<DataServiceWorkerClient> client,
GetWorkerClient(kLocalTransferProtocol));
client->TryCancel();
EXPECT_THAT(GetElement(*client, task_id),
StatusIs(error::CANCELLED,
MatchesRegex("Client for worker.*has been cancelled.")));
}
}
}
} |
1,743 | cpp | tensorflow/tensorflow | grpc_worker_impl | tensorflow/core/data/service/grpc_worker_impl.cc | tensorflow/core/data/service/grpc_worker_impl_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_GRPC_WORKER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_GRPC_WORKER_IMPL_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "grpcpp/server_builder.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/worker.grpc.pb.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
class GrpcWorkerImpl : public WorkerService::Service {
public:
explicit GrpcWorkerImpl(const experimental::WorkerConfig& config,
::grpc::ServerBuilder& server_builder);
~GrpcWorkerImpl() override { Stop(); }
Status Start(const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers);
void Stop();
std::function<Status(const GetElementRequest*, GetElementResult*)>
get_element_getter() {
return [this](const GetElementRequest* request, GetElementResult* result) {
return impl_->GetElementResult(request, result);
};
}
WorkerStateExport ExportState() const;
#define HANDLER(method) \
::grpc::Status method(::grpc::ServerContext* context, \
const method##Request* request, \
method##Response* response) override;
HANDLER(ProcessTask);
HANDLER(GetElement);
HANDLER(GetWorkerTasks);
HANDLER(GetSnapshotTaskProgresses);
#undef HANDLER
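// For reference, each HANDLER(method) line above declares an override of the
// matching gRPC service method; e.g. HANDLER(GetElement) expands to:
// ::grpc::Status GetElement(::grpc::ServerContext* context,
// const GetElementRequest* request,
// GetElementResponse* response) override;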
private:
std::string worker_address_;
std::shared_ptr<DataServiceWorkerImpl> impl_;
GrpcWorkerImpl(const GrpcWorkerImpl&) = delete;
void operator=(const GrpcWorkerImpl&) = delete;
};
}
}
#endif
#include "tensorflow/core/data/service/grpc_worker_impl.h"
#include <memory>
#include <string>
#include <vector>
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
using ::grpc::ServerBuilder;
using ::grpc::ServerContext;
GrpcWorkerImpl::GrpcWorkerImpl(const experimental::WorkerConfig& config,
ServerBuilder& server_builder)
: impl_(std::make_shared<DataServiceWorkerImpl>(config)) {
server_builder.RegisterService(this);
VLOG(1) << "Registered data service worker";
}
Status GrpcWorkerImpl::Start(
const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers) {
worker_address_ = worker_address;
TF_RETURN_IF_ERROR(impl_->Start(worker_address, transfer_servers));
LocalWorkers::Add(worker_address, impl_);
return absl::OkStatus();
}
void GrpcWorkerImpl::Stop() {
LocalWorkers::Remove(worker_address_);
impl_->Stop();
}
WorkerStateExport GrpcWorkerImpl::ExportState() const {
return impl_->ExportState();
}
#define HANDLER(method) \
::grpc::Status GrpcWorkerImpl::method(ServerContext* context, \
const method##Request* request, \
method##Response* response) { \
return ToGrpcStatus(impl_->method(request, response)); \
}
HANDLER(ProcessTask);
HANDLER(GetElement);
HANDLER(GetWorkerTasks);
HANDLER(GetSnapshotTaskProgresses);
#undef HANDLER
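// Each HANDLER(method) above defines the corresponding override by forwarding
// to the DataServiceWorkerImpl method of the same name and converting the
// returned Status into a ::grpc::Status.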
}
} | #include "tensorflow/core/data/service/grpc_worker_impl.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/server_lib.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::grpc::Channel;
using ::grpc::ChannelArguments;
using ::grpc::ChannelCredentials;
using ::grpc::ClientContext;
constexpr const char kHostAddress[] = "localhost";
constexpr const char kProtocol[] = "grpc";
class GrpcWorkerImplTest : public ::testing::Test {
protected:
void SetUp() override {
TF_ASSERT_OK(SetUpDispatcherServer());
TF_ASSERT_OK(SetUpWorkerServer());
TF_ASSERT_OK(SetUpWorkerClientStub());
}
Status SetUpDispatcherServer() {
experimental::DispatcherConfig config;
config.set_protocol(kProtocol);
TF_RETURN_IF_ERROR(NewDispatchServer(config, dispatcher_server_));
return dispatcher_server_->Start();
}
Status SetUpWorkerServer() {
experimental::WorkerConfig config;
config.set_protocol(kProtocol);
config.set_dispatcher_address(GetDispatcherAddress());
config.set_worker_address(absl::StrCat(kHostAddress, ":%port%"));
TF_RETURN_IF_ERROR(NewWorkerServer(config, worker_server_));
return worker_server_->Start();
}
Status SetUpWorkerClientStub() {
std::shared_ptr<ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(kProtocol, &credentials));
ChannelArguments args;
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
std::shared_ptr<Channel> channel =
::grpc::CreateCustomChannel(GetWorkerAddress(), credentials, args);
worker_client_stub_ = WorkerService::NewStub(channel);
return absl::OkStatus();
}
std::string GetDispatcherAddress() const {
return absl::StrCat(kHostAddress, ":", dispatcher_server_->BoundPort());
}
std::string GetWorkerAddress() const {
return absl::StrCat(kHostAddress, ":", worker_server_->BoundPort());
}
std::unique_ptr<DispatchGrpcDataServer> dispatcher_server_;
std::unique_ptr<WorkerGrpcDataServer> worker_server_;
std::unique_ptr<WorkerService::Stub> worker_client_stub_;
};
TEST_F(GrpcWorkerImplTest, GetWorkerTasks) {
ClientContext ctx;
GetWorkerTasksRequest req;
GetWorkerTasksResponse resp;
TF_ASSERT_OK(
FromGrpcStatus(worker_client_stub_->GetWorkerTasks(&ctx, req, &resp)));
EXPECT_EQ(resp.tasks_size(), 0);
}
}
}
} |
1,744 | cpp | tensorflow/tensorflow | journal | tensorflow/core/data/service/journal.cc | tensorflow/core/data/service/journal_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_JOURNAL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_JOURNAL_H_
#include <memory>
#include <string>
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace data {
std::string DataServiceJournalFile(const std::string& journal_dir,
int64_t sequence_number);
class JournalWriter {
public:
virtual ~JournalWriter() = default;
virtual Status Write(const Update& update) = 0;
virtual Status EnsureInitialized() = 0;
};
class FileJournalWriter : public JournalWriter {
public:
explicit FileJournalWriter(Env* env, const std::string& journal_dir);
FileJournalWriter(const FileJournalWriter&) = delete;
FileJournalWriter& operator=(const FileJournalWriter&) = delete;
Status Write(const Update& update) override;
Status EnsureInitialized() override;
private:
Env* env_;
const std::string journal_dir_;
std::unique_ptr<WritableFile> file_;
std::unique_ptr<io::RecordWriter> writer_;
};
class JournalReader {
public:
virtual ~JournalReader() = default;
virtual Status Read(Update& update, bool& end_of_journal) = 0;
};
class FileJournalReader : public JournalReader {
public:
explicit FileJournalReader(Env* env, StringPiece journal_dir);
FileJournalReader(const FileJournalReader&) = delete;
FileJournalReader& operator=(const FileJournalReader&) = delete;
Status Read(Update& update, bool& end_of_journal) override;
private:
Status EnsureInitialized();
Status UpdateFile(const std::string& filename);
Env* env_;
const std::string journal_dir_;
int64_t sequence_number_ = 0;
std::unique_ptr<RandomAccessFile> file_;
std::unique_ptr<io::SequentialRecordReader> reader_;
};
}
}
#endif
#include "tensorflow/core/data/service/journal.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/lib/io/record_writer.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace data {
namespace {
constexpr StringPiece kJournal = "journal";
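// Extracts the trailing "_<number>" suffix of a journal file name; e.g.
// "journal_5" parses to sequence number 5.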
Status ParseSequenceNumber(const std::string& journal_file,
int64_t* sequence_number) {
if (!RE2::FullMatch(journal_file, ".*_(\\d+)", sequence_number)) {
return errors::InvalidArgument("Failed to parse journal file name: ",
journal_file);
}
return absl::OkStatus();
}
}
std::string DataServiceJournalFile(const std::string& journal_dir,
int64_t sequence_number) {
return io::JoinPath(journal_dir,
absl::StrCat(kJournal, "_", sequence_number));
}
FileJournalWriter::FileJournalWriter(Env* env, const std::string& journal_dir)
: env_(env), journal_dir_(journal_dir) {}
Status FileJournalWriter::EnsureInitialized() {
if (writer_) {
return absl::OkStatus();
}
std::vector<std::string> journal_files;
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(journal_dir_));
TF_RETURN_IF_ERROR(env_->GetChildren(journal_dir_, &journal_files));
int64_t latest_sequence_number = -1;
for (const auto& file : journal_files) {
int64_t sequence_number;
TF_RETURN_IF_ERROR(ParseSequenceNumber(file, &sequence_number));
latest_sequence_number = std::max(latest_sequence_number, sequence_number);
}
std::string journal_file =
DataServiceJournalFile(journal_dir_, latest_sequence_number + 1);
TF_RETURN_IF_ERROR(env_->NewAppendableFile(journal_file, &file_));
writer_ = std::make_unique<io::RecordWriter>(file_.get());
VLOG(1) << "Created journal writer to write to " << journal_file;
return absl::OkStatus();
}
Status FileJournalWriter::Write(const Update& update) {
TF_RETURN_IF_ERROR(EnsureInitialized());
std::string s = update.SerializeAsString();
if (s.empty()) {
return errors::Internal("Failed to serialize update ", update.DebugString(),
" to string");
}
TF_RETURN_IF_ERROR(writer_->WriteRecord(s));
TF_RETURN_IF_ERROR(writer_->Flush());
TF_RETURN_IF_ERROR(file_->Sync());
if (VLOG_IS_ON(4)) {
VLOG(4) << "Wrote journal entry: " << update.DebugString();
}
return absl::OkStatus();
}
FileJournalReader::FileJournalReader(Env* env, StringPiece journal_dir)
: env_(env), journal_dir_(journal_dir) {}
Status FileJournalReader::EnsureInitialized() {
if (reader_) {
return absl::OkStatus();
}
return UpdateFile(DataServiceJournalFile(journal_dir_, 0));
}
Status FileJournalReader::Read(Update& update, bool& end_of_journal) {
TF_RETURN_IF_ERROR(EnsureInitialized());
while (true) {
tstring record;
Status s = reader_->ReadRecord(&record);
if (absl::IsOutOfRange(s)) {
sequence_number_++;
std::string next_journal_file =
DataServiceJournalFile(journal_dir_, sequence_number_);
if (absl::IsNotFound(env_->FileExists(next_journal_file))) {
VLOG(3) << "Next journal file " << next_journal_file
<< " does not exist. End of journal reached.";
end_of_journal = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateFile(next_journal_file));
continue;
}
TF_RETURN_IF_ERROR(s);
if (!update.ParseFromString(record)) {
return errors::DataLoss("Failed to parse journal record.");
}
if (VLOG_IS_ON(4)) {
VLOG(4) << "Read journal entry: " << update.DebugString();
}
end_of_journal = false;
return absl::OkStatus();
}
}
Status FileJournalReader::UpdateFile(const std::string& filename) {
VLOG(1) << "Reading from journal file " << filename;
TF_RETURN_IF_ERROR(env_->NewRandomAccessFile(filename, &file_));
io::RecordReaderOptions opts;
opts.buffer_size = 2 << 20;
reader_ = std::make_unique<io::SequentialRecordReader>(file_.get(), opts);
return absl::OkStatus();
}
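// Illustrative sketch (not part of the original file): a minimal write/read
// round trip through the file journal, using only the API declared above.
// ReplayJournalExample is a hypothetical helper name used only here.
inline Status ReplayJournalExample(Env* env, const std::string& journal_dir) {
FileJournalWriter writer(env, journal_dir);
Update update;
update.mutable_finish_task()->set_task_id(1);
TF_RETURN_IF_ERROR(writer.Write(update));
FileJournalReader reader(env, journal_dir);
bool end_of_journal = false;
while (!end_of_journal) {
Update read_update;
TF_RETURN_IF_ERROR(reader.Read(read_update, end_of_journal));
}
return absl::OkStatus();
}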
}
} | #include "tensorflow/core/data/service/journal.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/journal.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
bool NewJournalDir(std::string& journal_dir) {
std::string filename = testing::TmpDir();
if (!Env::Default()->CreateUniqueFileName(&filename, "journal_dir")) {
return false;
}
journal_dir = filename;
return true;
}
Update MakeCreateIterationUpdate() {
Update update;
CreateIterationUpdate* create_iteration = update.mutable_create_iteration();
create_iteration->set_job_id(3);
create_iteration->set_iteration_id(8);
create_iteration->set_repetition(5);
return update;
}
Update MakeFinishTaskUpdate() {
Update update;
FinishTaskUpdate* finish_task = update.mutable_finish_task();
finish_task->set_task_id(8);
return update;
}
Update MakeRegisterDatasetUpdate() {
Update update;
RegisterDatasetUpdate* register_dataset = update.mutable_register_dataset();
register_dataset->set_dataset_id("dataset_id");
register_dataset->set_fingerprint(3);
return update;
}
Status CheckJournalContent(StringPiece journal_dir,
const std::vector<Update>& expected) {
FileJournalReader reader(Env::Default(), journal_dir);
for (const auto& update : expected) {
Update result;
bool end_of_journal = true;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));
EXPECT_FALSE(end_of_journal);
EXPECT_EQ(result.SerializeAsString(), update.SerializeAsString());
}
Update result;
bool end_of_journal = false;
TF_RETURN_IF_ERROR(reader.Read(result, end_of_journal));
EXPECT_TRUE(end_of_journal);
return absl::OkStatus();
}
}
TEST(Journal, RoundTripMultiple) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
std::vector<Update> updates = {MakeCreateIterationUpdate(),
MakeRegisterDatasetUpdate(),
MakeFinishTaskUpdate()};
FileJournalWriter writer(Env::Default(), journal_dir);
for (const auto& update : updates) {
TF_EXPECT_OK(writer.Write(update));
}
TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));
}
TEST(Journal, AppendExistingJournal) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
std::vector<Update> updates = {MakeCreateIterationUpdate(),
MakeRegisterDatasetUpdate(),
MakeFinishTaskUpdate()};
for (const auto& update : updates) {
FileJournalWriter writer(Env::Default(), journal_dir);
TF_EXPECT_OK(writer.Write(update));
}
TF_EXPECT_OK(CheckJournalContent(journal_dir, updates));
}
TEST(Journal, MissingFile) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_TRUE(absl::IsNotFound(s));
}
TEST(Journal, NonRecordData) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewAppendableFile(
DataServiceJournalFile(journal_dir, 0), &file));
TF_ASSERT_OK(file->Append("not record data"));
}
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_THAT(s.message(), HasSubstr("corrupted record"));
EXPECT_EQ(s.code(), error::DATA_LOSS);
}
TEST(Journal, InvalidRecordData) {
std::string journal_dir;
EXPECT_TRUE(NewJournalDir(journal_dir));
TF_ASSERT_OK(Env::Default()->RecursivelyCreateDir(journal_dir));
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(Env::Default()->NewAppendableFile(
DataServiceJournalFile(journal_dir, 0), &file));
auto writer = std::make_unique<io::RecordWriter>(file.get());
TF_ASSERT_OK(writer->WriteRecord("not serialized proto"));
}
FileJournalReader reader(Env::Default(), journal_dir);
Update result;
bool end_of_journal = true;
Status s = reader.Read(result, end_of_journal);
EXPECT_THAT(s.message(), HasSubstr("Failed to parse journal record"));
EXPECT_EQ(s.code(), error::DATA_LOSS);
}
}
} |
1,745 | cpp | tensorflow/tensorflow | graph_rewriters | tensorflow/core/data/service/graph_rewriters.cc | tensorflow/core/data/service/graph_rewriters_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_GRAPH_REWRITERS_H_
#define TENSORFLOW_CORE_DATA_SERVICE_GRAPH_REWRITERS_H_
#include <cstdint>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace data {
class RemoveCompressionMapRewriter {
public:
absl::StatusOr<GraphDef> ApplyRemoveCompressionMapRewrite(
const GraphDef& graph_def);
private:
tensorflow::RewriterConfig::CustomGraphOptimizer GetRewriteConfig() const;
};
class AutoShardRewriter {
public:
static absl::StatusOr<AutoShardRewriter> Create(const TaskDef& task_def);
absl::StatusOr<GraphDef> ApplyAutoShardRewrite(const GraphDef& graph_def);
private:
AutoShardRewriter(AutoShardPolicy auto_shard_policy, int64_t num_workers,
int64_t worker_index);
tensorflow::RewriterConfig::CustomGraphOptimizer GetRewriteConfig() const;
const AutoShardPolicy auto_shard_policy_;
const int64_t num_workers_;
const int64_t worker_index_;
};
class WorkerIndexResolver {
public:
template <class T>
explicit WorkerIndexResolver(const T& worker_addresses)
: worker_addresses_(worker_addresses.cbegin(), worker_addresses.cend()) {}
Status ValidateWorker(absl::string_view worker_address) const;
void AddWorker(absl::string_view worker_address);
absl::StatusOr<int64_t> GetWorkerIndex(
absl::string_view worker_address) const;
private:
std::vector<std::string> worker_addresses_;
};
}
}
#endif
#include "tensorflow/core/data/service/graph_rewriters.h"
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/optional.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/url.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/grappler_item_builder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/data/auto_shard.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
#include "tensorflow/core/grappler/optimizers/data/remove_compression_map.h"
#include "tensorflow/core/kernels/data/experimental/auto_shard_dataset_op.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::experimental::AutoShardDatasetOp;
constexpr bool kApplyGeneralGrapplerOptimizations = false;
bool HasDynamicPort(absl::string_view address) {
URL url(address);
return url.has_port() && absl::StartsWith(url.port(), "%port") &&
absl::EndsWith(url.port(), "%");
}
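// A worker configured as "host:%port%" (or "host:%port_name%") has a dynamic
// port; such a placeholder may later be replaced by the port the worker
// actually bound.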
bool ShouldReplaceDynamicPort(absl::string_view config_address,
absl::string_view worker_address) {
URL config_url(config_address), worker_url(worker_address);
return (!config_url.has_port() || HasDynamicPort(config_address)) &&
worker_url.has_port() && config_url.host() == worker_url.host();
}
}
absl::StatusOr<GraphDef>
RemoveCompressionMapRewriter::ApplyRemoveCompressionMapRewrite(
const GraphDef& graph_def) {
grappler::RemoveCompressionMap remove_compression_map;
tensorflow::RewriterConfig::CustomGraphOptimizer config = GetRewriteConfig();
TF_RETURN_IF_ERROR(remove_compression_map.Init(&config));
GraphDef input_graph = graph_def;
TF_ASSIGN_OR_RETURN(std::string dataset_node, GetDatasetNode(input_graph));
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
GetGrapplerItem(&input_graph, &dataset_node, false,
kApplyGeneralGrapplerOptimizations);
GraphDef rewritten_graph;
std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
tensorflow::grappler::VirtualCluster cluster(device_map);
grappler::AutoShard::OptimizationStats stats;
TF_RETURN_IF_ERROR(remove_compression_map.OptimizeAndCollectStats(
&cluster, *grappler_item, &rewritten_graph, &stats));
return rewritten_graph;
}
tensorflow::RewriterConfig::CustomGraphOptimizer
RemoveCompressionMapRewriter::GetRewriteConfig() const {
tensorflow::RewriterConfig::CustomGraphOptimizer config;
config.set_name("tf-data-service-remove-compression-map");
return config;
}
absl::StatusOr<AutoShardRewriter> AutoShardRewriter::Create(
const TaskDef& task_def) {
TF_ASSIGN_OR_RETURN(
AutoShardPolicy auto_shard_policy,
ToAutoShardPolicy(task_def.processing_mode_def().sharding_policy()));
return AutoShardRewriter(auto_shard_policy, task_def.num_workers(),
task_def.worker_index());
}
absl::StatusOr<GraphDef> AutoShardRewriter::ApplyAutoShardRewrite(
const GraphDef& graph_def) {
if (auto_shard_policy_ == AutoShardPolicy::OFF) {
return graph_def;
}
VLOG(2) << "Applying auto-shard policy "
<< AutoShardPolicy_Name(auto_shard_policy_)
<< ". Number of workers: " << num_workers_
<< "; worker index: " << worker_index_ << ".";
grappler::AutoShard autoshard;
tensorflow::RewriterConfig::CustomGraphOptimizer config = GetRewriteConfig();
TF_RETURN_IF_ERROR(autoshard.Init(&config));
GraphDef input_graph = graph_def;
TF_ASSIGN_OR_RETURN(std::string dataset_node, GetDatasetNode(input_graph));
std::unique_ptr<tensorflow::grappler::GrapplerItem> grappler_item =
GetGrapplerItem(&input_graph, &dataset_node, false,
kApplyGeneralGrapplerOptimizations);
GraphDef rewritten_graph;
std::unordered_map<std::string, tensorflow::DeviceProperties> device_map;
tensorflow::grappler::VirtualCluster cluster(device_map);
grappler::AutoShard::OptimizationStats stats;
TF_RETURN_IF_ERROR(autoshard.OptimizeAndCollectStats(
&cluster, *grappler_item, &rewritten_graph, &stats));
return rewritten_graph;
}
AutoShardRewriter::AutoShardRewriter(AutoShardPolicy auto_shard_policy,
int64_t num_workers, int64_t worker_index)
: auto_shard_policy_(auto_shard_policy),
num_workers_(num_workers),
worker_index_(worker_index) {}
tensorflow::RewriterConfig::CustomGraphOptimizer
AutoShardRewriter::GetRewriteConfig() const {
tensorflow::RewriterConfig::CustomGraphOptimizer config;
config.set_name("tf-data-service-auto-shard");
(*config.mutable_parameter_map())[AutoShardDatasetOp::kNumWorkers].set_i(
num_workers_);
(*config.mutable_parameter_map())[AutoShardDatasetOp::kIndex].set_i(
worker_index_);
(*config.mutable_parameter_map())[AutoShardDatasetOp::kAutoShardPolicy].set_i(
auto_shard_policy_);
(*config.mutable_parameter_map())[AutoShardDatasetOp::kNumReplicas].set_i(1);
return config;
}
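// Illustrative sketch (not part of the original file): how a worker might
// shard a dataset graph for its task. ShardGraphForTaskExample is a
// hypothetical helper name used only for this example.
inline absl::StatusOr<GraphDef> ShardGraphForTaskExample(
const TaskDef& task_def, const GraphDef& graph) {
TF_ASSIGN_OR_RETURN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
return rewriter.ApplyAutoShardRewrite(graph);
}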
Status WorkerIndexResolver::ValidateWorker(
absl::string_view worker_address) const {
if (worker_addresses_.empty()) {
return absl::OkStatus();
}
for (absl::string_view config_address : worker_addresses_) {
if (config_address == worker_address ||
ShouldReplaceDynamicPort(config_address, worker_address)) {
return absl::OkStatus();
}
}
return errors::FailedPrecondition(absl::Substitute(
"Failed to assign an index for worker $0. Configured workers list: [$1]. "
"The worker's address is not configured, or other workers are already "
"running at the configured host. If your worker has restarted, make sure "
"it runs at the same address and port.",
worker_address, absl::StrJoin(worker_addresses_, ", ")));
}
void WorkerIndexResolver::AddWorker(absl::string_view worker_address) {
for (std::string& config_address : worker_addresses_) {
if (config_address == worker_address) {
return;
}
if (ShouldReplaceDynamicPort(config_address, worker_address)) {
config_address = std::string(worker_address);
return;
}
}
}
absl::StatusOr<int64_t> WorkerIndexResolver::GetWorkerIndex(
absl::string_view worker_address) const {
const auto it = absl::c_find(worker_addresses_, worker_address);
if (it == worker_addresses_.cend()) {
return errors::NotFound(absl::Substitute(
"Failed to shard dataset in tf.data service: Worker $0 is not in the "
"workers list. Got workers list $1.",
worker_address, absl::StrJoin(worker_addresses_, ",")));
}
return std::distance(worker_addresses_.cbegin(), it);
}
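// Illustrative sketch (not part of the original file): the expected
// ValidateWorker -> AddWorker -> GetWorkerIndex lifecycle when a worker
// registers. RegisterAndResolveExample is a hypothetical helper name used
// only for this example.
inline absl::StatusOr<int64_t> RegisterAndResolveExample(
WorkerIndexResolver& resolver, absl::string_view worker_address) {
TF_RETURN_IF_ERROR(resolver.ValidateWorker(worker_address));
resolver.AddWorker(worker_address);
return resolver.GetWorkerIndex(worker_address);
}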
}
} | #include "tensorflow/core/data/service/graph_rewriters.h"
#include <string>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::RangeDatasetWithShardHint;
using ::tensorflow::data::testing::RangeSquareDataset;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::SizeIs;
absl::StatusOr<NodeDef> GetNode(const GraphDef& graph_def,
absl::string_view name) {
for (const NodeDef& node : graph_def.node()) {
if (node.name() == name) {
return node;
}
}
return errors::NotFound(absl::Substitute("Node $0 not found in graph $1.",
name, graph_def.ShortDebugString()));
}
absl::StatusOr<int64_t> GetValue(const GraphDef& graph_def,
absl::string_view name) {
for (const NodeDef& node : graph_def.node()) {
if (node.name() == name) {
return node.attr().at("value").tensor().int64_val()[0];
}
}
return errors::NotFound(absl::Substitute("Node $0 not found in graph $1.",
name, graph_def.ShortDebugString()));
}
TaskDef GetTaskDef(const ProcessingModeDef::ShardingPolicy sharding_policy,
const int64_t num_workers, const int64_t worker_index) {
TaskDef task_def;
task_def.mutable_processing_mode_def()->set_sharding_policy(sharding_policy);
task_def.set_num_workers(num_workers);
task_def.set_worker_index(worker_index);
return task_def;
}
TEST(AutoShardRewriterTest, AutoShard) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE_OR_DATA,
3, 1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, ShardByData) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::DATA, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, ShardByFile) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::FILE, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::NOT_FOUND,
HasSubstr("Found an unshardable source dataset")));
}
TEST(AutoShardRewriterTest, ShardByHint) {
TaskDef task_def = GetTaskDef(ProcessingModeDef::HINT, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeDatasetWithShardHint(10);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, NoShard) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::OFF, 3, 1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
IsOkAndHolds(EqualsProto(dataset.graph())));
}
TEST(AutoShardRewriterTest, EmptyDataset) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::FILE_OR_DATA, 3,
1);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(0);
TF_ASSERT_OK_AND_ASSIGN(GraphDef rewritten_graph,
rewriter.ApplyAutoShardRewrite(dataset.graph()));
TF_ASSERT_OK_AND_ASSIGN(NodeDef shard_node,
GetNode(rewritten_graph, "ShardDataset"));
ASSERT_THAT(shard_node.input(), SizeIs(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(1)), IsOkAndHolds(3));
EXPECT_THAT(GetValue(rewritten_graph, shard_node.input(2)), IsOkAndHolds(1));
}
TEST(AutoShardRewriterTest, NoWorkers) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::FILE_OR_DATA, 0,
0);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::INVALID_ARGUMENT,
"num_workers should be >= 1, currently 0"));
}
TEST(AutoShardRewriterTest, NoWorkersWhenShardIsOff) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::OFF, 0, 0);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
IsOkAndHolds(EqualsProto(dataset.graph())));
}
TEST(AutoShardRewriterTest, WorkerIndexOutOfRange) {
TaskDef task_def =
GetTaskDef(ProcessingModeDef::FILE_OR_DATA, 2,
5);
TF_ASSERT_OK_AND_ASSIGN(AutoShardRewriter rewriter,
AutoShardRewriter::Create(task_def));
DatasetDef dataset = RangeSquareDataset(10);
EXPECT_THAT(rewriter.ApplyAutoShardRewrite(dataset.graph()),
StatusIs(error::INVALID_ARGUMENT,
"index should be >= 0 and < 2, currently 5"));
}
TEST(WorkerIndexResolverTest, AddOneWorker) {
WorkerIndexResolver resolver(std::vector<std::string>{"localhost"});
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"), IsOkAndHolds(0));
}
TEST(WorkerIndexResolverTest, AddMultipleWorkers) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, NamedPorts) {
WorkerIndexResolver resolver(
std::vector<std::string>{"/worker/task/0:worker", "/worker/task/1:worker",
"/worker/task/2:worker"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:worker"));
resolver.AddWorker("/worker/task/2:worker");
resolver.AddWorker("/worker/task/1:worker");
resolver.AddWorker("/worker/task/0:worker");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:worker"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:worker"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:worker"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, DynamicPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0:%port_worker%", "/worker/task/1:%port_worker%",
"/worker/task/2:%port_worker%"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:worker"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:worker"));
resolver.AddWorker("/worker/task/2:worker");
resolver.AddWorker("/worker/task/1:worker");
resolver.AddWorker("/worker/task/0:worker");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:worker"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:worker"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:worker"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AnonymousPorts) {
WorkerIndexResolver resolver(
std::vector<std::string>{"/worker/task/0:%port%", "/worker/task/1:%port%",
"/worker/task/2:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:10000"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:10001"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:10002"));
resolver.AddWorker("/worker/task/2:10000");
resolver.AddWorker("/worker/task/1:10001");
resolver.AddWorker("/worker/task/0:10002");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:10002"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:10001"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:10000"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, NumericPorts) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0:12345", "/worker/task/1:23456", "/worker/task/2:34567"});
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:34567"), IsOkAndHolds(2));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:34567"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:12345"));
resolver.AddWorker("/worker/task/2:34567");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:12345");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, IPv6Addresses) {
WorkerIndexResolver resolver(std::vector<std::string>{
"[1080:0:0:0:8:800:200C:417A]", "[1080:0:0:0:8:800:200C:417B]",
"[1080:0:0:0:8:800:200C:417C]"});
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417A]:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417B]:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417C]:34567"));
resolver.AddWorker("[1080:0:0:0:8:800:200C:417A]:12345");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417B]:23456");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417C]:34567");
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417A]:12345"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417B]:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417C]:34567"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, IPv6AddressesWithDynamicPort) {
WorkerIndexResolver resolver(
std::vector<std::string>{"[1080:0:0:0:8:800:200C:417A]:%port%",
"[1080:0:0:0:8:800:200C:417B]:%port%",
"[1080:0:0:0:8:800:200C:417C]:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417A]:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417B]:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("[1080:0:0:0:8:800:200C:417C]:34567"));
resolver.AddWorker("[1080:0:0:0:8:800:200C:417A]:12345");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417B]:23456");
resolver.AddWorker("[1080:0:0:0:8:800:200C:417C]:34567");
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417A]:12345"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417B]:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("[1080:0:0:0:8:800:200C:417C]:34567"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AddressesWithProtocols) {
// NOTE: the address literals in this test were truncated to "http:" in this
// copy; the "http://worker/task/N" strings below are hypothetical
// reconstructions that follow the pattern of the surrounding tests.
WorkerIndexResolver resolver(std::vector<std::string>{
"http://worker/task/0", "http://worker/task/1", "http://worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("http://worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("http://worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("http://worker/task/0:34567"));
resolver.AddWorker("http://worker/task/2:12345");
resolver.AddWorker("http://worker/task/1:23456");
resolver.AddWorker("http://worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("http://worker/task/0:34567"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("http://worker/task/1:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("http://worker/task/2:12345"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, AddressesWithProtocolsAndDynamicPorts) {
// NOTE: as above, the truncated address literals are hypothetical
// reconstructions, here using the "%port%" dynamic-port placeholder.
WorkerIndexResolver resolver(std::vector<std::string>{
"http://worker/task/0:%port%", "http://worker/task/1:%port%",
"http://worker/task/2:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("http://worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("http://worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("http://worker/task/0:34567"));
resolver.AddWorker("http://worker/task/2:12345");
resolver.AddWorker("http://worker/task/1:23456");
resolver.AddWorker("http://worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("http://worker/task/0:34567"),
IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("http://worker/task/1:23456"),
IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("http://worker/task/2:12345"),
IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, HostNameHasColons) {
WorkerIndexResolver resolver(
std::vector<std::string>{":worker:task:0:%port%", ":worker:task:1:%port%",
":worker:task:2:34567"});
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:0:12345"));
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker(":worker:task:2:34567"));
resolver.AddWorker(":worker:task:0:12345");
resolver.AddWorker(":worker:task:1:23456");
resolver.AddWorker(":worker:task:2:34567");
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:0:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex(":worker:task:2:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, ChangeWorkerPort) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.ValidateWorker("/worker/task/0:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/1:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/2:99999"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
}
TEST(WorkerIndexResolverTest, WorkerNotFound) {
WorkerIndexResolver resolver(std::vector<std::string>{
"/worker/task/0", "/worker/task/1", "/worker/task/2"});
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/3:45678"),
StatusIs(error::NOT_FOUND));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/2:12345"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/1:23456"));
TF_EXPECT_OK(resolver.ValidateWorker("/worker/task/0:34567"));
EXPECT_THAT(resolver.ValidateWorker("/worker/task/3:45678"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
resolver.AddWorker("/worker/task/3:45678");
resolver.AddWorker("/worker/task/2:12345");
resolver.AddWorker("/worker/task/1:23456");
resolver.AddWorker("/worker/task/0:34567");
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/0:34567"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/1:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("/worker/task/2:12345"), IsOkAndHolds(2));
EXPECT_THAT(
resolver.GetWorkerIndex("/worker/task/3:45678"),
StatusIs(error::NOT_FOUND,
HasSubstr(
"Worker /worker/task/3:45678 is not in the workers list.")));
}
TEST(WorkerIndexResolverTest, MultipleWorkersInOneHost) {
WorkerIndexResolver resolver(
std::vector<std::string>{"localhost", "localhost", "localhost"});
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"), IsOkAndHolds(0));
EXPECT_THAT(resolver.GetWorkerIndex("localhost:23456"), IsOkAndHolds(1));
EXPECT_THAT(resolver.GetWorkerIndex("localhost:34567"), IsOkAndHolds(2));
}
TEST(WorkerIndexResolverTest, MoreWorkersThanConfigured) {
WorkerIndexResolver resolver(std::vector<std::string>{
"localhost:%port%", "localhost:%port%", "localhost:%port%"});
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:12345"));
resolver.AddWorker("localhost:12345");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:23456"));
resolver.AddWorker("localhost:23456");
TF_EXPECT_OK(resolver.ValidateWorker("localhost:34567"));
resolver.AddWorker("localhost:34567");
EXPECT_THAT(resolver.ValidateWorker("localhost:45678"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
EXPECT_THAT(resolver.ValidateWorker("localhost:56789"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("already running at the configured host")));
}
TEST(WorkerIndexResolverTest, WorkerNotConfigured) {
WorkerIndexResolver resolver(std::vector<std::string>{""});
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
EXPECT_THAT(resolver.ValidateWorker("localhost:12345"),
StatusIs(error::FAILED_PRECONDITION,
HasSubstr("The worker's address is not configured")));
resolver.AddWorker("localhost:12345");
EXPECT_THAT(resolver.GetWorkerIndex("localhost:12345"),
StatusIs(error::NOT_FOUND));
}
}
}
} |
1,746 | cpp | tensorflow/tensorflow | worker_impl | tensorflow/core/data/service/worker_impl.cc | tensorflow/core/data/service/worker_impl_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_WORKER_IMPL_H_
#define TENSORFLOW_CORE_DATA_SERVICE_WORKER_IMPL_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
class DataServiceWorkerImpl {
public:
explicit DataServiceWorkerImpl(const experimental::WorkerConfig& config);
~DataServiceWorkerImpl();
Status Start(const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers);
void Stop();
Status GetElementResult(const GetElementRequest* request,
GetElementResult* result);
void DeleteLocalTask(const TaskInfo& task_info);
Status ProcessTask(const ProcessTaskRequest* request,
ProcessTaskResponse* response);
Status GetElement(const GetElementRequest* request,
GetElementResponse* response);
Status GetWorkerTasks(const GetWorkerTasksRequest* request,
GetWorkerTasksResponse* response);
Status GetSnapshotTaskProgresses(
const GetSnapshotTaskProgressesRequest* request,
GetSnapshotTaskProgressesResponse* response);
WorkerStateExport ExportState() const;
private:
struct Task {
explicit Task(TaskDef task_def) : task_def(std::move(task_def)) {}
TaskDef task_def;
mutex mu;
bool initialized TF_GUARDED_BY(mu) = false;
int64_t outstanding_requests TF_GUARDED_BY(&DataServiceWorkerImpl::mu_) = 0;
std::unique_ptr<TaskRunner> task_runner;
};
struct SnapshotTask {
std::string base_path;
int64_t stream_index = 0;
template <typename H>
friend H AbslHashValue(H h, const SnapshotTask& task) {
return H::combine(std::move(h), task.base_path, task.stream_index);
}
friend bool operator==(const SnapshotTask& task1,
const SnapshotTask& task2) {
return task1.base_path == task2.base_path &&
task1.stream_index == task2.stream_index;
}
};
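// AbslHashValue and operator== above let SnapshotTask serve as a key type in
// the absl::flat_hash_map of snapshot_writers_ below.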
Status ValidateWorkerConfig() const;
absl::StatusOr<std::unique_ptr<DataServiceDispatcherClient>>
CreateDispatcherClient() const TF_LOCKS_EXCLUDED(mu_);
Status SendTaskUpdates() TF_LOCKS_EXCLUDED(mu_);
Status ProcessTaskInternal(const TaskDef& task)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status EnsureTaskInitialized(Task& task);
void StopTask(Task& task) TF_LOCKS_EXCLUDED(mu_);
void TaskCompletionThread() TF_LOCKS_EXCLUDED(mu_);
void HeartbeatThread() TF_LOCKS_EXCLUDED(mu_);
Status Heartbeat();
absl::StatusOr<bool> DisableCompressionAtRuntime(
const std::string& dataset_id) const;
std::vector<ActiveTask> GetActiveTasks() const TF_LOCKS_EXCLUDED(mu_);
std::vector<int64_t> GetTaskIds(
const std::vector<ActiveTask>& active_tasks) const;
WorkerHeartbeatRequest BuildWorkerHeartbeatRequest() const
TF_LOCKS_EXCLUDED(mu_);
void UpdateTasks(const WorkerHeartbeatResponse& response)
TF_LOCKS_EXCLUDED(mu_);
Status UpdateSnapshotWriters(const WorkerHeartbeatResponse& response)
TF_LOCKS_EXCLUDED(mu_);
absl::StatusOr<std::unique_ptr<StandaloneTaskIterator>>
MakeSnapshotTaskIterator(const SnapshotTaskDef& snapshot_task,
const DatasetDef& dataset_def) const;
std::vector<SnapshotTaskProgress> GetSnapshotTaskProgress() const;
absl::StatusOr<DatasetDef> GetDatasetDef(const TaskDef& task_def) const;
absl::StatusOr<std::unique_ptr<standalone::Dataset>> MakeDataset(
const DatasetDef& dataset_def, const TaskDef& task_def) const;
absl::StatusOr<std::unique_ptr<standalone::Iterator>> MakeDatasetIterator(
standalone::Dataset& dataset, const TaskDef& task_def) const;
const experimental::WorkerConfig config_;
const int64_t worker_uid_;
std::string worker_address_;
std::vector<DataTransferServerInfo> transfer_servers_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_;
mutable mutex mu_;
condition_variable cv_;
absl::flat_hash_map<int64_t, std::shared_ptr<Task>> tasks_ TF_GUARDED_BY(mu_);
absl::flat_hash_set<int64_t> finished_tasks_ TF_GUARDED_BY(mu_);
absl::flat_hash_set<int64_t> pending_completed_tasks_ TF_GUARDED_BY(mu_);
absl::flat_hash_set<int64_t> deleted_tasks_ TF_GUARDED_BY(mu_);
bool cancelled_ TF_GUARDED_BY(mu_) = false;
bool registered_ TF_GUARDED_BY(mu_) = false;
condition_variable task_completion_cv_ TF_GUARDED_BY(mu_);
condition_variable heartbeat_cv_ TF_GUARDED_BY(mu_);
CancellationManager cancellation_manager_;
absl::flat_hash_map<SnapshotTask, std::unique_ptr<SnapshotStreamWriter>,
absl::Hash<SnapshotTask>>
snapshot_writers_ TF_GUARDED_BY(mu_);
std::unique_ptr<Thread> task_completion_thread_;
std::unique_ptr<Thread> heartbeat_thread_;
DataServiceWorkerImpl(const DataServiceWorkerImpl&) = delete;
void operator=(const DataServiceWorkerImpl&) = delete;
};
class LocalWorkers {
public:
static void Add(absl::string_view worker_address,
std::shared_ptr<DataServiceWorkerImpl> worker);
static std::shared_ptr<DataServiceWorkerImpl> Get(
absl::string_view worker_address);
static bool Empty();
static void Remove(absl::string_view worker_address);
private:
using AddressToWorkerMap =
absl::flat_hash_map<std::string, std::shared_ptr<DataServiceWorkerImpl>>;
static mutex mu_;
static AddressToWorkerMap* local_workers_ TF_GUARDED_BY(mu_);
};
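// LocalWorkers is the process-wide registry behind the "local" transfer
// protocol: clients running in the same process fetch the
// DataServiceWorkerImpl from this map and bypass RPC entirely.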
}
}
#endif
#include "tensorflow/core/data/service/worker_impl.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "grpcpp/create_channel.h"
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/export.pb.h"
#include "tensorflow/core/data/service/graph_rewriters.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/utils.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/env_time.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/host_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr absl::Duration kRetryInterval = absl::Seconds(5);
constexpr absl::Duration kDefaultHeartBeatInterval = absl::Seconds(30);
constexpr absl::Duration kDefaultDispatcherTimeout = absl::Hours(1);
using WorkerConfig = experimental::WorkerConfig;
Status MoveElementToResponse(std::vector<Tensor>&& element,
GetElementResponse& resp) {
if (element.size() != 1 || element[0].dtype() != DT_VARIANT ||
!TensorShapeUtils::IsScalar(element[0].shape())) {
for (const auto& component : element) {
UncompressedElement* uncompressed = resp.mutable_uncompressed();
component.AsProtoTensorContent(uncompressed->add_components());
}
return absl::OkStatus();
}
Variant& variant = element[0].scalar<Variant>()();
CompressedElement* compressed = variant.get<CompressedElement>();
if (compressed == nullptr) {
return errors::FailedPrecondition(
"Expected dataset to produce a CompressedElement variant tensor, but "
"it produced ",
variant.TypeName());
}
*resp.mutable_compressed() = *compressed;
return absl::OkStatus();
}
WorkerConfig ApplyWorkerDefaults(const WorkerConfig& config) {
WorkerConfig new_config(config);
if (new_config.heartbeat_interval_ms() == 0) {
new_config.set_heartbeat_interval_ms(
absl::ToInt64Milliseconds(kDefaultHeartBeatInterval));
}
if (new_config.dispatcher_timeout_ms() == 0) {
new_config.set_dispatcher_timeout_ms(
absl::ToInt64Milliseconds(kDefaultDispatcherTimeout));
}
if (new_config.snapshot_max_chunk_size_bytes() == 0) {
new_config.set_snapshot_max_chunk_size_bytes(
kDefaultMaxChunkSize.ToUnsignedBytes());
}
return new_config;
}
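// Sketch of the defaulting above (field values hypothetical): fields left at
// zero pick up the documented defaults, while explicitly set fields pass
// through unchanged.
//
//   WorkerConfig config;
//   config.set_dispatcher_timeout_ms(5000);  // Explicitly set.
//   WorkerConfig defaulted = ApplyWorkerDefaults(config);
//   // defaulted.heartbeat_interval_ms() == 30000  (kDefaultHeartBeatInterval)
//   // defaulted.dispatcher_timeout_ms() == 5000   (unchanged)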
TaskDef Export(const TaskDef& task) {
TaskDef result;
switch (task.dataset_case()) {
case TaskDef::kDatasetDef:
result.set_path(
"In-memory dataset graphs are omitted for brevity. To view datasets "
"stored on the dispatcher, configure a `work_dir`.");
break;
case TaskDef::kPath:
result.set_path(task.path());
break;
default:
break;
}
result.set_dataset_id(task.dataset_id());
result.set_task_id(task.task_id());
result.set_iteration_id(task.iteration_id());
result.set_num_split_providers(task.num_split_providers());
result.set_worker_address(task.worker_address());
*result.mutable_processing_mode_def() = task.processing_mode_def();
switch (task.optional_num_consumers_case()) {
case TaskDef::kNumConsumers:
result.set_num_consumers(task.num_consumers());
break;
default:
break;
}
result.set_num_workers(task.num_workers());
result.set_worker_index(task.worker_index());
return result;
}
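// Note on Export() above: tasks holding an in-memory DatasetDef are exported
// with an explanatory placeholder path rather than the potentially large
// serialized graph, while path-based tasks keep their path verbatim.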
}
mutex LocalWorkers::mu_(LINKER_INITIALIZED);
LocalWorkers::AddressToWorkerMap* LocalWorkers::local_workers_ =
new AddressToWorkerMap();
DataServiceWorkerImpl::DataServiceWorkerImpl(const WorkerConfig& config)
: config_(ApplyWorkerDefaults(config)), worker_uid_(port::JobUid()) {
metrics::RecordTFDataServiceWorkerCreated();
}
DataServiceWorkerImpl::~DataServiceWorkerImpl() {
mutex_lock l(mu_);
cancelled_ = true;
task_completion_cv_.notify_one();
heartbeat_cv_.notify_one();
}
Status DataServiceWorkerImpl::Start(
const std::string& worker_address,
const std::vector<DataTransferServerInfo>& transfer_servers) {
VLOG(3) << "Starting tf.data service worker at address " << worker_address;
TF_RETURN_IF_ERROR(ValidateWorkerConfig());
worker_address_ = worker_address;
transfer_servers_ = transfer_servers;
TF_ASSIGN_OR_RETURN(dispatcher_, CreateDispatcherClient());
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(grpc_util::Retry([this]() { return Heartbeat(); },
should_retry, "Worker heartbeat.",
kint64max));
LOG(INFO) << "Worker registered with dispatcher running at "
<< config_.dispatcher_address()
<< ". Worker config: " << config_.DebugString();
task_completion_thread_ = absl::WrapUnique(
Env::Default()->StartThread({}, "data-service-worker-task-completion",
[this]() { TaskCompletionThread(); }));
heartbeat_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "data-service-worker-heartbeat", [this]() { HeartbeatThread(); }));
mutex_lock l(mu_);
registered_ = true;
return absl::OkStatus();
}
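// Illustrative startup sketch (addresses hypothetical): a worker is
// constructed from its config and started with the address it advertises to
// the dispatcher; Start() registers via retried heartbeats before returning.
//
//   experimental::WorkerConfig config;
//   config.set_dispatcher_address("localhost:5000");
//   config.set_protocol("grpc");
//   DataServiceWorkerImpl worker(config);
//   TF_RETURN_IF_ERROR(
//       worker.Start("localhost:5001", /*transfer_servers=*/{}));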
void DataServiceWorkerImpl::Stop() {
absl::flat_hash_map<int64_t, std::shared_ptr<Task>> tasks;
absl::flat_hash_map<SnapshotTask, std::unique_ptr<SnapshotStreamWriter>,
absl::Hash<SnapshotTask>>
snapshot_writers;
{
mutex_lock l(mu_);
cancelled_ = true;
tasks.swap(tasks_);
snapshot_writers.swap(snapshot_writers_);
}
for (const auto& [task_id, task] : tasks) {
StopTask(*task);
}
for (const auto& [unused, snapshot_writer] : snapshot_writers) {
snapshot_writer->Cancel();
}
Env::Default()->SleepForMicroseconds(config_.shutdown_quiet_period_ms() *
1000);
}
Status DataServiceWorkerImpl::ValidateWorkerConfig() const {
const bool any_tag_is_empty = absl::c_any_of(
config_.worker_tags(),
[](const std::string& worker_tag) { return worker_tag.empty(); });
if (any_tag_is_empty) {
return errors::FailedPrecondition(
"Worker tags cannot be empty. Got tags {",
absl::StrJoin(config_.worker_tags().begin(),
config_.worker_tags().end(), ", "),
"}");
}
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<DataServiceDispatcherClient>>
DataServiceWorkerImpl::CreateDispatcherClient() const TF_LOCKS_EXCLUDED(mu_) {
auto dispatcher = std::make_unique<DataServiceDispatcherClient>(
config_.dispatcher_address(), config_.protocol());
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(
grpc_util::Retry([&dispatcher]() { return dispatcher->Initialize(); },
should_retry, "Initialize dispatcher client.",
kint64max));
return dispatcher;
}
Status DataServiceWorkerImpl::GetElementResult(
const GetElementRequest* request, struct GetElementResult* result) {
Task* task = nullptr;
{
mutex_lock l(mu_);
if (cancelled_) {
return errors::Cancelled("Worker is shutting down");
}
if (!registered_) {
return errors::Unavailable(
"Worker has not yet registered with dispatcher.");
}
auto it = tasks_.find(request->task_id());
if (it == tasks_.end()) {
if (deleted_tasks_.contains(request->task_id())) {
return errors::FailedPrecondition(
"Got request for local task ", request->task_id(), " of worker ",
worker_address_, ", which has been deleted. You may be creating ",
"a duplicate iteration which has already finished. To fix this, "
"make sure to create your dataset only once, as opposed to "
"re-creating it repeatedly inside a loop.");
}
if (finished_tasks_.contains(request->task_id())) {
VLOG(3) << "Task is already finished";
result->end_of_sequence = true;
result->skip = false;
return absl::OkStatus();
}
return errors::Unavailable("Task ", request->task_id(), " not found");
}
task = it->second.get();
task->outstanding_requests++;
}
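  // The cleanup below pairs with the increment above: StopTask() blocks on
  // cv_ until outstanding_requests reaches zero, so every exit path from this
  // function must decrement the count and notify.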
auto cleanup = gtl::MakeCleanup([&] {
mutex_lock l(mu_);
task->outstanding_requests--;
cv_.notify_all();
});
TF_RETURN_IF_ERROR(EnsureTaskInitialized(*task));
TF_RETURN_IF_ERROR(task->task_runner->GetNext(*request, *result));
if (result->end_of_sequence) {
mutex_lock l(mu_);
VLOG(3) << "Reached end_of_sequence for task " << request->task_id();
pending_completed_tasks_.insert(request->task_id());
task_completion_cv_.notify_one();
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::ProcessTask(const ProcessTaskRequest* request,
ProcessTaskResponse* response) {
mutex_lock l(mu_);
const TaskDef& task = request->task();
VLOG(3) << "Received request to process task " << task.task_id();
return ProcessTaskInternal(task);
}
Status DataServiceWorkerImpl::ProcessTaskInternal(const TaskDef& task_def)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::shared_ptr<Task>& task = tasks_[task_def.task_id()];
if (task) {
VLOG(1) << "Received request to process already-processed task "
<< task->task_def.task_id();
return absl::OkStatus();
}
task = std::make_unique<Task>(task_def);
VLOG(3) << "Began processing for task " << task_def.task_id()
<< " with processing mode "
<< task_def.processing_mode_def().DebugString();
return absl::OkStatus();
}
Status DataServiceWorkerImpl::EnsureTaskInitialized(
DataServiceWorkerImpl::Task& task) {
if (task.task_def.worker_address() != worker_address_) {
return errors::Internal(absl::Substitute(
"Dispatcher's worker address $0 does not match worker's address $1.",
task.task_def.worker_address(), worker_address_));
}
mutex_lock l(task.mu);
if (task.initialized) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(DatasetDef dataset_def, GetDatasetDef(task.task_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<standalone::Dataset> dataset,
MakeDataset(dataset_def, task.task_def));
TF_ASSIGN_OR_RETURN(std::unique_ptr<standalone::Iterator> iterator,
MakeDatasetIterator(*dataset, task.task_def));
auto task_iterator = std::make_unique<StandaloneTaskIterator>(
std::move(dataset), std::move(iterator));
TF_RETURN_IF_ERROR(TaskRunner::Create(
config_, task.task_def, std::move(task_iterator), task.task_runner));
task.initialized = true;
VLOG(3) << "Created iterator for task " << task.task_def.task_id();
return absl::OkStatus();
}
absl::StatusOr<DatasetDef> DataServiceWorkerImpl::GetDatasetDef(
const TaskDef& task_def) const {
switch (task_def.dataset_case()) {
case TaskDef::kDatasetDef:
return task_def.dataset_def();
case TaskDef::kPath: {
DatasetDef def;
Status s = ReadDatasetDef(task_def.path(), def);
if (!s.ok()) {
LOG(INFO) << "Failed to read dataset from " << task_def.path() << ": "
<< s << ". Falling back to reading from dispatcher.";
TF_RETURN_IF_ERROR(
dispatcher_->GetDatasetDef(task_def.dataset_id(), def));
}
return def;
}
case TaskDef::DATASET_NOT_SET:
return errors::Internal("Unrecognized dataset case: ",
task_def.dataset_case());
}
}
absl::StatusOr<bool> DataServiceWorkerImpl::DisableCompressionAtRuntime(
const std::string& dataset_id) const {
DisableCompressionAtRuntimeResponse response;
absl::Time deadline =
absl::FromUnixMicros(EnvTime::NowMicros()) + kDefaultDispatcherTimeout;
auto should_retry = [this]() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !cancelled_;
};
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
return dispatcher_->DisableCompressionAtRuntime(
dataset_id, false, response);
},
should_retry, "Disable compression at runtime.",
absl::ToUnixMicros(deadline)));
if (response.no_compression_to_disable()) {
return false;
}
metrics::RecordTFDataServiceRuntimeCompressionDecision(
response.compression_disabled_at_runtime());
return response.compression_disabled_at_runtime();
}
absl::StatusOr<std::unique_ptr<standalone::Dataset>>
DataServiceWorkerImpl::MakeDataset(const DatasetDef& dataset_def,
const TaskDef& task_def) const {
TF_ASSIGN_OR_RETURN(bool compression_disabled_at_runtime,
DisableCompressionAtRuntime(task_def.dataset_id()));
GraphDef graph = dataset_def.graph();
if (VLOG_IS_ON(1)) {
std::string prefix = absl::StrCat(task_def.dataset_id(), "_", worker_uid_);
DumpGraphDefToFile(absl::StrCat(prefix, "-prerewrite_GraphDef"), graph);
DumpProtoToFile(absl::StrCat(prefix, "-prerewrite_TaskDef"), task_def);
}
if (compression_disabled_at_runtime) {
RemoveCompressionMapRewriter remove_compression_map_rewriter;
TF_ASSIGN_OR_RETURN(
graph, remove_compression_map_rewriter.ApplyRemoveCompressionMapRewrite(
graph));
}
TF_ASSIGN_OR_RETURN(AutoShardRewriter auto_shard_rewriter,
AutoShardRewriter::Create(task_def));
TF_ASSIGN_OR_RETURN(graph, auto_shard_rewriter.ApplyAutoShardRewrite(graph));
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), graph, &dataset));
return dataset;
}
absl::StatusOr<std::unique_ptr<standalone::Iterator>>
DataServiceWorkerImpl::MakeDatasetIterator(standalone::Dataset& dataset,
const TaskDef& task_def) const {
std::unique_ptr<standalone::Iterator> iterator;
if (IsNoShard(task_def.processing_mode_def()) ||
IsStaticShard(task_def.processing_mode_def())) {
TF_RETURN_IF_ERROR(dataset.MakeIterator(&iterator));
return iterator;
}
if (IsDynamicShard(task_def.processing_mode_def())) {
std::vector<std::unique_ptr<SplitProvider>> split_providers;
split_providers.reserve(task_def.num_split_providers());
for (int i = 0; i < task_def.num_split_providers(); ++i) {
split_providers.push_back(std::make_unique<DataServiceSplitProvider>(
config_.dispatcher_address(), config_.protocol(),
task_def.iteration_id(), i, config_.dispatcher_timeout_ms()));
}
TF_RETURN_IF_ERROR(
dataset.MakeIterator(std::move(split_providers), &iterator));
return iterator;
}
return errors::InvalidArgument("Unrecognized processing mode: ",
task_def.processing_mode_def().DebugString());
}
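// Note on the dispatch above: no-shard and statically sharded tasks iterate
// the dataset directly, while dynamically sharded tasks wire up one
// DataServiceSplitProvider per split provider so the dispatcher hands out
// splits at run time.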
void DataServiceWorkerImpl::StopTask(Task& task) TF_LOCKS_EXCLUDED(mu_) {
{
mutex_lock l(task.mu);
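    // Marking the task initialized prevents a racing EnsureTaskInitialized()
    // from building an iterator for a task that is being torn down.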
task.initialized = true;
}
if (task.task_runner) {
task.task_runner->Cancel();
}
mutex_lock l(mu_);
while (task.outstanding_requests > 0) {
cv_.wait(l);
}
}
Status DataServiceWorkerImpl::GetElement(const GetElementRequest* request,
GetElementResponse* response) {
VLOG(3) << "Received GetElement request for task " << request->task_id();
struct GetElementResult result;
TF_RETURN_IF_ERROR(GetElementResult(request, &result));
response->set_end_of_sequence(result.end_of_sequence);
response->set_skip_task(result.skip);
if (!response->end_of_sequence() && !response->skip_task()) {
TF_RETURN_IF_ERROR(
MoveElementToResponse(std::move(result.components), *response));
VLOG(3) << "Producing an element for task " << request->task_id();
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::GetWorkerTasks(
const GetWorkerTasksRequest* request, GetWorkerTasksResponse* response) {
mutex_lock l(mu_);
for (const auto& it : tasks_) {
Task* task = it.second.get();
TaskInfo* task_info = response->add_tasks();
task_info->set_worker_address(worker_address_);
task_info->set_task_id(task->task_def.task_id());
task_info->set_iteration_id(task->task_def.iteration_id());
}
return absl::OkStatus();
}
Status DataServiceWorkerImpl::GetSnapshotTaskProgresses(
const GetSnapshotTaskProgressesRequest* request,
GetSnapshotTaskProgressesResponse* response) {
for (const auto& snapshot_task_progress : GetSnapshotTaskProgress()) {
*response->add_snapshot_task_progresses() = snapshot_task_progress;
}
return absl::OkStatus();
}
void DataServiceWorkerImpl::TaskCompletionThread() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && pending_completed_tasks_.empty()) {
task_completion_cv_.wait(l);
}
if (cancelled_ && pending_completed_tasks_.empty()) {
VLOG(3) << "Task completion thread shutting down";
return;
}
}
Status s = SendTaskUpdates();
if (!s.ok()) {
LOG(WARNING) << "Failed to send task updates to dispatcher: " << s;
mutex_lock l(mu_);
if (cancelled_) {
VLOG(3) << "Task completion thread shutting down";
return;
} else {
task_completion_cv_.wait_for(
l, absl::ToChronoMicroseconds(kRetryInterval));
}
}
}
}
Status DataServiceWorkerImpl::SendTaskUpdates() TF_LOCKS_EXCLUDED(mu_) {
std::vector<TaskProgress> task_progress;
{
mutex_lock l(mu_);
VLOG(3) << "Sending " << pending_completed_tasks_.size()
<< " task updates to dispatcher";
task_progress.reserve(pending_completed_tasks_.size());
    for (int64_t task_id : pending_completed_tasks_) {
task_progress.emplace_back();
task_progress.back().set_task_id(task_id);
task_progress.back().set_completed(true);
}
}
TF_RETURN_IF_ERROR(dispatcher_->WorkerUpdate(worker_address_, task_progress));
mutex_lock l(mu_);
for (const auto& update : task_progress) {
pending_completed_tasks_.erase(update.task_id());
}
  VLOG(3) << "Sent " << task_progress.size() << " task updates";
return absl::OkStatus();
}
void DataServiceWorkerImpl::HeartbeatThread() TF_LOCKS_EXCLUDED(mu_) {
while (true) {
int64_t next_heartbeat_micros =
Env::Default()->NowMicros() + (config_.heartbeat_interval_ms() * 1000);
{
mutex_lock l(mu_);
while (!cancelled_ &&
             Env::Default()->NowMicros() < next_heartbeat_micros | #include "tensorflow/core/data/service/worker_impl.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
class LocalWorkersTest : public ::testing::Test {
protected:
void SetUp() override {
test_cluster_ = std::make_unique<TestCluster>(0);
TF_ASSERT_OK(test_cluster_->Initialize());
}
std::unique_ptr<TestCluster> test_cluster_;
};
TEST_F(LocalWorkersTest, AddRemoveLocalWorkers) {
EXPECT_TRUE(LocalWorkers::Empty());
TF_ASSERT_OK(test_cluster_->AddWorker());
TF_ASSERT_OK(test_cluster_->AddWorker());
TF_ASSERT_OK(test_cluster_->AddWorker());
std::vector<std::string> worker_addresses = {test_cluster_->WorkerAddress(0),
test_cluster_->WorkerAddress(1),
test_cluster_->WorkerAddress(2)};
EXPECT_FALSE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), NotNull());
test_cluster_->StopWorker(0);
EXPECT_FALSE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), NotNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), NotNull());
test_cluster_->StopWorkers();
EXPECT_TRUE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[0]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[1]), IsNull());
EXPECT_THAT(LocalWorkers::Get(worker_addresses[2]), IsNull());
}
TEST_F(LocalWorkersTest, NoLocalWorker) {
EXPECT_TRUE(LocalWorkers::Empty());
EXPECT_THAT(LocalWorkers::Get(""), IsNull());
EXPECT_THAT(LocalWorkers::Get("Invalid address"),
IsNull());
EXPECT_TRUE(LocalWorkers::Empty());
LocalWorkers::Remove("");
LocalWorkers::Remove("Invalid address");
EXPECT_TRUE(LocalWorkers::Empty());
}
}
}
} |
1,747 | cpp | tensorflow/tensorflow | task_runner | tensorflow/core/data/service/task_runner.cc | tensorflow/core/data/service/task_runner_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_TASK_RUNNER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_TASK_RUNNER_H_
#include <memory>
#include <optional>
#include <vector>
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/cross_trainer_cache.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/thread_safe_buffer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
class TaskIterator {
public:
virtual ~TaskIterator() = default;
virtual Status GetNext(std::vector<Tensor>& element,
bool& end_of_sequence) = 0;
virtual int64_t Cardinality() const = 0;
virtual absl::StatusOr<std::vector<Tensor>> Save() {
return errors::Unimplemented(
"Serializing a tf.data service task iterator is unsupported.");
}
virtual Status Restore(const std::vector<Tensor>& saved_iterator) {
return errors::Unimplemented(
"Restoring from a tf.data service task iterator is unsupported.");
}
virtual std::shared_ptr<model::Model> model() const { return nullptr; }
};
class StandaloneTaskIterator : public TaskIterator {
public:
StandaloneTaskIterator(std::unique_ptr<standalone::Dataset> dataset,
std::unique_ptr<standalone::Iterator> iterator);
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override;
int64_t Cardinality() const override;
absl::StatusOr<std::vector<Tensor>> Save() override;
Status Restore(const std::vector<Tensor>& saved_iterator) override;
std::shared_ptr<model::Model> model() const override;
private:
std::unique_ptr<standalone::Dataset> dataset_;
std::unique_ptr<standalone::Iterator> iterator_;
};
class TaskRunner {
public:
static Status Create(const experimental::WorkerConfig& worker_config,
const TaskDef& task_def,
std::unique_ptr<TaskIterator> iterator,
std::unique_ptr<TaskRunner>& out);
virtual ~TaskRunner() = default;
virtual Status GetNext(const GetElementRequest& req,
GetElementResult& result) = 0;
virtual void Cancel() = 0;
virtual std::shared_ptr<model::Model> model() const = 0;
};
class FirstComeFirstServedTaskRunner : public TaskRunner {
public:
explicit FirstComeFirstServedTaskRunner(
std::unique_ptr<TaskIterator> iterator);
~FirstComeFirstServedTaskRunner() override;
Status GetNext(const GetElementRequest& req,
GetElementResult& result) override;
Status GetNext(GetElementResult& result);
void Cancel() override;
std::shared_ptr<model::Model> model() const override;
private:
Status PrefetchFn();
void RunPrefetchThread();
absl::StatusOr<GetElementResult> GetNextFromInputIterator()
TF_LOCKS_EXCLUDED(mu_);
const std::shared_ptr<model::Model> model_;
mutex mu_;
std::unique_ptr<TaskIterator> iterator_ TF_GUARDED_BY(mu_);
int64_t element_index_ TF_GUARDED_BY(mu_) = 0;
ThreadSafeBuffer<GetElementResult> buffer_;
std::unique_ptr<Thread> prefetch_thread_;
FirstComeFirstServedTaskRunner(const FirstComeFirstServedTaskRunner&) =
delete;
void operator=(const FirstComeFirstServedTaskRunner&) = delete;
};
class CachingTaskRunner : public TaskRunner {
public:
explicit CachingTaskRunner(std::unique_ptr<TaskIterator> iterator,
size_t max_cache_size_bytes);
~CachingTaskRunner() override;
Status GetNext(const GetElementRequest& req,
GetElementResult& result) override;
void Cancel() override;
std::shared_ptr<model::Model> model() const override;
private:
class GetElementResultSequence : public CachableSequence<GetElementResult> {
public:
explicit GetElementResultSequence(
FirstComeFirstServedTaskRunner& fcfs_task_runner);
absl::StatusOr<GetElementResult> GetNext() override;
size_t GetElementSizeBytes(const GetElementResult& element) const override;
private:
FirstComeFirstServedTaskRunner& fcfs_task_runner_;
};
FirstComeFirstServedTaskRunner fcfs_task_runner_;
CrossTrainerCache<GetElementResult> cache_;
CachingTaskRunner(const CachingTaskRunner&) = delete;
void operator=(const CachingTaskRunner&) = delete;
};
struct Element {
explicit Element(std::vector<Tensor>&& components, int64_t index)
      : components(std::move(components)), index(index) {}
std::vector<Tensor> components;
int64_t index;
};
class PrefetchThread {
public:
explicit PrefetchThread(std::unique_ptr<TaskIterator> iterator,
int64_t round_size);
~PrefetchThread();
void Run();
Status FillBuffer(int64_t wait_us,
std::vector<std::unique_ptr<Element>>& out);
Status GetStatus();
std::shared_ptr<model::Model> model() const;
private:
const std::unique_ptr<TaskIterator> iterator_;
const int64_t round_size_;
mutex mu_;
int64_t index_ TF_GUARDED_BY(mu_) = 0;
std::vector<std::unique_ptr<Element>> buffer_ TF_GUARDED_BY(mu_);
Status status_ TF_GUARDED_BY(mu_) = absl::OkStatus();
condition_variable cv_;
bool cancelled_ TF_GUARDED_BY(mu_) = false;
std::unique_ptr<Thread> thread_;
};
class RoundRobinTaskRunner : public TaskRunner {
public:
RoundRobinTaskRunner(std::unique_ptr<TaskIterator> iterator,
int64_t num_consumers, string worker_address);
Status GetNext(const GetElementRequest& req,
GetElementResult& result) override;
void Cancel() override;
std::shared_ptr<model::Model> model() const override;
private:
Status PrepareFullRound(int64_t wait_us) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status PreparePartialRound() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status ValidateRequest(const GetElementRequest& req);
Status PrepareRound(const GetElementRequest& req);
const int64_t num_consumers_;
const string worker_address_;
mutex mu_;
bool cancelled_ TF_GUARDED_BY(mu_) = false;
condition_variable new_round_cv_;
absl::flat_hash_map<int64_t,
absl::flat_hash_map<int64_t, const GetElementRequest*>>
requests_ TF_GUARDED_BY(mu_);
int64_t first_round_ TF_GUARDED_BY(mu_) = kint64max;
int64_t current_round_ TF_GUARDED_BY(mu_) = -1;
bool round_skipped_ TF_GUARDED_BY(mu_) = false;
std::vector<std::unique_ptr<Element>> buffer_ TF_GUARDED_BY(mu_);
PrefetchThread prefetch_thread_;
};
}
}
#endif
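// Illustrative usage sketch (not part of the original file): a runner is
// built from the worker config, the TaskDef, and a task iterator, then
// serves GetElement requests until cancelled.
//
//   std::unique_ptr<TaskRunner> runner;
//   TF_RETURN_IF_ERROR(TaskRunner::Create(worker_config, task_def,
//                                         std::move(task_iterator), runner));
//   GetElementRequest req;
//   GetElementResult result;
//   TF_RETURN_IF_ERROR(runner->GetNext(req, result));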
#include "tensorflow/core/data/service/task_runner.h"
#include <algorithm>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/cross_trainer_cache.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/thread_safe_buffer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr int64_t kWaitBeforeSkipUs = 100 * 1000;
constexpr size_t kDefaultCrossTrainerCacheSizeBytes =
10 * (size_t{1} << 30);
}
StandaloneTaskIterator::StandaloneTaskIterator(
std::unique_ptr<standalone::Dataset> dataset,
std::unique_ptr<standalone::Iterator> iterator)
: dataset_(std::move(dataset)), iterator_(std::move(iterator)) {}
Status StandaloneTaskIterator::GetNext(std::vector<Tensor>& element,
bool& end_of_sequence) {
return iterator_->GetNext(&element, &end_of_sequence);
}
int64_t StandaloneTaskIterator::Cardinality() const {
return dataset_->Get()->Cardinality();
}
absl::StatusOr<std::vector<Tensor>> StandaloneTaskIterator::Save() {
return iterator_->Save();
}
Status StandaloneTaskIterator::Restore(
const std::vector<Tensor>& saved_iterator) {
return iterator_->Restore(saved_iterator);
}
std::shared_ptr<model::Model> StandaloneTaskIterator::model() const {
return iterator_->model();
}
Status TaskRunner::Create(const experimental::WorkerConfig& worker_config,
const TaskDef& task_def,
std::unique_ptr<TaskIterator> iterator,
std::unique_ptr<TaskRunner>& out) {
if (task_def.optional_num_consumers_case() == TaskDef::kNumConsumers) {
int64_t cardinality = iterator->Cardinality();
if (cardinality != kInfiniteCardinality &&
cardinality != kUnknownCardinality) {
return errors::FailedPrecondition(
"Round robin reads require that the input dataset has infinite "
"cardinality, but the dataset has cardinality ",
cardinality,
". Consider adding a `.repeat()` transformation to the dataset.");
}
out = std::make_unique<RoundRobinTaskRunner>(std::move(iterator),
task_def.num_consumers(),
task_def.worker_address());
} else if (task_def.use_cross_trainer_cache()) {
const size_t max_cache_size_bytes =
worker_config.cross_trainer_cache_size_bytes() > 0
? worker_config.cross_trainer_cache_size_bytes()
: kDefaultCrossTrainerCacheSizeBytes;
out = std::make_unique<CachingTaskRunner>(std::move(iterator),
max_cache_size_bytes);
} else {
out = std::make_unique<FirstComeFirstServedTaskRunner>(std::move(iterator));
}
return absl::OkStatus();
}
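// Summary of the dispatch in Create() above:
//   - task_def.num_consumers() set       -> RoundRobinTaskRunner (requires an
//                                           infinite-cardinality dataset)
//   - task_def.use_cross_trainer_cache() -> CachingTaskRunner
//   - otherwise                          -> FirstComeFirstServedTaskRunner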
FirstComeFirstServedTaskRunner::FirstComeFirstServedTaskRunner(
std::unique_ptr<TaskIterator> iterator)
: iterator_(std::move(iterator)), buffer_(1) {
RunPrefetchThread();
}
FirstComeFirstServedTaskRunner::~FirstComeFirstServedTaskRunner() { Cancel(); }
Status FirstComeFirstServedTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
if (req.allow_skip() && buffer_.Empty()) {
result.skip = true;
return absl::OkStatus();
}
return GetNext(result);
}
Status FirstComeFirstServedTaskRunner::GetNext(GetElementResult& result) {
TF_ASSIGN_OR_RETURN(result, buffer_.Pop());
return absl::OkStatus();
}
Status FirstComeFirstServedTaskRunner::PrefetchFn() {
while (true) {
TF_RETURN_IF_ERROR(buffer_.Push(GetNextFromInputIterator()));
}
return absl::OkStatus();
}
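// Note on PrefetchFn() above: the loop runs until buffer_.Push returns an
// error (for example, after the buffer is cancelled); the trailing return
// exists only to satisfy the compiler.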
void FirstComeFirstServedTaskRunner::RunPrefetchThread() {
auto prefetch_fn = [this] {
Status status = PrefetchFn();
if (!status.ok()) {
buffer_.Cancel(status);
}
};
prefetch_thread_ = absl::WrapUnique(Env::Default()->StartThread(
{}, "tf_data_service_fcfs_prefetch_thread",
prefetch_fn));
}
absl::StatusOr<GetElementResult>
FirstComeFirstServedTaskRunner::GetNextFromInputIterator()
TF_LOCKS_EXCLUDED(mu_) {
GetElementResult result;
std::vector<Tensor> element;
bool end_of_task = false;
result.skip = false;
{
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(iterator_->GetNext(element, end_of_task));
result.end_of_sequence = end_of_task;
result.element_index = element_index_++;
}
if (!end_of_task) {
result.components = std::move(element);
}
return result;
}
void FirstComeFirstServedTaskRunner::Cancel() {
VLOG(2) << "Cancelling tf.data service FCFS task.";
buffer_.Cancel(errors::Cancelled("tf.data service FCFS task is cancelled."));
}
std::shared_ptr<model::Model> FirstComeFirstServedTaskRunner::model() const {
return model_;
}
CachingTaskRunner::CachingTaskRunner(std::unique_ptr<TaskIterator> iterator,
size_t max_cache_size_bytes)
: fcfs_task_runner_(std::move(iterator)),
cache_(max_cache_size_bytes,
std::make_unique<GetElementResultSequence>(fcfs_task_runner_)) {
LOG(INFO) << "Initialized tf.data service cross-trainer cache with "
<< ByteSize::Bytes(max_cache_size_bytes) << " of memory.";
}
CachingTaskRunner::~CachingTaskRunner() { Cancel(); }
Status CachingTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
TF_ASSIGN_OR_RETURN(std::shared_ptr<const GetElementResult> element,
cache_.Get(req.trainer_id()));
result = element->Copy();
return absl::OkStatus();
}
CachingTaskRunner::GetElementResultSequence::GetElementResultSequence(
FirstComeFirstServedTaskRunner& fcfs_task_runner)
: fcfs_task_runner_(fcfs_task_runner) {}
absl::StatusOr<GetElementResult>
CachingTaskRunner::GetElementResultSequence::GetNext() {
GetElementResult result;
TF_RETURN_IF_ERROR(fcfs_task_runner_.GetNext(result));
if (result.end_of_sequence) {
return errors::InvalidArgument(
"Cross-trainer caching requires the input dataset to be infinite. "
"However, it reached the end of sequence.");
}
return result;
}
size_t CachingTaskRunner::GetElementResultSequence::GetElementSizeBytes(
const GetElementResult& element) const {
return element.EstimatedMemoryUsageBytes();
}
void CachingTaskRunner::Cancel() {
VLOG(2) << "Cancelling tf.data service cross-trainer cache task.";
if (!cache_.IsCancelled()) {
cache_.Cancel(errors::Cancelled(
"tf.data service cross-trainer cache task is cancelled."));
}
fcfs_task_runner_.Cancel();
}
std::shared_ptr<model::Model> CachingTaskRunner::model() const {
return fcfs_task_runner_.model();
}
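// Illustrative read sketch (trainer ID hypothetical): the cross-trainer cache
// keys reads by trainer ID, so trainers at the same position share a cached
// element instead of each advancing the underlying iterator.
//
//   GetElementRequest req;
//   req.set_trainer_id("trainer_0");
//   GetElementResult result;
//   TF_RETURN_IF_ERROR(runner.GetNext(req, result));  // Served via cache_.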
RoundRobinTaskRunner::RoundRobinTaskRunner(
std::unique_ptr<TaskIterator> iterator, int64_t num_consumers,
string worker_address)
: num_consumers_(num_consumers),
worker_address_(worker_address),
buffer_(num_consumers_),
prefetch_thread_(std::move(iterator), num_consumers_) {
VLOG(1) << "Creating task runner for distributing data round-robin to "
<< num_consumers << " consumers";
}
Status RoundRobinTaskRunner::ValidateRequest(const GetElementRequest& req) {
if (req.consumer_index() < 0 || req.round_index() < 0) {
return errors::FailedPrecondition(
"RoundRobinTaskRunner needs to know the consumer index and element "
"index of each request.");
}
if (req.consumer_index() >= num_consumers_) {
return errors::FailedPrecondition(
"Requesting data for consumer index ", req.consumer_index(),
", but the task is configured for only ", num_consumers_, " consumers");
}
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PrepareFullRound(int64_t wait_us)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(1) << worker_address_ << ": Preparing full round for round "
<< current_round_;
TF_RETURN_IF_ERROR(prefetch_thread_.FillBuffer(wait_us, buffer_));
round_skipped_ = buffer_.empty();
new_round_cv_.notify_all();
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PreparePartialRound()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(1) << worker_address_ << ": Starting partial round " << first_round_
<< " for " << requests_[first_round_].size() << " consumers";
current_round_ = first_round_;
new_round_cv_.notify_all();
auto next_round_request = *(requests_[first_round_ + 1].begin()->second);
if (next_round_request.skipped_previous_round()) {
VLOG(1) << "Skipping partial round";
round_skipped_ = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(prefetch_thread_.FillBuffer(-1, buffer_));
round_skipped_ = false;
return absl::OkStatus();
}
Status RoundRobinTaskRunner::PrepareRound(const GetElementRequest& req) {
mutex_lock l(mu_);
first_round_ = std::min(first_round_, req.round_index());
absl::flat_hash_map<int64_t, const GetElementRequest*>& round =
requests_[req.round_index()];
round[req.consumer_index()] = &req;
auto cleanup = gtl::MakeCleanup([&]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
requests_[req.round_index()].erase(req.consumer_index());
});
if (current_round_ < req.round_index() && round.size() == num_consumers_) {
current_round_ = req.round_index();
int64_t wait_us = kWaitBeforeSkipUs;
if (!req.allow_skip()) {
wait_us = -1;
}
TF_RETURN_IF_ERROR(PrepareFullRound(wait_us));
}
if (current_round_ < 0 &&
requests_[first_round_].size() + requests_[first_round_ + 1].size() ==
num_consumers_) {
TF_RETURN_IF_ERROR(PreparePartialRound());
}
while (!cancelled_ && current_round_ < req.round_index()) {
TF_RETURN_IF_ERROR(prefetch_thread_.GetStatus());
new_round_cv_.wait(l);
}
if (current_round_ < req.round_index() && cancelled_) {
return errors::Cancelled("Worker is shutting down.");
}
if (current_round_ != req.round_index()) {
return errors::FailedPrecondition(
"Consumer ", req.consumer_index(), " requested data for round ",
req.round_index(), ", but the current round has already reached ",
        current_round_,
        ". This may indicate that the consumer was restarted with the same "
        "iteration name.");
}
return prefetch_thread_.GetStatus();
}
Status RoundRobinTaskRunner::GetNext(const GetElementRequest& req,
GetElementResult& result) {
TF_RETURN_IF_ERROR(ValidateRequest(req));
result.end_of_sequence = false;
VLOG(2) << worker_address_ << ": Received request from consumer index "
<< req.consumer_index() << " for round " << req.round_index();
TF_RETURN_IF_ERROR(PrepareRound(req));
tf_shared_lock l(mu_);
result.skip = round_skipped_;
if (round_skipped_) {
VLOG(1) << worker_address_ << ": Buffer not ready, skipping round "
<< current_round_ << " for consumer " << req.consumer_index();
return absl::OkStatus();
}
auto& buffer_result = buffer_[req.consumer_index()];
result.element_index = buffer_result->index;
std::vector<Tensor> element;
for (auto& component : buffer_result->components) {
element.push_back(tensor::DeepCopy(component));
}
if (VLOG_IS_ON(2)) {
int64_t size = 0;
for (auto& component : element) {
size += component.TotalBytes();
}
VLOG(2) << worker_address_ << ": Returning element " << result.element_index
<< " to consumer " << req.consumer_index() << " for round "
<< req.round_index() << ". element size " << size;
}
result.components = std::move(element);
return absl::OkStatus();
}
void RoundRobinTaskRunner::Cancel() {
mutex_lock l(mu_);
cancelled_ = true;
new_round_cv_.notify_all();
}
std::shared_ptr<model::Model> RoundRobinTaskRunner::model() const {
return prefetch_thread_.model();
}
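// Sketch of the round-robin protocol above: every consumer requests the same
// round_index; once all num_consumers_ requests for a round have arrived,
// PrepareFullRound() moves one prefetched element per consumer into buffer_,
// and each consumer reads buffer_[consumer_index]. If the buffer cannot fill
// within kWaitBeforeSkipUs and the request allows skipping, the whole round
// is skipped so consumers stay in lockstep.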
PrefetchThread::PrefetchThread(std::unique_ptr<TaskIterator> iterator,
int64_t round_size)
: iterator_(std::move(iterator)), round_size_(round_size) {
thread_ = absl::WrapUnique(
Env::Default()->StartThread({}, "round-robin-prefetch", [&] { Run(); }));
}
PrefetchThread::~PrefetchThread() {
mutex_lock l(mu_);
cancelled_ = true;
cv_.notify_all();
}
void PrefetchThread::Run() {
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && buffer_.size() >= round_size_) {
cv_.wait(l);
}
if (cancelled_) {
return;
}
}
std::vector<Tensor> element;
bool end_of_sequence;
Status s = iterator_->GetNext(element, end_of_sequence);
if (!s.ok()) {
mutex_lock l(mu_);
status_ = s;
cv_.notify_all();
return;
}
if (end_of_sequence) {
mutex_lock l(mu_);
status_ = errors::FailedPrecondition(
"Encountered end of sequence on a round-robin read iterator. "
"Please ensure that the dataset used for round-robin reading has "
"infinite cardinality, e.g. by adding a .repeat() transformation "
"at the end.");
cv_.notify_all();
return;
}
mutex_lock l(mu_);
buffer_.push_back(std::make_unique<Element>(std::move(element), index_++));
cv_.notify_all();
}
}
Status PrefetchThread::FillBuffer(int64_t wait_us,
std::vector<std::unique_ptr<Element>>& out) {
int64_t start_us = Env::Default()->NowMicros();
out.clear();
mutex_lock l(mu_);
while (buffer_.size() < round_size_ && !cancelled_ && status_.ok()) {
int64_t remaining_us = start_us + wait_us - Env::Default()->NowMicros();
if (wait_us >= 0 && remaining_us <= 0) {
break;
}
cv_.wait_for(l, std::chrono::microseconds(remaining_us));
}
TF_RETURN_IF_ERROR(status_);
if (cancelled_) {
return errors::Cancelled("Prefetch thread cancelled");
}
if (buffer_.size() < round_size_) {
DCHECK_GE(wait_us, 0);
return absl::OkStatus();
}
for (auto& elem : buffer_) {
out.push_back(std::move(elem));
}
buffer_.clear();
cv_.notify_all();
return absl::OkStatus();
}
Status PrefetchThread::GetStatus() {
mutex_lock l(mu_);
return status_;
}
std::shared_ptr<model::Model> PrefetchThread::model() const {
return iterator_->model();
}
}
} | #include "tensorflow/core/data/service/task_runner.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAreArray;
constexpr size_t kSmallCache = 100;
constexpr size_t kLargeCache = 10 * (size_t{1} << 30);
class RangeIterator : public TaskIterator {
public:
explicit RangeIterator(const int64_t range, const bool repeat)
: range_(range), repeat_(repeat) {}
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
end_of_sequence = (next_ >= range_);
if (end_of_sequence) {
return absl::OkStatus();
}
element = {Tensor{next_++}};
if (repeat_) {
next_ = next_ % range_;
}
return absl::OkStatus();
}
int64_t Cardinality() const override {
return repeat_ ? kInfiniteCardinality : range_;
}
private:
const int64_t range_;
const bool repeat_;
int64_t next_ = 0;
};
class InfiniteRangeIterator : public TaskIterator {
public:
InfiniteRangeIterator() = default;
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
element = {Tensor{next_++}};
return absl::OkStatus();
}
int64_t Cardinality() const override { return kInfiniteCardinality; }
private:
int64_t next_ = 0;
};
template <class T>
class ElementOrErrorIterator : public TaskIterator {
public:
explicit ElementOrErrorIterator(const std::vector<StatusOr<T>>& elements)
: elements_(elements) {}
Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) override {
end_of_sequence = (next_ >= elements_.size());
if (end_of_sequence) {
return absl::OkStatus();
}
const StatusOr<T>& next_element = elements_[next_++];
TF_RETURN_IF_ERROR(next_element.status());
element = {Tensor{*next_element}};
return absl::OkStatus();
}
int64_t Cardinality() const override { return elements_.size(); }
private:
const std::vector<StatusOr<T>> elements_;
int64_t next_ = 0;
};
template <class T>
StatusOr<std::vector<T>> GetTaskRunnerOutput(TaskRunner& runner,
const GetElementRequest& request) {
std::vector<T> output;
for (bool end_of_sequence = false; !end_of_sequence;) {
GetElementResult result;
TF_RETURN_IF_ERROR(runner.GetNext(request, result));
end_of_sequence = result.end_of_sequence;
if (end_of_sequence) {
break;
}
if (result.components.size() != 1) {
return errors::Internal("GetElementResult Tensor size should be 1.");
}
output.push_back(result.components[0].unaligned_flat<T>().data()[0]);
}
return output;
}
template <class T>
StatusOr<T> GetNextFromTaskRunner(TaskRunner& runner,
const GetElementRequest& request) {
GetElementResult result;
TF_RETURN_IF_ERROR(runner.GetNext(request, result));
if (result.end_of_sequence) {
return errors::OutOfRange("TaskRunner has reached the end of sequence.");
}
if (result.components.size() != 1) {
return errors::Internal("GetElementResult Tensor size should be 1.");
}
return result.components[0].unaligned_flat<T>().data()[0];
}
template <class T>
StatusOr<std::vector<T>> GetElementsFromTaskRunner(
TaskRunner& runner, const GetElementRequest& request,
const size_t num_elements) {
std::vector<T> output;
for (size_t i = 0; i < num_elements; ++i) {
TF_ASSIGN_OR_RETURN(T next, GetNextFromTaskRunner<T>(runner, request));
output.push_back(next);
}
return output;
}
std::vector<int64_t> GetRange(const size_t range) {
std::vector<int64_t> result;
for (int64_t i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
Status RunConsumer(int64_t consumer_index, int64_t start_index,
int64_t end_index, TaskRunner& task_runner,
std::vector<int64_t>& output) {
for (int64_t next_index = start_index; next_index < end_index; ++next_index) {
GetElementRequest request;
request.set_round_index(next_index);
request.set_consumer_index(consumer_index);
request.set_skipped_previous_round(false);
request.set_allow_skip(false);
GetElementResult result;
do {
TF_RETURN_IF_ERROR(task_runner.GetNext(request, result));
if (!result.end_of_sequence) {
output.push_back(result.components[0].flat<int64_t>()(0));
}
} while (result.skip);
}
return absl::OkStatus();
}
}
TEST(FirstComeFirstServedTaskRunnerTest, GetNext) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetTaskRunnerOutput<int64_t>(runner, GetElementRequest()));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
}
TEST(FirstComeFirstServedTaskRunnerTest, EmptyDataset) {
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(0, false));
for (int i = 0; i < 5; ++i) {
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
}
}
TEST(FirstComeFirstServedTaskRunnerTest, Cancel) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
runner.Cancel();
for (int i = 0; i < range; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(GetElementRequest(), result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(FirstComeFirstServedTaskRunnerTest, ConcurrentReaders) {
size_t range = 1000;
size_t num_readers = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
mutex mu;
std::vector<int64_t> results;
std::vector<std::unique_ptr<Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, &results, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetTaskRunnerOutput<int64_t>(runner, GetElementRequest()));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(GetElementRequest(), result));
EXPECT_TRUE(result.end_of_sequence);
mutex_lock l(mu);
std::move(output.begin(), output.end(), std::back_inserter(results));
})));
}
for (auto& thread : reader_threads) {
thread.reset();
}
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(range)));
}
TEST(FirstComeFirstServedTaskRunnerTest, GetNextAndCancel) {
size_t range = 10;
FirstComeFirstServedTaskRunner runner(
std::make_unique<RangeIterator>(range, false));
int64_t i;
for (i = 0; i < range / 2; ++i) {
EXPECT_THAT(GetNextFromTaskRunner<int64_t>(runner, GetElementRequest()),
IsOkAndHolds(i));
}
runner.Cancel();
for (; i < range; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(GetElementRequest(), result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(FirstComeFirstServedTaskRunnerTest, Error) {
FirstComeFirstServedTaskRunner runner(
std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
errors::InvalidArgument("Invalid argument"),
tstring("Second element"), errors::Aborted("Aborted")}));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
IsOkAndHolds("First element"));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
testing::StatusIs(error::INVALID_ARGUMENT));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
IsOkAndHolds("Second element"));
EXPECT_THAT(GetNextFromTaskRunner<tstring>(runner, GetElementRequest()),
testing::StatusIs(error::ABORTED));
}
TEST(CachingTaskRunnerTest, GetNext) {
size_t range = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
size_t num_trainers = 10;
for (size_t i = 0; i < num_trainers; ++i) {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer ", i));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(request, result));
EXPECT_FALSE(result.end_of_sequence);
}
}
TEST(CachingTaskRunnerTest, EmptyDataset) {
CachingTaskRunner runner(
std::make_unique<RangeIterator>(0, false),
kLargeCache);
GetElementRequest request;
request.set_trainer_id("Trainer ID");
GetElementResult result;
EXPECT_THAT(runner.GetNext(request, result),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
}
TEST(CachingTaskRunnerTest, SlowClientSkipsData) {
size_t range = 1000;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kSmallCache);
GetElementRequest request;
request.set_trainer_id("Fast trainer");
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> fast_trainer_output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(fast_trainer_output, ElementsAreArray(GetRange(range)));
request.set_trainer_id("Slow trainer");
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> slow_trainer_output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(slow_trainer_output, SizeIs(range));
EXPECT_THAT(slow_trainer_output[0], Gt(0));
}
TEST(CachingTaskRunnerTest, ConcurrentTrainers) {
size_t range = 100;
size_t num_readers = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, range, i]() {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", i));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> output,
GetElementsFromTaskRunner<int64_t>(runner, request, range));
EXPECT_THAT(output, ElementsAreArray(GetRange(range)));
GetElementResult result;
TF_ASSERT_OK(runner.GetNext(request, result));
EXPECT_FALSE(result.end_of_sequence);
})));
}
}
TEST(CachingTaskRunnerTest, Cancel) {
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kLargeCache);
GetElementRequest request;
request.set_trainer_id("Trainer ID");
int i;
  for (i = 0; i < 5; ++i) {
EXPECT_THAT(GetNextFromTaskRunner<int64_t>(runner, request),
IsOkAndHolds(i));
}
runner.Cancel();
for (; i < 10; ++i) {
GetElementResult result;
EXPECT_THAT(runner.GetNext(request, result),
testing::StatusIs(error::CANCELLED));
}
}
TEST(CachingTaskRunnerTest, CancelConcurrentReaders) {
size_t num_readers = 10;
CachingTaskRunner runner(std::make_unique<InfiniteRangeIterator>(),
kSmallCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
for (size_t i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner]() {
for (size_t j = 0; true; ++j) {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", (j % 100)));
GetElementResult result;
Status status = runner.GetNext(request, result);
if (!status.ok()) {
return;
}
ASSERT_FALSE(result.end_of_sequence);
ASSERT_EQ(result.components.size(), 1);
}
})));
}
Env::Default()->SleepForMicroseconds(1000000);
runner.Cancel();
for (auto& thread : reader_threads) {
thread.reset();
}
GetElementRequest request;
GetElementResult result;
request.set_trainer_id(absl::StrCat("Trainer_", 0));
EXPECT_THAT(runner.GetNext(request, result),
testing::StatusIs(error::CANCELLED));
}
TEST(CachingTaskRunnerTest, Errors) {
size_t num_readers = 10;
CachingTaskRunner runner(
std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
errors::Cancelled("Cancelled"),
tstring("Second element"),
errors::FailedPrecondition("FailedPrecondition"),
tstring("Third element"),
errors::Unavailable("Unavailable"),
}),
kLargeCache);
std::vector<std::unique_ptr<Thread>> reader_threads;
std::vector<std::vector<tstring>> results;
results.reserve(num_readers);
for (size_t i = 0; i < num_readers; ++i) {
results.emplace_back();
std::vector<tstring>& result = results.back();
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&runner, &result, i]() {
GetElementRequest request;
request.set_trainer_id(absl::StrCat("Trainer_", i));
while (true) {
absl::StatusOr<tstring> element =
GetNextFromTaskRunner<tstring>(runner, request);
if (element.ok()) {
result.push_back(*element);
}
if (errors::IsInvalidArgument(element.status())) {
EXPECT_THAT(
element.status(),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Cross-trainer caching requires the input "
"dataset to be infinite.")));
return;
}
}
})));
}
for (auto& thread : reader_threads) {
thread.reset();
}
EXPECT_EQ(results.size(), num_readers);
for (const std::vector<tstring>& result : results) {
EXPECT_THAT(result,
ElementsAre(tstring("First element"), tstring("Second element"),
tstring("Third element")));
}
}
class ConsumeParallelTest
: public ::testing::Test,
public ::testing::WithParamInterface<std::tuple<int64_t, int64_t>> {};
TEST_P(ConsumeParallelTest, ConsumeParallel) {
int64_t num_elements = std::get<0>(GetParam());
int64_t num_consumers = std::get<1>(GetParam());
RoundRobinTaskRunner runner(
std::make_unique<RangeIterator>(num_elements, true),
num_consumers,
"test_worker_address");
std::vector<std::vector<int64_t>> per_consumer_results;
std::vector<std::unique_ptr<Thread>> consumers;
mutex mu;
Status error;
for (int consumer = 0; consumer < num_consumers; ++consumer) {
mutex_lock l(mu);
per_consumer_results.emplace_back();
consumers.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("consumer_", consumer), [&, consumer] {
std::vector<int64_t> results;
Status s = RunConsumer(consumer, 0,
num_elements, runner, results);
mutex_lock l(mu);
if (!s.ok()) {
error = s;
return;
}
per_consumer_results[consumer] = std::move(results);
})));
}
consumers.clear();
mutex_lock l(mu);
TF_ASSERT_OK(error);
for (int i = 0; i < num_elements; ++i) {
int consumer = i % num_consumers;
int round = i / num_consumers;
EXPECT_EQ(per_consumer_results[consumer][round], i);
}
}
INSTANTIATE_TEST_SUITE_P(ConsumeParallelTests, ConsumeParallelTest,
::testing::Values(std::make_tuple(1000, 5),
std::make_tuple(1003, 5),
std::make_tuple(1000, 20),
std::make_tuple(4, 20),
std::make_tuple(0, 20)));
TEST(RoundRobinTaskRunner, ConsumeParallelPartialRound) {
int64_t num_consumers = 5;
std::vector<int64_t> starting_rounds = {12, 11, 11, 12, 12};
int64_t end_index = 15;
std::vector<std::vector<int64_t>> expected_consumer_results = {
{5, 10, 15}, {1, 6, 11, 16}, {2, 7, 12, 17}, {8, 13, 18}, {9, 14, 19}};
RoundRobinTaskRunner runner(
std::make_unique<RangeIterator>(30, true), num_consumers,
"test_worker_address");
std::vector<std::vector<int64_t>> per_consumer_results;
std::vector<std::unique_ptr<Thread>> consumers;
mutex mu;
Status error;
for (int consumer = 0; consumer < num_consumers; ++consumer) {
mutex_lock l(mu);
per_consumer_results.emplace_back();
consumers.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("consumer_", consumer), [&, consumer] {
std::vector<int64_t> results;
Status s = RunConsumer(consumer, starting_rounds[consumer], end_index,
runner, results);
mutex_lock l(mu);
if (!s.ok()) {
error = s;
return;
}
per_consumer_results[consumer] = std::move(results);
})));
}
consumers.clear();
mutex_lock l(mu);
TF_ASSERT_OK(error);
for (int consumer = 0; consumer < num_consumers; ++consumer) {
EXPECT_EQ(per_consumer_results[consumer],
expected_consumer_results[consumer]);
}
}
}
} |
1,748 | cpp | tensorflow/tensorflow | auto_scaler | tensorflow/core/data/service/auto_scaler.cc | tensorflow/core/data/service/auto_scaler_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_AUTO_SCALER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_AUTO_SCALER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/time/time.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/status.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
class AutoScaler {
public:
AutoScaler() = default;
std::optional<int64_t> GetOptimalNumberOfWorkers() const
TF_LOCKS_EXCLUDED(mu_);
absl::Status ReportProcessingTime(const std::string& worker_address,
absl::Duration processing_time)
TF_LOCKS_EXCLUDED(mu_);
absl::Status ReportTargetProcessingTime(int64_t consumer_id,
absl::Duration target_processing_time)
TF_LOCKS_EXCLUDED(mu_);
absl::Status RemoveWorker(const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_);
absl::Status RemoveConsumer(int64_t consumer_id) TF_LOCKS_EXCLUDED(mu_);
private:
mutable tsl::mutex mu_;
absl::flat_hash_map<std::string, double> worker_throughputs_
TF_GUARDED_BY(mu_);
absl::flat_hash_map<int64_t, double> consumption_rates_ TF_GUARDED_BY(mu_);
};
class MultipleIterationsAutoScaler {
public:
MultipleIterationsAutoScaler() = default;
absl::Status UnregisterIteration(int64_t iteration_id) TF_LOCKS_EXCLUDED(mu_);
absl::Status UpdateOptimalNumberOfWorkersMetric(
int64_t current_number_of_workers) TF_LOCKS_EXCLUDED(mu_);
std::optional<int64_t> GetOptimalNumberOfWorkers() const
TF_LOCKS_EXCLUDED(mu_);
absl::Status ReportProcessingTime(int64_t iteration_id,
const std::string& worker_address,
absl::Duration processing_time)
TF_LOCKS_EXCLUDED(mu_);
absl::Status ReportTargetProcessingTime(int64_t iteration_id,
int64_t consumer_id,
absl::Duration target_processing_time)
TF_LOCKS_EXCLUDED(mu_);
absl::Status RemoveWorker(int64_t iteration_id,
const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_);
absl::Status RemoveConsumer(int64_t iteration_id, int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_);
private:
void EnsureIterationIsRegistered(int64_t iteration_id)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
mutable tsl::mutex mu_;
absl::flat_hash_map<int64_t, std::unique_ptr<AutoScaler>> auto_scalers_
TF_GUARDED_BY(mu_);
};
}
}
#endif
#include "tensorflow/core/data/service/auto_scaler.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/metrics.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr double kAutoScalerOutlierSigmas = 1.0;
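// Returns the median rate. For an even number of rates this yields the upper
// of the two middle values. Callers must pass a non-empty map.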
template <typename T>
double GetMedian(const absl::flat_hash_map<T, double>& rates) {
std::vector<double> sorted_rates;
for (const auto& [id, rate] : rates) {
sorted_rates.push_back(rate);
}
std::sort(sorted_rates.begin(), sorted_rates.end());
return sorted_rates[sorted_rates.size() / 2];
}
template <typename T>
double GetMean(const absl::flat_hash_map<T, double>& rates) {
double rates_sum = 0.0;
for (const auto& [id, rate] : rates) {
rates_sum += rate;
}
if (rates_sum == 0.0) return 0.0;
return rates_sum / static_cast<double>(rates.size());
}
template <typename T>
double GetStandardDeviation(const absl::flat_hash_map<T, double>& rates,
double mean) {
double squared_distances_sum = 0.0;
for (const auto& [id, rate] : rates) {
squared_distances_sum += (rate - mean) * (rate - mean);
}
if (squared_distances_sum == 0.0 || rates.size() <= 1) return 0.0;
return std::sqrt(squared_distances_sum /
static_cast<double>(rates.size() - 1));
}
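// Copies `rates` into `rates_without_outliers`, substituting the median for
// any rate farther than `outlier_sigmas` sample standard deviations from the
// mean. Worked example with rates {1, 2, 100}: mean ~34.3, median 2,
// stddev ~56.9, accepted range ~[-22.5, 91.2], so 100 becomes 2 and the
// output holds {1, 2, 2} (in map iteration order).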
template <typename T>
void ReplaceOutliers(const absl::flat_hash_map<T, double>& rates,
std::vector<double>& rates_without_outliers,
double outlier_sigmas) {
if (rates.empty()) return;
double mean = GetMean(rates);
double median = GetMedian(rates);
double standard_deviation = GetStandardDeviation(rates, mean);
double lower_threshold = mean - standard_deviation * outlier_sigmas;
double upper_threshold = mean + standard_deviation * outlier_sigmas;
for (const auto& [id, rate] : rates) {
if (rate >= lower_threshold && rate <= upper_threshold) {
rates_without_outliers.push_back(rate);
} else {
rates_without_outliers.push_back(median);
}
}
}
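// The estimate below is ceil(sum(consumption rates) / mean(worker
// throughputs)), computed after outlier replacement and floored at one
// worker. Example mirroring the unit tests: workers at 0.2s and 0.15s per
// element give throughputs {5, 6.67} (mean 5.83); consumers at 0.025s and
// 0.05s per element demand {40, 20} elements/s (sum 60);
// ceil(60 / 5.83) = 11 workers.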
std::optional<int64_t> AutoScaler::GetOptimalNumberOfWorkers() const
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (worker_throughputs_.empty() || consumption_rates_.empty())
return std::nullopt;
std::vector<double> consumption_rates_without_outliers;
ReplaceOutliers(consumption_rates_, consumption_rates_without_outliers,
kAutoScalerOutlierSigmas);
  double consumption_rates_sum =
      std::accumulate(consumption_rates_without_outliers.begin(),
                      consumption_rates_without_outliers.end(), 0.0);
  std::vector<double> worker_throughputs_without_outliers;
  ReplaceOutliers(worker_throughputs_, worker_throughputs_without_outliers,
                  kAutoScalerOutlierSigmas);
  double worker_throughputs_sum =
      std::accumulate(worker_throughputs_without_outliers.begin(),
                      worker_throughputs_without_outliers.end(), 0.0);
  double average_worker_throughput =
      worker_throughputs_sum / static_cast<double>(worker_throughputs_.size());
  int64_t optimal_number_of_workers = static_cast<int64_t>(
      std::ceil(consumption_rates_sum / average_worker_throughput));
  return std::max(int64_t{1}, optimal_number_of_workers);
}
absl::Status AutoScaler::ReportProcessingTime(const std::string& worker_address,
absl::Duration processing_time)
TF_LOCKS_EXCLUDED(mu_) {
if (processing_time <= absl::ZeroDuration()) {
return absl::InvalidArgumentError(absl::StrCat(
"Cannot update processing_time with a ZeroDuration or negative value: ",
absl::FormatDuration(processing_time)));
}
double worker_throughput = 1.0 / absl::ToDoubleSeconds(processing_time);
tsl::mutex_lock l(mu_);
worker_throughputs_[worker_address] = worker_throughput;
return absl::OkStatus();
}
absl::Status AutoScaler::ReportTargetProcessingTime(
int64_t consumer_id, absl::Duration target_processing_time)
TF_LOCKS_EXCLUDED(mu_) {
if (target_processing_time <= absl::ZeroDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Cannot update target_processing_time with a ZeroDuration "
"or negative value: ",
absl::FormatDuration(target_processing_time)));
}
double consumption_rate = 1.0 / absl::ToDoubleSeconds(target_processing_time);
tsl::mutex_lock l(mu_);
consumption_rates_[consumer_id] = consumption_rate;
return absl::OkStatus();
}
absl::Status AutoScaler::RemoveWorker(const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!worker_throughputs_.contains(worker_address))
return absl::NotFoundError(
absl::StrCat("Worker with address ", worker_address, " not found"));
worker_throughputs_.erase(worker_address);
return absl::OkStatus();
}
absl::Status AutoScaler::RemoveConsumer(int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!consumption_rates_.contains(consumer_id))
return absl::NotFoundError(
absl::StrCat("Consumer with ID ", consumer_id, " not found"));
consumption_rates_.erase(consumer_id);
return absl::OkStatus();
}
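// Illustrative sketch (not part of the original sources): a minimal
// reporting cycle for a single iteration; the worker address and timings are
// made up.
//
//   AutoScaler auto_scaler;
//   // Worker takes 0.2s per element => throughput of 5 elements/s.
//   TF_CHECK_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
//                                                absl::Seconds(0.2)));
//   // Consumer wants an element every 0.025s => demand of 40 elements/s.
//   TF_CHECK_OK(
//       auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
//   std::optional<int64_t> workers = auto_scaler.GetOptimalNumberOfWorkers();
//   // workers == 8, i.e. ceil(40 / 5).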
void MultipleIterationsAutoScaler::EnsureIterationIsRegistered(
int64_t iteration_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!auto_scalers_.contains(iteration_id)) {
auto_scalers_[iteration_id] = std::make_unique<AutoScaler>();
}
}
absl::Status MultipleIterationsAutoScaler::UnregisterIteration(
int64_t iteration_id) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat("AutoScaler for iteration_id ",
iteration_id, " does not exist"));
auto_scalers_.erase(iteration_id);
return absl::OkStatus();
}
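// Records the estimated optimal worker count as a metric. To avoid runaway
// scale-up recommendations, the estimate is clamped to
// min(4 * current_number_of_workers, current_number_of_workers + 500)
// whenever it exceeds either limit, and is always capped at 100,000.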
absl::Status MultipleIterationsAutoScaler::UpdateOptimalNumberOfWorkersMetric(
int64_t current_number_of_workers) TF_LOCKS_EXCLUDED(mu_) {
if (current_number_of_workers <= 0)
return absl::InvalidArgumentError(
"The current number of workers must be positive");
std::optional<int64_t> optimal_number_of_workers =
GetOptimalNumberOfWorkers();
if (!optimal_number_of_workers)
return absl::UnavailableError(
"Cannot update the optimal number of workers metric because there are "
"no reported processing and target processing times for at least one "
"iteration");
VLOG(3) << "Estimated optimal number of workers: "
<< optimal_number_of_workers.value();
int64_t bound_optimal_number_of_workers = optimal_number_of_workers.value();
if (bound_optimal_number_of_workers > current_number_of_workers * 4 ||
bound_optimal_number_of_workers > current_number_of_workers + 500) {
bound_optimal_number_of_workers = std::min(current_number_of_workers * 4,
current_number_of_workers + 500);
}
bound_optimal_number_of_workers =
std::min(bound_optimal_number_of_workers, int64_t{100000});
VLOG(3) << "Bound optimal number of workers: "
<< bound_optimal_number_of_workers;
metrics::RecordTFDataServiceOptimalNumberOfWorkers(
bound_optimal_number_of_workers);
return absl::OkStatus();
}
std::optional<int64_t> MultipleIterationsAutoScaler::GetOptimalNumberOfWorkers()
const TF_LOCKS_EXCLUDED(mu_) {
int64_t optimal_number_of_workers = 0;
{
tsl::tf_shared_lock l(mu_);
for (const auto& [iteration_id, auto_scaler] : auto_scalers_) {
std::optional<int64_t> current_optimal_number_of_workers =
auto_scaler->GetOptimalNumberOfWorkers();
if (!current_optimal_number_of_workers.has_value()) continue;
optimal_number_of_workers = std::max(
optimal_number_of_workers, current_optimal_number_of_workers.value());
}
}
if (optimal_number_of_workers == 0)
return std::nullopt;
else
return optimal_number_of_workers;
}
absl::Status MultipleIterationsAutoScaler::ReportProcessingTime(
int64_t iteration_id, const std::string& worker_address,
absl::Duration processing_time) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
EnsureIterationIsRegistered(iteration_id);
absl::Status status = auto_scalers_[iteration_id]->ReportProcessingTime(
worker_address, processing_time);
return status;
}
absl::Status MultipleIterationsAutoScaler::ReportTargetProcessingTime(
int64_t iteration_id, int64_t consumer_id,
absl::Duration target_processing_time) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
EnsureIterationIsRegistered(iteration_id);
absl::Status status = auto_scalers_[iteration_id]->ReportTargetProcessingTime(
consumer_id, target_processing_time);
return status;
}
absl::Status MultipleIterationsAutoScaler::RemoveWorker(
int64_t iteration_id, const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat(
"There are no reported times for iteration_id ", iteration_id));
absl::Status status =
auto_scalers_[iteration_id]->RemoveWorker(worker_address);
return status;
}
absl::Status MultipleIterationsAutoScaler::RemoveConsumer(int64_t iteration_id,
int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat(
"There are no reported times for iteration_id ", iteration_id));
absl::Status status =
auto_scalers_[iteration_id]->RemoveConsumer(consumer_id);
return status;
}
}
} | #include "tensorflow/core/data/service/auto_scaler.h"
#include <optional>
#include "absl/time/time.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::tsl::testing::StatusIs;
TEST(AutoScalerTest, GetOptimalNumberOfWorkersInitialState) {
AutoScaler auto_scaler;
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersNoRegisteredWorkers) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersNoRegisteredConsumers) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate1) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 8);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate2) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(1, absl::Seconds(0.05)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 11);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate3) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.01)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(1, absl::Seconds(0.02)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 20);
}
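// The 500ns target time implies a 2,000,000 elements/s consumption rate,
// which lies more than one sample stddev above the mean of {2e6, 333.3, 500}
// and is therefore replaced by the median (500). Total demand becomes
// 1333.3 elements/s against a single worker throughput of 1/0.08s = 12.5:
// ceil(1333.3 / 12.5) = 107.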
TEST(AutoScalerTest, GetOptimalNumberOfWorkersRemoveOutliersTPT) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Nanoseconds(80000000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Nanoseconds(500)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Nanoseconds(3000000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Nanoseconds(2000000)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 107);
}
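// The 1µs processing time gives an outlier throughput of 1,000,000
// elements/s, replaced by the median (~14.3), so worker throughputs become
// {12.5, 14.3, 14.3} with mean ~13.7. Demand is 1/0.3ms ~ 3333.3 elements/s:
// ceil(3333.3 / 13.7) = 244.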
TEST(AutoScalerTest, GetOptimalNumberOfWorkersRemoveOutliersPT) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Nanoseconds(80000000)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Nanoseconds(70000000)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Nanoseconds(1000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Nanoseconds(300000)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 244);
}
TEST(AutoScalerTest, ReportProcessingTimeNewWorker) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportProcessingTimeExistingWorker) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(20)));
}
TEST(AutoScalerTest, ReportProcessingTimeNewAndExisting) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportProcessingTimeZeroDuration) {
AutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportProcessingTimeNegativeDuration) {
AutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
"/worker/task/0:20000", absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNewConsumer) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeExistingConsumer) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(20)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNewAndExisting) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeZeroDuration) {
AutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNegativeDuration) {
AutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, RemoveWorkerSuccessful) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/0:20000"));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/1:20000"));
}
TEST(AutoScalerTest, RemoveNonexistentWorker) {
AutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveWorker("/worker/task/0:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(AutoScalerTest, RemoveWorkerAfterNewPTReported) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/0:20000"));
}
TEST(AutoScalerTest, RemoveConsumerSuccessful) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(1));
}
TEST(AutoScalerTest, RemoveNonexistentConsumer) {
AutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveConsumer(0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(AutoScalerTest, RemoveConsumerAfterNewTPTReported) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0));
}
TEST(MultipleIterationsAutoScalerTest, UnregisterExistingIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.UnregisterIteration(0));
}
TEST(MultipleIterationsAutoScalerTest, UnregisterNonexistentIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.UnregisterIteration(0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricInvalidCurrentWorkers) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(-1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedTimes) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedPTs) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedTPTs) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricWithReportedTimes) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(1));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_GT(cell_reader.Read(), 0);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricIncreaseWithinLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(500)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(15));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 50);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetric4xIncreaseLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(2));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 8);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetric500IncreaseLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10000)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(1000));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 1500);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricMaxLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(200000)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(99700));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 100000);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest, GetOptimalNumberOfWorkersInitialState) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersNoRegisteredWorkers) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersNoRegisteredConsumers) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersExpectedEstimate1) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Seconds(0.05)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 11);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersExpectedEstimate2) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Seconds(0.05)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(2, "/worker/task/0:20000",
absl::Seconds(0.1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(2, "/worker/task/1:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, 0, absl::Seconds(0.01)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, 1, absl::Seconds(0.02)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 20);
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeExistingWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewAndExisting) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Microseconds(30)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeZeroDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
0, "/worker/task/0:20000", absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNegativeDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
0, "/worker/task/0:20000", absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeNewIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeNewConsumer) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeExistingWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeNewAndExisting) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Microseconds(30)));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeZeroDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, 0, absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeNegativeDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerUnregisteredIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveWorker(0, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(auto_scaler.RemoveWorker(1, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerSuccessful) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker(0, "/worker/task/0:20000"));
TF_ASSERT_OK(auto_scaler.RemoveWorker(1, "/worker/task/0:20000"));
}
TEST(MultipleIterationsAutoScalerTest, RemoveNonexistentWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_THAT(auto_scaler.RemoveWorker(0, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerAfterNewPTReported) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker(0, "/worker/task/0:20000"));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerUnregisteredIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveConsumer(0, 0),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(auto_scaler.RemoveConsumer(1, 0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerSuccessful) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0, 0));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(1, 0));
}
TEST(MultipleIterationsAutoScalerTest, RemoveNonexistentConsumer) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
EXPECT_THAT(auto_scaler.RemoveConsumer(0, 1),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerAfterNewTPTReported) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0, 0));
}
}
}
} |
1,749 | cpp | tensorflow/tensorflow | split_provider | tensorflow/core/data/service/split_provider.cc | tensorflow/core/data/service/split_provider_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SPLIT_PROVIDER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SPLIT_PROVIDER_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
class DataServiceSplitProvider : public SplitProvider {
public:
DataServiceSplitProvider(const std::string& address,
const std::string& protocol, int64_t iteration_id,
int64_t split_provider_index, int64_t timeout_ms)
: address_(address),
protocol_(protocol),
iteration_id_(iteration_id),
split_provider_index_(split_provider_index),
timeout_ms_(timeout_ms) {}
Status GetNext(Tensor* split, bool* end_of_splits) override;
Status Reset() override;
Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override;
Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override;
private:
const std::string address_;
const std::string protocol_;
const int64_t iteration_id_;
const int64_t split_provider_index_;
const int64_t timeout_ms_;
mutex mu_;
int64_t repetition_ TF_GUARDED_BY(mu_) = 0;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_ TF_GUARDED_BY(mu_);
};
Status CreateSplitProviders(
const DatasetDef& dataset_def,
std::vector<std::unique_ptr<SplitProvider>>& split_providers);
}
}
#endif
#include "tensorflow/core/data/service/split_provider.h"
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
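// Requests the next split from the dispatcher, creating the client lazily on
// first use and retrying transient failures until `timeout_ms_` elapses.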
Status DataServiceSplitProvider::GetNext(Tensor* split, bool* end_of_splits)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
if (!dispatcher_) {
dispatcher_ =
std::make_unique<DataServiceDispatcherClient>(address_, protocol_);
}
TF_RETURN_IF_ERROR(grpc_util::Retry(
[this, split, end_of_splits]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dispatcher_->GetSplit(iteration_id_, repetition_,
split_provider_index_, *split,
*end_of_splits);
},
"get next split",
Env::Default()->NowMicros() +
(timeout_ms_ * EnvTime::kMillisToMicros)));
if (*end_of_splits) {
VLOG(1) << "Reached end of splits for iteration_id=" << iteration_id_
<< ", repetition=" << repetition_;
} else {
VLOG(1) << "Requested split: " << split->DebugString()
<< "; with iteration_id=" << iteration_id_
<< ", repetition=" << repetition_;
}
return absl::OkStatus();
}
Status DataServiceSplitProvider::Reset() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
repetition_++;
return absl::OkStatus();
}
Status DataServiceSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
return errors::Unimplemented(
"Save is not implemented for DataServiceSplitProvider");
}
Status DataServiceSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
return errors::Unimplemented(
"Restore is not implemented for DataServiceSplitProvider");
}
Status CreateSplitProviders(
const DatasetDef& dataset_def,
std::vector<std::unique_ptr<SplitProvider>>& split_providers) {
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> standalone_dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(params, dataset_def.graph(),
&standalone_dataset));
TF_RETURN_IF_ERROR(standalone_dataset->MakeSplitProviders(&split_providers));
return absl::OkStatus();
}
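// Illustrative sketch (not part of the original file): drains every split
// produced by CreateSplitProviders. The helper name is made up, and the loop
// assumes finite split providers (infinite ones would never set
// end_of_splits).
Status CountSplitsExample(const DatasetDef& dataset_def, int64_t& num_splits) {
  std::vector<std::unique_ptr<SplitProvider>> split_providers;
  TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
  num_splits = 0;
  for (const auto& provider : split_providers) {
    bool end_of_splits = false;
    while (true) {
      Tensor split;
      TF_RETURN_IF_ERROR(provider->GetNext(&split, &end_of_splits));
      if (end_of_splits) break;
      ++num_splits;
    }
  }
  return absl::OkStatus();
}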
}
} | #include "tensorflow/core/data/service/split_provider.h"
#include <array>
#include <cstdint>
#include <memory>
#include <tuple>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
std::vector<int64_t> GetCardinalities(
const std::vector<std::unique_ptr<SplitProvider>>& split_providers) {
std::vector<int64_t> cardinalities;
for (const auto& split_provider : split_providers) {
cardinalities.push_back(split_provider->Cardinality());
}
return cardinalities;
}
TEST(SplitProviderTest, RangeCardinality) {
DatasetDef range_dataset = testing::RangeDataset(10);
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(range_dataset, split_providers));
EXPECT_THAT(GetCardinalities(split_providers), UnorderedElementsAre(10));
}
class RepeatedSplitProviderTest
: public ::testing::TestWithParam<std::tuple<int64_t, int64_t, int64_t>> {
public:
int64_t Range() const { return std::get<0>(GetParam()); }
int64_t RepeatCount() const { return std::get<1>(GetParam()); }
int64_t ExpectedCardinality() const { return std::get<2>(GetParam()); }
};
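// Each tuple is (range, repeat_count, expected cardinality): repeating a
// range of r elements c times yields r * c splits; repeat(-1) is infinite
// unless the underlying range is empty, and a non-positive range is empty.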
constexpr std::array<std::tuple<int64_t, int64_t, int64_t>, 5>
kRepeatedSplitProviderTestCases{{{9, 9, 81},
{9, 0, 0},
{9, -1, kInfiniteCardinality},
{0, -1, 0},
{-1, 1, 0}}};
TEST_P(RepeatedSplitProviderTest, RepeatedDatasetCardinality) {
TF_ASSERT_OK_AND_ASSIGN(
DatasetDef repeated_dataset,
testing::GetTestDataset(
"repeated_dataset",
{absl::StrCat(Range()), absl::StrCat(RepeatCount())}));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(repeated_dataset, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
ElementsAre(ExpectedCardinality()));
}
INSTANTIATE_TEST_SUITE_P(MyGroup, RepeatedSplitProviderTest,
::testing::ValuesIn(kRepeatedSplitProviderTestCases));
TEST(SplitProviderTest, EnumerateCardinality) {
TF_ASSERT_OK_AND_ASSIGN(DatasetDef enumerate_dataset,
testing::GetTestDataset("enumerate_dataset"));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(enumerate_dataset, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
UnorderedElementsAre(3, kInfiniteCardinality));
}
TEST(SplitProviderTest, ChooseFromDatasetsCardinality) {
TF_ASSERT_OK_AND_ASSIGN(DatasetDef sample_from_datasets,
testing::GetTestDataset("choose_from_datasets"));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(sample_from_datasets, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
UnorderedElementsAre(5, 5, 5, kInfiniteCardinality));
}
TEST(SplitProviderTest, SampleFromDatasetsCardinality) {
TF_ASSERT_OK_AND_ASSIGN(DatasetDef sample_from_datasets,
testing::GetTestDataset("sample_from_datasets"));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_ASSERT_OK(CreateSplitProviders(sample_from_datasets, split_providers));
EXPECT_THAT(GetCardinalities(split_providers),
UnorderedElementsAre(5, 5, 5, kInfiniteCardinality));
}
}
}
} |
1,750 | cpp | tensorflow/tensorflow | snapshot_stream_writer | tensorflow/core/data/service/snapshot/snapshot_stream_writer.cc | tensorflow/core/data/service/snapshot/snapshot_stream_writer_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_STREAM_WRITER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_STREAM_WRITER_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/protobuf/service_config.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr ByteSize kDefaultMaxChunkSize = ByteSize::GB(6);
constexpr absl::Duration kDefaultCheckpointInterval = absl::Minutes(30);
struct SnapshotWriterParams {
std::string snapshot_path;
int64_t stream_index = 0;
std::string compression;
Env* env = nullptr;
ByteSize max_chunk_size = kDefaultMaxChunkSize;
absl::Duration checkpoint_interval = kDefaultCheckpointInterval;
bool test_only_keep_temp_files = false;
std::string StreamDirectory() const {
return tensorflow::data::StreamDirectory(snapshot_path, stream_index);
}
std::string CommittedChunksDirectory() const {
return tensorflow::data::CommittedChunksDirectory(snapshot_path);
}
std::string UncommittedChunksDirectory() const {
return tensorflow::data::UncommittedChunksDirectory(snapshot_path,
stream_index);
}
std::string CheckpointsDirectory() const {
return tensorflow::data::CheckpointsDirectory(snapshot_path, stream_index);
}
std::string DebugString() const {
return absl::Substitute(
"SnapshotWriterParams { base_path: $0, stream: $1, compression: $2 }",
snapshot_path, stream_index, compression);
}
};
class SnapshotStreamWriter {
public:
explicit SnapshotStreamWriter(const SnapshotWriterParams& params,
std::unique_ptr<TaskIterator> iterator);
virtual ~SnapshotStreamWriter() = default;
SnapshotStreamWriter(const SnapshotStreamWriter&) = delete;
SnapshotStreamWriter& operator=(const SnapshotStreamWriter&) = delete;
absl::StatusOr<bool> Completed() const;
absl::StatusOr<bool> Wait();
void Cancel();
private:
void WriteSnapshotAndLog();
absl::Status WriteSnapshot();
bool StreamAlreadyCompleted() const;
absl::Status InitializeDirectories();
bool ShouldWriteChunks() const;
absl::Status WriteChunks();
bool ShouldWriteRecord() const;
absl::Status WriteRecord(ParallelTFRecordWriter& writer);
absl::Status Commit(const ParallelTFRecordWriter::FileToStatsMap& file_stats);
absl::Status FinalizeStream(absl::Status status);
absl::Status WriteDoneFile();
absl::Status WriteErrorFile(const absl::Status& status);
absl::Status Save(const ParallelTFRecordWriter::FileToStatsMap& file_stats);
absl::Status DeleteOutdatedCheckpoints(int64_t checkpoint_index);
absl::Status DeleteCheckpoints();
absl::Status Restore();
absl::StatusOr<std::string> LastCheckpointName() const;
absl::Status SyncCheckpointWithChunks(std::optional<int64_t> checkpoint_index,
int64_t checkpoint_num_elements);
absl::StatusOr<int64_t> LastCommittedChunkIndex();
std::string CheckpointPath(int64_t chunk_index,
int64_t chunk_num_elements) const;
std::string CheckpointPath(const std::string& checkpoint_name) const;
const SnapshotWriterParams params_;
std::unique_ptr<TaskIterator> iterator_;
int64_t chunk_index_ = 0;
absl::Time last_commit_time_ = absl::Now();
bool end_of_sequence_ = false;
mutable mutex mu_;
absl::StatusOr<bool> completed_ TF_GUARDED_BY(mu_) = false;
std::unique_ptr<Thread> snapshot_thread_;
};
}
}
#endif
#include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/utils.h"
#include "tensorflow/core/data/service/worker.pb.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
namespace {
constexpr ByteSize kTFRecordReaderOutputBufferSize = ByteSize::GB(1);
constexpr int64_t kUnknownNumElements = -1;
constexpr const char kFileShardDelimiter[] = "_CHUNK_SHARDS_";
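// Extracts the chunk index from an uncommitted shard filename. For example,
// "chunk_5_CHUNK_SHARDS_00000" (shard suffix made up here) parses to 5.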
absl::StatusOr<int64_t> GetUncommittedChunkIndex(const std::string& filename) {
std::vector<std::string> tokens =
absl::StrSplit(filename, kFileShardDelimiter);
if (tokens.size() != 2) {
return absl::InternalError(
absl::StrCat("Invalid tf.data snapshot chunk file: ", filename,
". Expected sharded chunk files."));
}
tokens = absl::StrSplit(tokens[0], '_');
int64_t chunk_index = 0;
if (tokens.size() != 2 || tokens[0] != "chunk" ||
!absl::SimpleAtoi(tokens[1], &chunk_index) || chunk_index < 0) {
return absl::InternalError(
absl::StrCat("Invalid tf.data snapshot chunk file: ", filename,
". Expected chunk_<chunk_index>."));
}
return chunk_index;
}
size_t TotalNumElements(
const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
size_t num_elements = 0;
for (const auto& [file, stats] : file_stats) {
num_elements += stats.num_records;
}
return num_elements;
}
ByteSize TotalBytes(const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
ByteSize bytes;
for (const auto& [file, stats] : file_stats) {
bytes += stats.estimated_size;
}
return bytes;
}
}
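// Construction starts the write loop on a dedicated thread. Completed()
// polls the terminal status, Wait() joins the thread and returns the status,
// and Cancel() stores a CancelledError so the loop exits at its next status
// check.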
SnapshotStreamWriter::SnapshotStreamWriter(
const SnapshotWriterParams& params, std::unique_ptr<TaskIterator> iterator)
: params_(params), iterator_(std::move(iterator)) {
DCHECK_NE(iterator_.get(), nullptr);
last_commit_time_ = absl::FromUnixMicros(params_.env->NowMicros());
snapshot_thread_ = absl::WrapUnique(params_.env->StartThread(
{}, "tf_data_service_snapshot_thread",
[this]() { WriteSnapshotAndLog(); }));
}
void SnapshotStreamWriter::WriteSnapshotAndLog() TF_LOCKS_EXCLUDED(mu_) {
if (StreamAlreadyCompleted()) {
LOG(INFO) << "Distributed tf.data snapshot stream has already been "
<< "completed for " << params_.DebugString();
mutex_lock l(mu_);
completed_ = true;
return;
}
LOG(INFO) << "Writing distributed tf.data snapshot stream: "
<< params_.DebugString();
absl::Status status = WriteSnapshot();
if (IsPreemptedError(status)) {
LOG(INFO) << "tf.data service snapshot writer is cancelled: " << status;
return;
}
status = FinalizeStream(status);
mutex_lock l(mu_);
if (!status.ok()) {
LOG(ERROR) << "Failed to write distributed tf.data snapshot stream: "
<< params_.DebugString() << ". Status: " << status;
completed_ = std::move(status);
return;
}
LOG(INFO) << "Finished writing distributed tf.data snapshot stream: "
<< params_.DebugString();
completed_ = true;
iterator_ = nullptr;
}
absl::Status SnapshotStreamWriter::WriteSnapshot() TF_LOCKS_EXCLUDED(mu_) {
TF_RETURN_IF_ERROR(InitializeDirectories());
TF_RETURN_IF_ERROR(Restore());
while (ShouldWriteChunks()) {
TF_RETURN_IF_ERROR(WriteChunks());
}
mutex_lock l(mu_);
return completed_.status();
}
bool SnapshotStreamWriter::StreamAlreadyCompleted() const {
std::string done_file_path =
StreamDoneFilePath(params_.snapshot_path, params_.stream_index);
return params_.env->FileExists(done_file_path).ok();
}
absl::Status SnapshotStreamWriter::InitializeDirectories() {
TF_RETURN_IF_ERROR(
params_.env->RecursivelyCreateDir(params_.UncommittedChunksDirectory()));
TF_RETURN_IF_ERROR(
params_.env->RecursivelyCreateDir(params_.CheckpointsDirectory()));
return absl::OkStatus();
}
bool SnapshotStreamWriter::ShouldWriteChunks() const TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !end_of_sequence_ && completed_.ok();
}
absl::Status SnapshotStreamWriter::WriteChunks() {
LOG(INFO) << "Writing distributed tf.data snapshot " << params_.snapshot_path
<< ", stream " << params_.stream_index << ", chunk " << chunk_index_
<< ".";
std::string chunks_prefix = tsl::io::JoinPath(
params_.UncommittedChunksDirectory(),
absl::StrCat("chunk_", chunk_index_, kFileShardDelimiter));
ParallelTFRecordWriter writer(TranslateFileName(chunks_prefix),
params_.compression, params_.env,
params_.max_chunk_size);
do {
TF_RETURN_IF_ERROR(WriteRecord(writer));
} while (ShouldWriteRecord());
TF_ASSIGN_OR_RETURN(const ParallelTFRecordWriter::FileToStatsMap file_stats,
writer.Finalize());
TF_RETURN_IF_ERROR(Completed().status());
TF_RETURN_IF_ERROR(Commit(file_stats));
metrics::RecordTFDataServiceSnapshotBytesCommitted(
TotalBytes(file_stats).ToUnsignedBytes());
return absl::OkStatus();
}
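// A chunk keeps absorbing records until the input ends, the writer is
// cancelled, or the adaptive checkpoint interval elapses:
// min(params_.checkpoint_interval, 0.5 * chunk_index_ + 5 minutes) since the
// last commit, so early chunks checkpoint frequently and later ones less so.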
bool SnapshotStreamWriter::ShouldWriteRecord() const {
mutex_lock l(mu_);
if (!completed_.ok() || end_of_sequence_) {
return false;
}
const absl::Time now = absl::FromUnixMicros(params_.env->NowMicros());
const absl::Duration adjusted_checkpoint_interval = std::min(
params_.checkpoint_interval, absl::Minutes(0.5 * chunk_index_ + 5));
return now < last_commit_time_ + adjusted_checkpoint_interval;
}
absl::Status SnapshotStreamWriter::WriteRecord(ParallelTFRecordWriter& writer) {
std::vector<Tensor> element;
TF_RETURN_IF_ERROR(iterator_->GetNext(element, end_of_sequence_));
if (end_of_sequence_) {
return absl::OkStatus();
}
return writer.Write(std::move(element));
}
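// Commits a finished chunk: first checkpoints the iterator, then renames
// each finalized shard into the committed directory as
// chunk_<stream_index>_<chunk_index>_<num_records>.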
absl::Status SnapshotStreamWriter::Commit(
const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
TF_RETURN_IF_ERROR(Save(file_stats));
for (const auto& [file, stats] : file_stats) {
std::string committed_chunk_path =
tsl::io::JoinPath(params_.CommittedChunksDirectory(),
absl::StrCat("chunk_", params_.stream_index, "_",
chunk_index_++, "_", stats.num_records));
TF_RETURN_IF_ERROR(params_.env->RenameFile(file, committed_chunk_path));
}
last_commit_time_ = absl::FromUnixMicros(params_.env->NowMicros());
return absl::OkStatus();
}
absl::Status SnapshotStreamWriter::FinalizeStream(absl::Status status) {
if (status.ok()) {
status = WriteDoneFile();
}
if (!status.ok()) {
WriteErrorFile(status).IgnoreError();
}
absl::Status s = DeleteCheckpoints();
if (!s.ok()) {
LOG(ERROR) << "Failed to clean up checkpoints at "
<< params_.CheckpointsDirectory() << ": " << s;
}
return status;
}
absl::Status SnapshotStreamWriter::WriteDoneFile() {
std::string done_file_path =
StreamDoneFilePath(params_.snapshot_path, params_.stream_index);
return AtomicallyWriteStringToFile(done_file_path, "", params_.env);
}
absl::Status SnapshotStreamWriter::WriteErrorFile(const absl::Status& status) {
std::string error_file_path =
tsl::io::JoinPath(params_.StreamDirectory(), "ERROR");
return AtomicallyWriteStringToFile(error_file_path, status.ToString(),
params_.env);
}
absl::StatusOr<bool> SnapshotStreamWriter::Completed() const
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return completed_;
}
absl::StatusOr<bool> SnapshotStreamWriter::Wait() TF_LOCKS_EXCLUDED(mu_) {
snapshot_thread_.reset();
mutex_lock l(mu_);
return completed_;
}
void SnapshotStreamWriter::Cancel() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
completed_ = absl::CancelledError(
"The tf.data service snapshot writer has been cancelled.");
}
absl::Status SnapshotStreamWriter::Save(
const ParallelTFRecordWriter::FileToStatsMap& file_stats) {
const size_t num_elements = TotalNumElements(file_stats);
const ByteSize byte_size = TotalBytes(file_stats);
LOG(INFO) << "Checkpointing distributed tf.data snapshot writer for snapshot "
<< params_.DebugString() << ". Stream " << params_.stream_index
<< ", chunk " << chunk_index_
<< ", number of elements in chunk: " << num_elements
<< ", chunk size: " << byte_size << ".";
tsl::profiler::TraceMe activity("SnapshotCheckpoint",
tsl::profiler::TraceMeLevel::kInfo);
absl::Time start_time = absl::FromUnixMicros(params_.env->NowMicros());
int64_t checkpoint_index = chunk_index_ + file_stats.size();
std::string checkpoint_path = CheckpointPath(checkpoint_index, num_elements);
TF_ASSIGN_OR_RETURN(std::vector<Tensor> serialized_iterator,
iterator_->Save());
TF_RETURN_IF_ERROR(AtomicallyWriteTFRecords(
checkpoint_path, serialized_iterator, params_.compression, params_.env));
absl::Time end_time = absl::FromUnixMicros(params_.env->NowMicros());
LOG(INFO) << "Wrote checkpoint file " << checkpoint_path << ". "
<< "Checkpointing distributed tf.data snapshot writer took "
<< (end_time - start_time);
return DeleteOutdatedCheckpoints(checkpoint_index);
}
absl::Status SnapshotStreamWriter::DeleteOutdatedCheckpoints(
int64_t checkpoint_index) {
if (params_.test_only_keep_temp_files) {
return absl::OkStatus();
}
std::vector<std::string> checkpoint_filenames;
TF_RETURN_IF_ERROR(params_.env->GetChildren(params_.CheckpointsDirectory(),
&checkpoint_filenames));
for (const std::string& checkpoint_filename : checkpoint_filenames) {
std::string checkpoint_filepath =
tsl::io::JoinPath(params_.CheckpointsDirectory(), checkpoint_filename);
if (IsTemporaryFile(checkpoint_filename)) {
TF_RETURN_IF_ERROR(params_.env->DeleteFile(checkpoint_filepath));
continue;
}
TF_ASSIGN_OR_RETURN(auto checkpoint_filename_tokens,
ParseCheckpointFilename(checkpoint_filename));
auto [checkpoint_file_index, _] = checkpoint_filename_tokens;
if (checkpoint_file_index < checkpoint_index) {
TF_RETURN_IF_ERROR(params_.env->DeleteFile(checkpoint_filepath));
}
}
return absl::OkStatus();
}
absl::Status SnapshotStreamWriter::DeleteCheckpoints() {
if (params_.test_only_keep_temp_files) {
return absl::OkStatus();
}
LOG(INFO) << "Deleting tf.data snapshot checkpoints directory: "
<< params_.CheckpointsDirectory();
if (params_.env->FileExists(params_.CheckpointsDirectory()).ok()) {
int64_t undeleted_files, undeleted_dirs;
return params_.env->DeleteRecursively(params_.CheckpointsDirectory(),
&undeleted_files, &undeleted_dirs);
}
return absl::OkStatus();
}
absl::Status SnapshotStreamWriter::Restore() {
absl::StatusOr<std::string> checkpoint_name = LastCheckpointName();
if (absl::IsNotFound(checkpoint_name.status())) {
    return SyncCheckpointWithChunks(std::nullopt, kUnknownNumElements);
}
TF_RETURN_IF_ERROR(checkpoint_name.status());
snapshot_util::TFRecordReaderImpl reader(
CheckpointPath(*checkpoint_name), params_.compression,
kTFRecordReaderOutputBufferSize.ToUnsignedBytes());
TF_RETURN_IF_ERROR(reader.Initialize(params_.env));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> serialized_tensors,
reader.GetTensors());
TF_RETURN_IF_ERROR(iterator_->Restore(serialized_tensors));
TF_ASSIGN_OR_RETURN(auto checkpoint_name_tokens,
ParseCheckpointFilename(*checkpoint_name));
auto [checkpoint_index, checkpoint_num_elements] = checkpoint_name_tokens;
TF_RETURN_IF_ERROR(
SyncCheckpointWithChunks(checkpoint_index, checkpoint_num_elements));
chunk_index_ = checkpoint_index;
LOG(INFO) << "Restored distributed tf.data snapshot writer. Snapshot "
<< params_.snapshot_path << ", stream " << params_.stream_index
<< ", chunk " << checkpoint_index << ".";
return absl::OkStatus();
}
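// Added commentary (not in the original source): the recovery sequence is
// (1) locate the most recent checkpoint, (2) restore the iterator from it,
// (3) reconcile uncommitted chunk files against the checkpoint index, and
// only then (4) advance chunk_index_, so the writer resumes exactly at the
// checkpointed chunk.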
absl::StatusOr<std::string> SnapshotStreamWriter::LastCheckpointName() const {
TF_ASSIGN_OR_RETURN(std::vector<std::string> checkpoint_names,
GetChildren(params_.CheckpointsDirectory(), params_.env));
if (checkpoint_names.empty()) {
return absl::NotFoundError(
absl::StrCat("No checkpoint has been written in directory ",
params_.CheckpointsDirectory()));
}
int64_t last_index = -1;
std::string last_checkpoint_name = "";
for (const std::string& checkpoint_name : checkpoint_names) {
TF_ASSIGN_OR_RETURN(auto checkpoint_name_tokens,
ParseCheckpointFilename(checkpoint_name));
auto [checkpoint_index, unused] = checkpoint_name_tokens;
if (checkpoint_index > last_index) {
last_index = checkpoint_index;
last_checkpoint_name = checkpoint_name;
}
}
return last_checkpoint_name;
}
absl::Status SnapshotStreamWriter::SyncCheckpointWithChunks(
std::optional<int64_t> checkpoint_index, int64_t checkpoint_num_elements) {
TF_ASSIGN_OR_RETURN(
std::vector<std::string> uncommitted_chunks,
GetChildren(params_.UncommittedChunksDirectory(), params_.env));
TF_ASSIGN_OR_RETURN(int64_t last_committed_chunk_index,
LastCommittedChunkIndex());
int64_t next_chunk_index = last_committed_chunk_index + 1;
for (const std::string& uncommitted_chunk : uncommitted_chunks) {
std::string uncommitted_chunk_filename = tsl::io::JoinPath(
params_.UncommittedChunksDirectory(), uncommitted_chunk);
TF_ASSIGN_OR_RETURN(int64_t uncommitted_chunk_index,
GetUncommittedChunkIndex(uncommitted_chunk));
if (checkpoint_index.has_value() &&
uncommitted_chunk_index < *checkpoint_index) {
int64_t chunk_num_elements = (next_chunk_index == *checkpoint_index - 1)
? checkpoint_num_elements
: kUnknownNumElements;
std::string committed_chunk_filename = tsl::io::JoinPath(
params_.CommittedChunksDirectory(),
absl::StrCat("chunk_", params_.stream_index, "_", next_chunk_index,
"_", chunk_num_elements));
TF_RETURN_IF_ERROR(params_.env->RenameFile(uncommitted_chunk_filename,
committed_chunk_filename));
++next_chunk_index;
} else {
TF_RETURN_IF_ERROR(params_.env->DeleteFile(uncommitted_chunk_filename));
}
}
if (checkpoint_index.has_value() && next_chunk_index != *checkpoint_index) {
return absl::InternalError(absl::StrCat(
"Failed to recover tf.data snapshot writer: Unable to find chunks [",
next_chunk_index, ", ", *checkpoint_index, ")."));
}
return absl::OkStatus();
}
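// Added commentary (not in the original source). Worked example: if the last
// committed chunk index is 3 and the checkpoint is for index 6, uncommitted
// chunks with indices below 6 are renamed into the committed directory as
// chunks 4 and 5, and chunk 5, the one directly preceding the checkpoint,
// records the checkpointed element count. Uncommitted chunks at or past
// index 6 were written after the checkpoint and are deleted, to be
// regenerated once the iterator is restored.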
absl::StatusOr<int64_t> SnapshotStreamWriter::LastCommittedChunkIndex() {
std::string committed_chunks_directory = params_.CommittedChunksDirectory();
  TF_ASSIGN_OR_RETURN(
      std::vector<std::string> committed_chunks,
      GetChildren(committed_chunks_directory, params_.env));
int64_t last_committed_chunk_index = -1;
for (const std::string& committed_chunk : committed_chunks) {
TF_ASSIGN_OR_RETURN(auto chunk_filename_tokens,
ParseChunkFilename(committed_chunk));
const auto [stream_index, chunk_index, _] = chunk_filename_tokens;
if (stream_index != params_.stream_index) {
continue;
}
if (chunk_index > last_committed_chunk_index) {
last_committed_chunk_index = chunk_index;
}
}
return last_committed_chunk_index;
}
std::string SnapshotStreamWriter::CheckpointPath(
int64_t chunk_index, int64_t chunk_num_elements) const {
return tsl::io::JoinPath(
params_.CheckpointsDirectory(),
absl::StrCat("checkpoint_", chunk_index, "_", chunk_num_elements));
}
std::string SnapshotStreamWriter::CheckpointPath(
const std::string& checkpoint_name) const {
return tsl::io::JoinPath(params_.CheckpointsDirectory(), checkpoint_name);
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_stream_writer.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/test_utils.h"
#include "tensorflow/core/data/service/task_runner.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/lib/monitoring/cell_reader.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::testing::ValuesIn;
using ::tsl::monitoring::testing::CellReader;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::unique_ptr<StandaloneTaskIterator>> TestIterator(
const DatasetDef& dataset_def) {
std::unique_ptr<standalone::Dataset> dataset;
TF_RETURN_IF_ERROR(standalone::Dataset::FromGraph(
standalone::Dataset::Params(), dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_RETURN_IF_ERROR(dataset->MakeIterator(&iterator));
return std::make_unique<StandaloneTaskIterator>(std::move(dataset),
std::move(iterator));
}
template <class T>
class ElementOrErrorIterator : public TaskIterator {
public:
explicit ElementOrErrorIterator(
const std::vector<absl::StatusOr<T>>& elements)
: elements_(elements) {}
absl::Status GetNext(std::vector<Tensor>& element,
bool& end_of_sequence) override {
end_of_sequence = (next_ >= elements_.size());
if (end_of_sequence) {
return absl::OkStatus();
}
const absl::StatusOr<T>& next_element = elements_[next_++];
TF_RETURN_IF_ERROR(next_element.status());
element = {Tensor{*next_element}};
return absl::OkStatus();
}
absl::StatusOr<std::vector<Tensor>> Save() override {
return std::vector<Tensor>{};
}
absl::Status Restore(const std::vector<Tensor>& saved_iterator) override {
return absl::OkStatus();
}
int64_t Cardinality() const override { return elements_.size(); }
private:
const std::vector<absl::StatusOr<T>> elements_;
int64_t next_ = 0;
};
absl::StatusOr<std::string> CreateSnapshotDirectory() {
std::string snapshot_path;
if (!Env::Default()->LocalTempFilename(&snapshot_path)) {
return absl::FailedPreconditionError(
"Failed to create local temp file for snapshot.");
}
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
return snapshot_path;
}
absl::StatusOr<std::unique_ptr<snapshot_util::Reader>> CreateSnapshotReader(
const std::string& snapshot_path, int64_t num_elements,
const std::string& compression, Env* env) {
static constexpr int kTFRecordReader = 2;
DataTypeVector dtypes(num_elements, DT_INT64);
std::unique_ptr<snapshot_util::Reader> reader;
TF_RETURN_IF_ERROR(snapshot_util::Reader::Create(
env, snapshot_path, compression, kTFRecordReader, dtypes, &reader));
return reader;
}
template <class T>
absl::StatusOr<std::vector<T>> ReadSnapshot(const std::string& snapshot_path,
const std::string& compression,
int64_t num_elements) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<snapshot_util::Reader> reader,
CreateSnapshotReader(snapshot_path, num_elements,
compression, Env::Default()));
std::vector<Tensor> tensors;
TF_RETURN_IF_ERROR(reader->ReadTensors(&tensors));
std::vector<T> result;
for (const Tensor& tensor : tensors) {
result.push_back(tensor.unaligned_flat<T>().data()[0]);
}
return result;
}
absl::StatusOr<std::string> ReadStringFromFile(const std::string& filename) {
std::string data;
TF_RETURN_IF_ERROR(ReadFileToString(Env::Default(), filename, &data));
return data;
}
class SnapshotStreamWriterParameterizedTest
: public ::testing::TestWithParam<std::string> {
public:
std::string Compression() const { return GetParam(); }
};
TEST_P(SnapshotStreamWriterParameterizedTest, WriteSnapshot) {
CellReader<int64_t> cell_reader(
"/tensorflow/data/service/snapshot_bytes_committed");
EXPECT_EQ(cell_reader.Delta(), 0);
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
EXPECT_THAT(
GetChildren(writer_params.UncommittedChunksDirectory(), Env::Default()),
IsOkAndHolds(IsEmpty()));
EXPECT_GE(cell_reader.Delta(), 80);
}
TEST_P(SnapshotStreamWriterParameterizedTest, StreamAlreadyCompleted) {
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
TF_ASSERT_OK_AND_ASSIGN(iterator, TestIterator(testing::RangeDataset(range)));
SnapshotStreamWriter duplicate_writer(writer_params, std::move(iterator));
  EXPECT_THAT(duplicate_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
TEST_P(SnapshotStreamWriterParameterizedTest, WriteSnapshotChunks) {
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default(),
ByteSize::Bytes(1)};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(
GetChildren(writer_params.CommittedChunksDirectory(), Env::Default()),
IsOkAndHolds(SizeIs(range)));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path, Compression()),
IsOkAndHolds(UnorderedElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
TEST_P(SnapshotStreamWriterParameterizedTest, WriteDoneFile) {
int64_t range = 10;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::string done_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "DONE");
std::string error_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "ERROR");
EXPECT_THAT(Env::Default()->FileExists(done_file_path),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(Env::Default()->FileExists(error_file_path),
StatusIs(absl::StatusCode::kNotFound));
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default(),
ByteSize::Bytes(1)};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
TF_EXPECT_OK(Env::Default()->FileExists(done_file_path));
EXPECT_THAT(Env::Default()->FileExists(error_file_path),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(snapshot_writer.Completed(), IsOkAndHolds(true));
}
TEST_P(SnapshotStreamWriterParameterizedTest, WriteErrorFile) {
auto error_iterator = std::make_unique<ElementOrErrorIterator<tstring>>(
std::vector<absl::StatusOr<tstring>>{
tstring("First element"),
absl::InvalidArgumentError("Invalid argument"),
tstring("Second element"), absl::AbortedError("Aborted")});
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::string done_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "DONE");
std::string error_file_path = tsl::io::JoinPath(
StreamDirectory(snapshot_path, 0), "ERROR");
EXPECT_THAT(Env::Default()->FileExists(done_file_path),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(Env::Default()->FileExists(error_file_path),
StatusIs(absl::StatusCode::kNotFound));
SnapshotWriterParams writer_params{snapshot_path, 0,
Compression(), Env::Default(),
ByteSize::Bytes(1)};
SnapshotStreamWriter snapshot_writer(writer_params,
std::move(error_iterator));
EXPECT_THAT(snapshot_writer.Wait(),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Env::Default()->FileExists(done_file_path),
StatusIs(absl::StatusCode::kNotFound));
TF_EXPECT_OK(Env::Default()->FileExists(error_file_path));
EXPECT_THAT(ReadStringFromFile(error_file_path),
IsOkAndHolds(HasSubstr("Invalid argument")));
EXPECT_THAT(snapshot_writer.Completed(),
StatusIs(absl::StatusCode::kInvalidArgument));
}
INSTANTIATE_TEST_SUITE_P(Compression, SnapshotStreamWriterParameterizedTest,
ValuesIn<std::string>({tsl::io::compression::kNone,
tsl::io::compression::kGzip,
tsl::io::compression::kSnappy,
tsl::io::compression::kZlib}));
TEST(SnapshotStreamWriterTest, EmptyDataset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(0)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
tsl::io::compression::kSnappy,
Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
EXPECT_THAT(snapshot_writer.Wait(), IsOkAndHolds(true));
EXPECT_THAT(testing::ReadSnapshot<int64_t>(snapshot_path,
tsl::io::compression::kSnappy),
IsOkAndHolds(IsEmpty()));
}
TEST(SnapshotStreamWriterTest, Cancel) {
const int64_t range = 10000;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<StandaloneTaskIterator> iterator,
TestIterator(testing::RangeDataset(range)));
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotWriterParams writer_params{snapshot_path, 0,
tsl::io::compression::kSnappy,
Env::Default()};
SnapshotStreamWriter snapshot_writer(writer_params, std::move(iterator));
snapshot_writer.Cancel();
EXPECT_THAT(snapshot_writer.Wait(), StatusIs(absl::StatusCode::kCancelled));
}
}
}
} |
1,751 | cpp | tensorflow/tensorflow | snapshot_chunk_provider | tensorflow/core/data/service/snapshot/snapshot_chunk_provider.cc | tensorflow/core/data/service/snapshot/snapshot_chunk_provider_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_CHUNK_PROVIDER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_CHUNK_PROVIDER_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
namespace tensorflow {
namespace data {
class SnapshotChunkProvider : public SplitProvider {
public:
SnapshotChunkProvider(absl::string_view snapshot_path, tsl::Env* env);
~SnapshotChunkProvider() override = default;
SnapshotChunkProvider(const SnapshotChunkProvider&) = delete;
SnapshotChunkProvider& operator=(const SnapshotChunkProvider&) = delete;
absl::Status GetNext(Tensor* split, bool* end_of_splits) override;
absl::Status Reset() override;
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override;
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override;
int64_t Cardinality() const override;
void Cancel() override;
private:
struct SnapshotState {
SnapshotState() = default;
explicit SnapshotState(bool snapshot_is_done)
: snapshot_is_done(snapshot_is_done) {}
explicit SnapshotState(absl::Status status) : status(std::move(status)) {}
bool snapshot_is_done = false;
absl::Status status = absl::OkStatus();
};
struct ChunkOrder {
bool operator()(const std::string& chunk1, const std::string& chunk2) const;
};
using OrderedChunkSet = absl::btree_set<std::string, ChunkOrder>;
static std::string SetToString(const OrderedChunkSet& s);
static OrderedChunkSet SetFromString(absl::string_view s);
absl::Status UpdateSnapshot();
absl::StatusOr<SnapshotState> GetSnapshotState();
absl::StatusOr<std::vector<std::string>> GetAvailableChunks();
const std::string snapshot_path_;
tsl::Env* const env_;
mutable absl::Mutex mu_;
OrderedChunkSet chunks_read_ ABSL_GUARDED_BY(mu_);
OrderedChunkSet chunks_unread_ ABSL_GUARDED_BY(mu_);
SnapshotState snapshot_state_ ABSL_GUARDED_BY(mu_);
};
}
}
#endif
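// A minimal usage sketch (added commentary, not part of the original file);
// `snapshot_path` is a hypothetical directory produced by a snapshot writer,
// and error handling is elided:
//
//   tensorflow::data::SnapshotChunkProvider provider(snapshot_path,
//                                                    tsl::Env::Default());
//   tensorflow::Tensor split;
//   bool end_of_splits = false;
//   while (true) {
//     TF_RETURN_IF_ERROR(provider.GetNext(&split, &end_of_splits));
//     if (end_of_splits) break;
//     // `split` is a string scalar holding the absolute path of the next
//     // committed chunk file, ordered by (chunk index, stream index).
//   }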
#include "tensorflow/core/data/service/snapshot/snapshot_chunk_provider.h"
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kChunksRead[] = "chunks_read";
constexpr absl::string_view kSetElementDelimiter = ",";
Tensor ConvertToTensor(absl::string_view s) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tsl::tstring>()() = tsl::tstring(s);
return tensor;
}
std::string AbsPath(absl::string_view snapshot_path, absl::string_view chunk) {
return tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk);
}
void Backoff(int num_retries, tsl::Env* env) {
if (num_retries >= 1) {
absl::Duration retry_backoff = tsl::ComputeRetryBackoff(num_retries - 1);
env->SleepForMicroseconds(absl::ToInt64Microseconds(retry_backoff));
}
}
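// Added commentary (not in the original source): GetNext() below calls
// Backoff() before each retry, so a reader polling for chunks while the
// snapshot is still being written backs off per tsl::ComputeRetryBackoff
// instead of spinning on the file system.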
}
SnapshotChunkProvider::SnapshotChunkProvider(absl::string_view snapshot_path,
tsl::Env* env)
: snapshot_path_(snapshot_path), env_(env) {}
absl::Status SnapshotChunkProvider::GetNext(Tensor* split, bool* end_of_splits)
ABSL_LOCKS_EXCLUDED(mu_) {
for (int num_retries = 0;; ++num_retries) {
Backoff(num_retries, env_);
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(snapshot_state_.status);
if (!chunks_unread_.empty()) {
std::string next_chunk = *chunks_unread_.begin();
chunks_read_.insert(next_chunk);
chunks_unread_.erase(next_chunk);
*split = ConvertToTensor(AbsPath(snapshot_path_, next_chunk));
*end_of_splits = false;
return absl::OkStatus();
}
if (snapshot_state_.snapshot_is_done) {
*end_of_splits = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(UpdateSnapshot());
}
}
absl::Status SnapshotChunkProvider::UpdateSnapshot()
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_ASSIGN_OR_RETURN(snapshot_state_, GetSnapshotState());
TF_RETURN_IF_ERROR(snapshot_state_.status);
TF_ASSIGN_OR_RETURN(std::vector<std::string> chunks, GetAvailableChunks());
for (const std::string& chunk : chunks) {
if (!chunks_read_.contains(chunk)) {
chunks_unread_.insert(std::string(chunk));
}
}
return absl::OkStatus();
}
absl::StatusOr<SnapshotChunkProvider::SnapshotState>
SnapshotChunkProvider::GetSnapshotState() {
std::string error_file_path = SnapshotErrorFilePath(snapshot_path_);
if (env_->FileExists(error_file_path).ok()) {
StatusProto status_proto;
TF_RETURN_IF_ERROR(ReadTextProto(env_, error_file_path, &status_proto));
absl::Status status = tsl::StatusFromProto(status_proto);
if (status.ok()) {
return absl::InternalError(absl::StrCat(
"Unexpected snapshot ERROR file contains an OK status at ",
error_file_path, "."));
}
return SnapshotState(status);
}
return SnapshotState(
env_->FileExists(SnapshotDoneFilePath(snapshot_path_)).ok());
}
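// Added commentary (not in the original source): the snapshot directory acts
// as the state channel here. An ERROR file containing a non-OK StatusProto
// fails the read, a DONE file marks the snapshot complete, and the absence
// of both means the snapshot is still in progress and more chunks may yet
// appear.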
absl::StatusOr<std::vector<std::string>>
SnapshotChunkProvider::GetAvailableChunks() {
absl::StatusOr<std::vector<std::string>> status_or_chunks =
GetChildren(CommittedChunksDirectory(snapshot_path_), env_);
if (status_or_chunks.ok()) {
return *std::move(status_or_chunks);
} else if (absl::IsNotFound(status_or_chunks.status())) {
return std::vector<std::string>{};
}
return status_or_chunks.status();
}
absl::Status SnapshotChunkProvider::Reset() {
absl::MutexLock l(&mu_);
chunks_read_.clear();
chunks_unread_.clear();
return UpdateSnapshot();
}
absl::Status SnapshotChunkProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kChunksRead), SetToString(chunks_read_)));
return absl::OkStatus();
}
absl::Status SnapshotChunkProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
absl::MutexLock l(&mu_);
tsl::tstring chunks_read;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kChunksRead), &chunks_read));
chunks_read_ = SetFromString(chunks_read);
return UpdateSnapshot();
}
int64_t SnapshotChunkProvider::Cardinality() const {
return SnapshotChunksCardinality(snapshot_path_, env_);
}
void SnapshotChunkProvider::Cancel() {
absl::MutexLock l(&mu_);
if (snapshot_state_.snapshot_is_done || !snapshot_state_.status.ok()) {
return;
}
snapshot_state_.status = absl::CancelledError(
absl::StrCat("Cancelled loading tf.data snapshot at ", snapshot_path_));
VLOG(2) << snapshot_state_.status;
}
std::string SnapshotChunkProvider::SetToString(
const SnapshotChunkProvider::OrderedChunkSet& s) {
return absl::StrJoin(s, kSetElementDelimiter);
}
SnapshotChunkProvider::OrderedChunkSet SnapshotChunkProvider::SetFromString(
absl::string_view s) {
if (s.empty()) {
return {};
}
std::vector<std::string> split = absl::StrSplit(s, kSetElementDelimiter);
return OrderedChunkSet(split.begin(), split.end());
}
bool SnapshotChunkProvider::ChunkOrder::operator()(
const std::string& chunk1, const std::string& chunk2) const {
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> tokens1 =
ParseChunkFilename(chunk1);
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> tokens2 =
ParseChunkFilename(chunk2);
if (!tokens1.status().ok()) {
LOG_EVERY_N_SEC(ERROR, 60) << "Failed to parse tf.data snapshot chunk file "
<< chunk1 << ": " << tokens1.status();
return chunk1 < chunk2;
}
if (!tokens2.status().ok()) {
LOG_EVERY_N_SEC(ERROR, 60) << "Failed to parse tf.data snapshot chunk file "
<< chunk2 << ": " << tokens2.status();
return chunk1 < chunk2;
}
auto [stream_index1, chunk_index1, num_records1] = *tokens1;
auto [stream_index2, chunk_index2, num_records2] = *tokens2;
if (chunk_index1 != chunk_index2) {
return chunk_index1 < chunk_index2;
}
return stream_index1 < stream_index2;
}
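// Added commentary (not in the original source). Worked example: under this
// ordering, "chunk_1_0_10" (stream 1, chunk 0) sorts before "chunk_0_1_10"
// (stream 0, chunk 1), because the chunk index is compared before the stream
// index; names that fail to parse fall back to lexicographic order.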
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_chunk_provider.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/tstring.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::string> CreateSnapshotDirectory() {
std::string snapshot_path;
if (!tsl::Env::Default()->LocalTempFilename(&snapshot_path)) {
return absl::FailedPreconditionError(
"Failed to create local temp file for snapshot.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
return snapshot_path;
}
absl::Status WriteChunk(absl::string_view snapshot_path,
absl::string_view chunk_file) {
return AtomicallyWriteStringToFile(
tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk_file),
"", tsl::Env::Default());
}
absl::Status SetDone(absl::string_view snapshot_path) {
return AtomicallyWriteStringToFile(SnapshotDoneFilePath(snapshot_path), "",
tsl::Env::Default());
}
absl::Status SetStatus(absl::string_view snapshot_path,
const absl::Status& status) {
return AtomicallyWriteTextProto(SnapshotErrorFilePath(snapshot_path),
tsl::StatusToProto(status),
tsl::Env::Default());
}
absl::StatusOr<std::string> GetChunk(
SnapshotChunkProvider& snapshot_chunk_provider) {
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
return absl::OutOfRangeError("No more available chunks.");
}
return split.unaligned_flat<tsl::tstring>().data()[0];
}
absl::StatusOr<std::vector<std::string>> GetAllChunks(
SnapshotChunkProvider& snapshot_chunk_provider) {
std::vector<std::string> chunks;
while (true) {
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
return chunks;
}
chunks.push_back(split.unaligned_flat<tsl::tstring>().data()[0]);
}
return chunks;
}
std::vector<std::string> JoinPaths(absl::string_view snapshot_path,
                                   const std::vector<std::string>& chunks) {
std::vector<std::string> joined_chunks;
for (absl::string_view chunk : chunks) {
joined_chunks.push_back(
tsl::io::JoinPath(CommittedChunksDirectory(snapshot_path), chunk));
}
return joined_chunks;
}
std::string full_name(const std::string& name) {
return FullName("test", name);
}
absl::Status SaveAndRestore(SplitProvider& split_provider) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(split_provider.Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
TF_RETURN_IF_ERROR(split_provider.Restore(full_name, &reader));
return absl::OkStatus();
}
TEST(SnapshotChunkProviderTest, EmptySnapshot) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider), IsOkAndHolds(IsEmpty()));
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider), IsOkAndHolds(IsEmpty()));
}
TEST(SnapshotChunkProviderTest, SingleReader) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::vector<std::string> chunks = {"chunk_4_4_4", "chunk_3_3_3",
"chunk_2_2_2", "chunk_1_1_1",
"chunk_0_0_0"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
absl::c_reverse(chunks);
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider),
IsOkAndHolds(ElementsAreArray(JoinPaths(snapshot_path, chunks))));
}
TEST(SnapshotChunkProviderTest, Cardinality) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), kUnknownCardinality);
std::vector<std::string> chunks = {"chunk_1_1_1", "chunk_2_2_2",
"chunk_3_3_3", "chunk_4_4_4"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), kUnknownCardinality);
TF_ASSERT_OK(SetDone(snapshot_path));
EXPECT_EQ(snapshot_chunk_provider.Cardinality(), 5);
}
TEST(SnapshotChunkProviderTest, WaitForSnapshot) {
std::string snapshot_path;
ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&snapshot_path));
absl::Mutex mu;
std::vector<std::string> result;
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader",
[&snapshot_path, &mu, &result]() {
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> chunks,
GetAllChunks(snapshot_chunk_provider));
absl::MutexLock l(&mu);
result = std::move(chunks);
}));
{
absl::MutexLock l(&mu);
EXPECT_TRUE(result.empty());
}
TF_ASSERT_OK(tsl::Env::Default()->RecursivelyCreateDir(
CommittedChunksDirectory(snapshot_path)));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(SetDone(snapshot_path));
reader_thread.reset();
absl::MutexLock l(&mu);
EXPECT_THAT(result,
ElementsAreArray(JoinPaths(snapshot_path, {"chunk_0_0_0"})));
}
TEST(SnapshotChunkProviderTest, ConcurrentReadWrite) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
const int num_readers = 10;
absl::Mutex mu;
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
std::vector<std::string> result;
std::vector<std::unique_ptr<tsl::Thread>> reader_threads;
for (int i = 0; i < num_readers; ++i) {
reader_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Reader_", i),
[&snapshot_chunk_provider, &mu, &result]() {
while (true) {
tsl::Env::Default()->SleepForMicroseconds(25);
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(
snapshot_chunk_provider.GetNext(&split, &end_of_splits));
if (end_of_splits) {
break;
}
absl::MutexLock l(&mu);
result.push_back(split.unaligned_flat<tsl::tstring>().data()[0]);
}
})));
}
int num_streams = 10, num_chunks_per_stream = 50;
std::vector<std::unique_ptr<tsl::Thread>> stream_threads;
for (int i = 0; i < num_streams; ++i) {
stream_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Writer_", i),
[&snapshot_path, num_chunks_per_stream, i]() {
for (int j = 0; j < num_chunks_per_stream; ++j) {
std::string filename = absl::StrCat("chunk_", i, "_", j, "_1");
TF_ASSERT_OK(WriteChunk(snapshot_path, filename));
tsl::Env::Default()->SleepForMicroseconds(35);
}
})));
}
stream_threads.clear();
TF_ASSERT_OK(SetDone(snapshot_path));
reader_threads.clear();
std::vector<std::string> expected;
for (int i = 0; i < num_streams; ++i) {
for (int j = 0; j < num_chunks_per_stream; ++j) {
expected.push_back(absl::StrCat("chunk_", i, "_", j, "_1"));
}
}
EXPECT_THAT(result,
UnorderedElementsAreArray(JoinPaths(snapshot_path, expected)));
}
TEST(SnapshotChunkProviderTest, SaveRestore) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::vector<std::string> chunks = {"chunk_4_4_4", "chunk_3_3_3",
"chunk_2_2_2", "chunk_1_1_1",
"chunk_0_0_0"};
for (absl::string_view chunk : chunks) {
TF_ASSERT_OK(WriteChunk(snapshot_path, chunk));
}
TF_ASSERT_OK(SetDone(snapshot_path));
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(GetChunk(snapshot_chunk_provider),
IsOkAndHolds(tsl::io::JoinPath(
CommittedChunksDirectory(snapshot_path), "chunk_0_0_0")));
TF_ASSERT_OK(SaveAndRestore(snapshot_chunk_provider));
EXPECT_THAT(GetAllChunks(snapshot_chunk_provider),
IsOkAndHolds(ElementsAreArray(
JoinPaths(snapshot_path, {"chunk_1_1_1", "chunk_2_2_2",
"chunk_3_3_3", "chunk_4_4_4"}))));
}
TEST(SnapshotChunkProviderTest, SnapshotError) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader", [&snapshot_path]() {
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
EXPECT_THAT(
GetAllChunks(snapshot_chunk_provider),
StatusIs(absl::StatusCode::kFailedPrecondition, "Test error."));
}));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_2_0_0"));
TF_ASSERT_OK(
SetStatus(snapshot_path, absl::FailedPreconditionError("Test error.")));
reader_thread.reset();
}
TEST(SnapshotChunkProviderTest, Cancel) {
TF_ASSERT_OK_AND_ASSIGN(std::string snapshot_path, CreateSnapshotDirectory());
SnapshotChunkProvider snapshot_chunk_provider(snapshot_path,
tsl::Env::Default());
std::unique_ptr<tsl::Thread> reader_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Reader",
[&snapshot_chunk_provider]() {
EXPECT_THAT(
GetAllChunks(snapshot_chunk_provider),
StatusIs(absl::StatusCode::kCancelled,
HasSubstr("Cancelled loading tf.data snapshot at")));
}));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_0_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_1_0_0"));
TF_ASSERT_OK(WriteChunk(snapshot_path, "chunk_2_0_0"));
snapshot_chunk_provider.Cancel();
reader_thread.reset();
}
}
}
} |
1,752 | cpp | tensorflow/tensorflow | parallel_tfrecord_writer | tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.cc | tensorflow/core/data/service/snapshot/parallel_tfrecord_writer_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_PARALLEL_TFRECORD_WRITER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_PARALLEL_TFRECORD_WRITER_H_
#include <cstdint>
#include <deque>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace data {
class ParallelTFRecordWriter {
public:
explicit ParallelTFRecordWriter(const std::string& file_prefix,
const std::string& compression, tsl::Env* env,
ByteSize max_file_size = ByteSize::GB(6),
int64_t num_write_threads = 2,
int64_t buffer_size = 1);
virtual ~ParallelTFRecordWriter();
ParallelTFRecordWriter(const ParallelTFRecordWriter&) = delete;
ParallelTFRecordWriter& operator=(const ParallelTFRecordWriter&) = delete;
absl::Status Write(std::vector<Tensor> record);
struct FileStats {
int64_t num_records = 0;
ByteSize estimated_size;
};
using FileToStatsMap = absl::flat_hash_map<std::string, FileStats>;
absl::StatusOr<FileToStatsMap> Finalize();
private:
void WriteFiles();
bool HasNext() const;
absl::Status WriteFile();
bool ShouldWriteFile(const std::string& filename) const;
absl::Status WriteRecord(const std::string& filename,
snapshot_util::TFRecordWriter& writer);
absl::StatusOr<std::optional<std::vector<Tensor>>> GetNextRecord(
const std::string& filename);
absl::Status DeleteEmptyFile(const std::string& filename);
absl::StatusOr<std::string> GetUniqueFile() const;
void UpdateStatus(absl::Status status);
tsl::Env* const env_;
const std::string file_prefix_;
const std::string compression_;
const ByteSize max_file_size_;
const int64_t buffer_size_;
mutable absl::Mutex mu_;
mutable absl::CondVar ready_to_push_;
mutable absl::CondVar ready_to_pop_;
bool finalized_ ABSL_GUARDED_BY(mu_) = false;
absl::Status status_ ABSL_GUARDED_BY(mu_);
FileToStatsMap file_stats_ ABSL_GUARDED_BY(mu_);
std::deque<std::vector<Tensor>> buffer_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<tsl::thread::ThreadPool> thread_pool_;
};
}
}
#endif
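// A minimal usage sketch (added commentary, not part of the original file);
// `test_dir` is a hypothetical existing directory used as the file prefix:
//
//   tensorflow::data::ParallelTFRecordWriter writer(
//       test_dir, tsl::io::compression::kSnappy, tsl::Env::Default());
//   TF_RETURN_IF_ERROR(writer.Write({tensorflow::Tensor(int64_t{42})}));
//   TF_ASSIGN_OR_RETURN(auto file_stats, writer.Finalize());
//   // `file_stats` maps each produced file to its record count and
//   // estimated size; Finalize() blocks until all records are flushed.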
#include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/service/snapshot/utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/random.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
ParallelTFRecordWriter::ParallelTFRecordWriter(const std::string& file_prefix,
const std::string& compression,
tsl::Env* env,
ByteSize max_file_size,
int64_t num_write_threads,
int64_t buffer_size)
: env_(env),
file_prefix_(file_prefix),
compression_(compression),
max_file_size_(max_file_size),
buffer_size_(buffer_size) {
thread_pool_ = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "write_tfrecord_thread", num_write_threads);
for (int64_t i = 0; i < num_write_threads; ++i) {
thread_pool_->Schedule([this]() { WriteFiles(); });
}
}
ParallelTFRecordWriter::~ParallelTFRecordWriter() {
absl::Status status = Finalize().status();
if (!status.ok()) {
LOG(ERROR) << "Parallel TFRecord writer failed with error: " << status;
}
}
absl::Status ParallelTFRecordWriter::Write(std::vector<Tensor> record)
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && !finalized_ && buffer_.size() >= buffer_size_) {
ready_to_push_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (finalized_) {
return absl::FailedPreconditionError(absl::StrCat(
"Trying to write a closed TFRecord file at ", file_prefix_, "."));
}
buffer_.push_back(std::move(record));
ready_to_pop_.Signal();
return absl::OkStatus();
}
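// Added commentary (not in the original source): Write() and GetNextRecord()
// form a bounded producer/consumer queue over buffer_. Producers block on
// ready_to_push_ while the buffer is full, writer threads block on
// ready_to_pop_ while it is empty, and UpdateStatus()/Finalize() signal both
// condition variables so no thread stays parked after an error or shutdown.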
absl::StatusOr<ParallelTFRecordWriter::FileToStatsMap>
ParallelTFRecordWriter::Finalize() ABSL_LOCKS_EXCLUDED(mu_) {
{
absl::MutexLock l(&mu_);
finalized_ = true;
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
thread_pool_.reset();
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(status_);
return file_stats_;
}
void ParallelTFRecordWriter::WriteFiles() {
while (HasNext()) {
UpdateStatus(WriteFile());
}
}
bool ParallelTFRecordWriter::HasNext() const ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
if (!status_.ok()) {
return false;
}
return !finalized_ || !buffer_.empty();
}
absl::Status ParallelTFRecordWriter::WriteFile() ABSL_LOCKS_EXCLUDED(mu_) {
TF_ASSIGN_OR_RETURN(const std::string filename, GetUniqueFile());
snapshot_util::TFRecordWriter writer(filename, compression_);
TF_RETURN_IF_ERROR(writer.Initialize(env_));
while (ShouldWriteFile(filename)) {
TF_RETURN_IF_ERROR(WriteRecord(filename, writer));
}
TF_RETURN_IF_ERROR(writer.Close());
return DeleteEmptyFile(filename);
}
bool ParallelTFRecordWriter::ShouldWriteFile(const std::string& filename) const
ABSL_LOCKS_EXCLUDED(mu_) {
if (!HasNext()) {
return false;
}
absl::MutexLock l(&mu_);
auto iterator = file_stats_.find(filename);
return iterator == file_stats_.end() ||
iterator->second.estimated_size < max_file_size_;
}
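// Added commentary (not in the original source): this is the file-rollover
// check. A writer thread keeps appending to its current file until the
// file's estimated size reaches max_file_size_, after which WriteFile()
// returns and the thread opens a fresh, uniquely named file.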
absl::Status ParallelTFRecordWriter::WriteRecord(
const std::string& filename, snapshot_util::TFRecordWriter& writer) {
TF_ASSIGN_OR_RETURN(std::optional<std::vector<Tensor>> record,
GetNextRecord(filename));
if (!record.has_value()) {
return absl::OkStatus();
}
tsl::profiler::TraceMe activity("WriteTFRecord",
tsl::profiler::TraceMeLevel::kInfo);
TF_RETURN_IF_ERROR(writer.WriteTensors(*std::move(record)));
return absl::OkStatus();
}
absl::StatusOr<std::optional<std::vector<Tensor>>>
ParallelTFRecordWriter::GetNextRecord(const std::string& filename)
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && !finalized_ && buffer_.empty()) {
ready_to_pop_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (buffer_.empty()) {
return std::nullopt;
}
std::vector<Tensor> record = std::move(buffer_.front());
ByteSize estimated_size = EstimatedSize(record);
LOG_EVERY_N_SEC(INFO, 1) << "Writing TFRecord of " << estimated_size
<< " to file " << filename << "*.";
++file_stats_[filename].num_records;
file_stats_[filename].estimated_size += estimated_size;
buffer_.pop_front();
ready_to_push_.SignalAll();
return record;
}
absl::Status ParallelTFRecordWriter::DeleteEmptyFile(
const std::string& filename) ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
auto iterator = file_stats_.find(filename);
if (iterator != file_stats_.end() && iterator->second.num_records > 0) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(env_->DeleteFile(filename));
if (iterator != file_stats_.end()) {
file_stats_.erase(iterator);
}
return absl::OkStatus();
}
absl::StatusOr<std::string> ParallelTFRecordWriter::GetUniqueFile() const {
std::string filename = absl::StrCat(file_prefix_, "__shard__",
absl::Hex(tsl::random::New64()), "_");
if (!env_->CreateUniqueFileName(&filename, ".tfrecord")) {
return absl::InternalError(
absl::StrCat("Failed to write file ", filename,
": Unable to open temporary files."));
}
return filename;
}
void ParallelTFRecordWriter::UpdateStatus(absl::Status status)
ABSL_LOCKS_EXCLUDED(mu_) {
if (status.ok()) {
return;
}
absl::MutexLock l(&mu_);
status_.Update(std::move(status));
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
}
} | #include "tensorflow/core/data/service/snapshot/parallel_tfrecord_writer.h"
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::Each;
using ::testing::Eq;
using ::testing::Field;
using ::testing::Gt;
using ::testing::IsEmpty;
using ::testing::Le;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::string> TestDir() {
std::string test_dir;
if (!tsl::Env::Default()->LocalTempFilename(&test_dir)) {
return absl::FailedPreconditionError("Failed to create local temp file.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(test_dir));
return test_dir;
}
class RangeIterator {
public:
explicit RangeIterator(const int64_t range) : range_(range) {}
absl::Status GetNext(std::vector<Tensor>& element, bool& end_of_sequence) {
end_of_sequence = (next_ >= range_);
if (end_of_sequence) {
return absl::OkStatus();
}
element = {Tensor{next_++}};
return absl::OkStatus();
}
int64_t Cardinality() const { return range_; }
private:
const int64_t range_;
int64_t next_ = 0;
};
absl::StatusOr<ParallelTFRecordWriter::FileToStatsMap> WriteRecords(
ParallelTFRecordWriter& writer, RangeIterator& iterator,
bool finalize_writer = true) {
std::vector<Tensor> record;
bool end_of_sequence = false;
TF_RETURN_IF_ERROR(iterator.GetNext(record, end_of_sequence));
while (!end_of_sequence) {
TF_RETURN_IF_ERROR(writer.Write(record));
TF_RETURN_IF_ERROR(iterator.GetNext(record, end_of_sequence));
}
if (finalize_writer) {
return writer.Finalize();
}
return ParallelTFRecordWriter::FileToStatsMap();
}
template <class T>
absl::StatusOr<std::vector<T>> ReadRecords(const std::string& filename,
const std::string& compression) {
snapshot_util::TFRecordReader reader(filename, compression,
DataTypeVector{DT_INT64});
TF_RETURN_IF_ERROR(reader.Initialize(tsl::Env::Default()));
std::vector<T> result;
while (true) {
std::vector<Tensor> record;
absl::Status status = reader.ReadTensors(&record);
if (absl::IsOutOfRange(status)) {
break;
}
TF_RETURN_IF_ERROR(status);
for (const Tensor& tensor : record) {
result.push_back(tensor.unaligned_flat<T>().data()[0]);
}
}
return result;
}
template <class T>
absl::StatusOr<std::vector<T>> ReadRecords(
const std::vector<std::string>& filenames, const std::string& compression) {
std::vector<T> result;
for (const std::string& filename : filenames) {
TF_ASSIGN_OR_RETURN(std::vector<T> records,
ReadRecords<T>(filename, compression));
absl::c_move(records, std::back_inserter(result));
}
return result;
}
std::vector<int64_t> Range(int64_t range) {
std::vector<int64_t> result(range);
std::iota(result.begin(), result.end(), 0);
return result;
}
std::vector<int64_t> Repeat(const std::vector<int64_t>& values,
int64_t repeat) {
std::vector<int64_t> result;
for (int64_t i = 0; i < repeat; ++i) {
absl::c_copy(values, std::back_inserter(result));
}
return result;
}
template <class K, class V>
std::pair<std::vector<K>, std::vector<V>> Unzip(
const absl::flat_hash_map<K, V>& m) {
std::vector<K> keys;
std::vector<V> values;
for (const auto& [k, v] : m) {
keys.push_back(k);
values.push_back(v);
}
return std::make_pair(keys, values);
}
class ParallelTFRecordWriterParamTest
: public ::testing::TestWithParam<std::tuple<
int64_t, int64_t, ByteSize, int64_t, int64_t, std::string>> {
protected:
int64_t NumElements() const { return std::get<0>(GetParam()); }
int64_t NumClients() const { return std::get<1>(GetParam()); }
ByteSize MaxFileSize() const { return std::get<2>(GetParam()); }
int64_t NumWriteThreads() const { return std::get<3>(GetParam()); }
int64_t BufferSize() const { return std::get<4>(GetParam()); }
std::string Compression() const { return std::get<5>(GetParam()); }
void VerifyFileStats(
const std::vector<ParallelTFRecordWriter::FileStats>& file_stats,
int64_t expected_num_elements) const {
auto add_num_elements = [](int64_t num_elements,
const ParallelTFRecordWriter::FileStats& stats) {
return num_elements + stats.num_records;
};
EXPECT_EQ(absl::c_accumulate(file_stats, 0, add_num_elements),
expected_num_elements);
EXPECT_THAT(
file_stats,
Each(Field(&ParallelTFRecordWriter::FileStats::num_records, Gt(0))));
EXPECT_THAT(file_stats,
Each(Field(&ParallelTFRecordWriter::FileStats::estimated_size,
Le(MaxFileSize() + ByteSize::Bytes(16)))));
if (MaxFileSize() <= ByteSize::Bytes(1)) {
EXPECT_THAT(
file_stats,
Each(Field(&ParallelTFRecordWriter::FileStats::num_records, Eq(1))));
EXPECT_THAT(file_stats, SizeIs(expected_num_elements));
}
if (MaxFileSize() >= ByteSize::GB(1)) {
EXPECT_THAT(file_stats, SizeIs(Le(NumWriteThreads())));
}
}
};
TEST_P(ParallelTFRecordWriterParamTest, WriteRecords) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, Compression(), tsl::Env::Default(), MaxFileSize(),
NumWriteThreads(), BufferSize());
RangeIterator range_iterator(NumElements());
TF_ASSERT_OK_AND_ASSIGN(
ParallelTFRecordWriter::FileToStatsMap file_stats,
WriteRecords(parallel_tfrecord_writer, range_iterator));
const auto [files, stats] = Unzip(file_stats);
EXPECT_THAT(ReadRecords<int64_t>(files, Compression()),
IsOkAndHolds(UnorderedElementsAreArray(Range(NumElements()))));
VerifyFileStats(stats, NumElements());
}
TEST_P(ParallelTFRecordWriterParamTest, ConcurrentWrites) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, Compression(), tsl::Env::Default(), MaxFileSize(),
NumWriteThreads(), BufferSize());
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
for (int i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[this, ¶llel_tfrecord_writer]() {
RangeIterator range_iterator(NumElements());
TF_ASSERT_OK(WriteRecords(parallel_tfrecord_writer, range_iterator,
false)
.status());
})));
}
client_threads.clear();
TF_ASSERT_OK_AND_ASSIGN(ParallelTFRecordWriter::FileToStatsMap file_stats,
parallel_tfrecord_writer.Finalize());
const auto [files, stats] = Unzip(file_stats);
EXPECT_THAT(ReadRecords<int64_t>(files, Compression()),
IsOkAndHolds(UnorderedElementsAreArray(
Repeat(Range(NumElements()), NumClients()))));
VerifyFileStats(stats, NumElements() * NumClients());
}
INSTANTIATE_TEST_SUITE_P(ParallelTFRecordWriterParams,
ParallelTFRecordWriterParamTest,
::testing::Combine(
::testing::Values(0, 1, 100),
::testing::Values(1, 5),
::testing::Values(ByteSize::Bytes(1),
ByteSize::Bytes(100),
ByteSize::GB(1)),
::testing::Values(1, 5),
::testing::Values(1, 10000),
::testing::Values(tsl::io::compression::kNone,
tsl::io::compression::kSnappy,
tsl::io::compression::kZlib)));
TEST(ParallelTFRecordWriterTest, WriteNoRecord) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, tsl::io::compression::kNone, tsl::Env::Default());
TF_ASSERT_OK_AND_ASSIGN(ParallelTFRecordWriter::FileToStatsMap file_stats,
parallel_tfrecord_writer.Finalize());
const auto [files, stats] = Unzip(file_stats);
EXPECT_THAT(ReadRecords<int64_t>(files, tsl::io::compression::kNone),
IsOkAndHolds(IsEmpty()));
}
TEST(ParallelTFRecordWriterTest, CannotWriteFinalizedWriter) {
TF_ASSERT_OK_AND_ASSIGN(std::string test_dir, TestDir());
std::string file_prefix = "file";
ParallelTFRecordWriter parallel_tfrecord_writer(
test_dir, tsl::io::compression::kNone, tsl::Env::Default());
std::unique_ptr<tsl::Thread> client_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "Client",
[¶llel_tfrecord_writer]() {
RangeIterator range_iterator(std::numeric_limits<int64_t>::max());
EXPECT_THAT(WriteRecords(parallel_tfrecord_writer, range_iterator),
StatusIs(absl::StatusCode::kFailedPrecondition));
}));
parallel_tfrecord_writer.Finalize().status().IgnoreError();
client_thread.reset();
}
TEST(ParallelTFRecordWriterTest, DirectoryDoesNotExist) {
ParallelTFRecordWriter parallel_tfrecord_writer("/directory/does/not/exists",
tsl::io::compression::kNone,
tsl::Env::Default());
RangeIterator range_iterator(10);
std::vector<Tensor> element;
bool end_of_sequence = false;
TF_ASSERT_OK(range_iterator.GetNext(element, end_of_sequence));
parallel_tfrecord_writer.Write(element).IgnoreError();
EXPECT_THAT(parallel_tfrecord_writer.Finalize().status(),
StatusIs(absl::StatusCode::kNotFound));
}
}
}
} |
1,753 | cpp | tensorflow/tensorflow | snapshot_split_provider | tensorflow/core/data/service/snapshot/snapshot_split_provider.cc | tensorflow/core/data/service/snapshot/snapshot_split_provider_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_SPLIT_PROVIDER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_SPLIT_PROVIDER_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
class SnapshotSplitProvider : public SplitProvider {
public:
SnapshotSplitProvider(const std::string& worker_address,
const SnapshotTaskDef& snapshot_task,
int64_t source_index, absl::Duration timeout,
std::unique_ptr<DataServiceDispatcherClient> dispatcher,
Env* env);
absl::Status GetNext(Tensor* split, bool* end_of_splits) override;
absl::Status Reset() override;
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override;
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override;
private:
const std::string worker_address_;
const SnapshotTaskDef snapshot_task_;
const int64_t source_index_;
const absl::Duration timeout_;
Env* const env_;
absl::Status GetAndValidateSplit(Tensor* split, bool* end_of_splits);
absl::Status GetSplitFromFile(const std::string& split_file, Tensor* split,
bool* end_of_splits);
absl::StatusOr<int64_t> GetSplitFromDispatcher(Tensor* split,
bool* end_of_splits);
absl::StatusOr<absl::btree_map<int64_t, std::string>> GetSplitsFiles(
int64_t start_index) const;
absl::Status ValidateSplitFiles(
const absl::btree_map<int64_t, std::string>& split_files,
int64_t start_index) const;
absl::Status ValidateSplitFiles(
const absl::btree_map<int64_t, std::string>& split_files,
int64_t start_index, int64_t end_index, bool end_of_splits) const;
mutable mutex mu_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_ TF_GUARDED_BY(mu_);
int64_t next_split_index_ TF_GUARDED_BY(mu_) = 0;
int64_t repetition_index_ TF_GUARDED_BY(mu_) = 0;
absl::btree_map<int64_t, std::string> split_to_file_map_ TF_GUARDED_BY(mu_);
};
}
}
#endif
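// A minimal construction sketch (added commentary, not part of the original
// file); the dispatcher address, worker address, and SnapshotTaskDef below
// are hypothetical placeholders:
//
//   auto dispatcher =
//       std::make_unique<tensorflow::data::DataServiceDispatcherClient>(
//           dispatcher_address, "grpc");
//   tensorflow::data::SnapshotSplitProvider split_provider(
//       worker_address, snapshot_task, /*source_index=*/0,
//       /*timeout=*/absl::Hours(1), std::move(dispatcher),
//       tsl::Env::Default());
//   tensorflow::Tensor split;
//   bool end_of_splits = false;
//   TF_RETURN_IF_ERROR(split_provider.GetNext(&split, &end_of_splits));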
#include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNextSplitIndex[] = "next_split_index";
constexpr char kRepetitionIndex[] = "repetition_index";
absl::StatusOr<int64_t> GetRepetitionIndex(const std::string& split_file) {
tsl::StringPiece repetition_dir_path = tsl::io::Dirname(split_file);
tsl::StringPiece repetition_dir_name = tsl::io::Basename(repetition_dir_path);
return ParseRepetitionDirectoryName(repetition_dir_name);
}
}
SnapshotSplitProvider::SnapshotSplitProvider(
const std::string& worker_address, const SnapshotTaskDef& snapshot_task,
int64_t source_index, absl::Duration timeout,
std::unique_ptr<DataServiceDispatcherClient> dispatcher, Env* env)
: worker_address_(worker_address),
snapshot_task_(snapshot_task),
source_index_(source_index),
timeout_(timeout),
env_(env) {
mutex_lock l(mu_);
dispatcher_ = std::move(dispatcher);
}
absl::Status SnapshotSplitProvider::GetNext(Tensor* split, bool* end_of_splits)
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(GetAndValidateSplit(split, end_of_splits));
if (!*end_of_splits) {
++next_split_index_;
}
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::GetAndValidateSplit(Tensor* split,
bool* end_of_splits)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (split_to_file_map_.contains(next_split_index_)) {
return GetSplitFromFile(split_to_file_map_[next_split_index_], split,
end_of_splits);
}
TF_ASSIGN_OR_RETURN(int64_t dispatcher_split_index,
GetSplitFromDispatcher(split, end_of_splits));
if (dispatcher_split_index == next_split_index_) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(split_to_file_map_, GetSplitsFiles(next_split_index_));
TF_RETURN_IF_ERROR(ValidateSplitFiles(split_to_file_map_, next_split_index_,
dispatcher_split_index,
*end_of_splits));
return GetSplitFromFile(split_to_file_map_[next_split_index_], split,
end_of_splits);
}
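// Reads a single-tensor split from an uncompressed TFRecord file. A file
// belonging to a repetition later than `repetition_index_` means the current
// repetition has no splits left.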
absl::Status SnapshotSplitProvider::GetSplitFromFile(
const std::string& split_file, Tensor* split, bool* end_of_splits)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
VLOG(3) << "Getting the next split from file: " << split_file;
TF_ASSIGN_OR_RETURN(int64_t repetition_index, GetRepetitionIndex(split_file));
if (repetition_index_ < repetition_index) {
*end_of_splits = true;
return absl::OkStatus();
}
snapshot_util::TFRecordReaderImpl reader(split_file,
tsl::io::compression::kNone);
TF_RETURN_IF_ERROR(reader.Initialize(env_));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> tensors, reader.GetTensors());
if (tensors.size() != 1) {
return absl::InternalError(absl::StrCat(
"A snapshot split file is expected to contain 1 tensor. Got ",
tensors.size(), " tensors from ", split_file, "."));
}
*split = std::move(tensors[0]);
*end_of_splits = false;
return absl::OkStatus();
}
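// Fetches one split over RPC, retrying until the deadline derived from
// `timeout_`, and returns the split index reported by the dispatcher.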
absl::StatusOr<int64_t> SnapshotSplitProvider::GetSplitFromDispatcher(
Tensor* split, bool* end_of_splits) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t local_split_index = 0;
TF_RETURN_IF_ERROR(grpc_util::Retry(
[this, split, &local_split_index, end_of_splits]()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return dispatcher_->GetSnapshotSplit(
worker_address_, snapshot_task_.base_path(),
snapshot_task_.stream_index(), source_index_, repetition_index_,
*split, local_split_index, *end_of_splits);
},
"Get next split for snapshot",
env_->NowMicros() +
absl::ToInt64Microseconds(timeout_)));
return local_split_index;
}
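// Scans every repetition directory of this source, mapping each local split
// index at or past `start_index` to its file, then verifies the collected
// indices form a contiguous range starting at `start_index`.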
absl::StatusOr<absl::btree_map<int64_t, std::string>>
SnapshotSplitProvider::GetSplitsFiles(int64_t start_index) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
absl::btree_map<int64_t, std::string> split_to_file_map;
std::string splits_directory = SourceDirectory(
snapshot_task_.base_path(), snapshot_task_.stream_index(), source_index_);
TF_ASSIGN_OR_RETURN(std::vector<std::string> repetition_directories,
GetChildren(splits_directory, env_));
for (const std::string& repetition : repetition_directories) {
std::string repetition_dir = io::JoinPath(splits_directory, repetition);
TF_ASSIGN_OR_RETURN(std::vector<std::string> split_files,
GetChildren(repetition_dir, env_));
for (const std::string& split_file : split_files) {
TF_ASSIGN_OR_RETURN(auto split_index, ParseSplitFilename(split_file));
auto [local_split_index, global_split_index] = split_index;
if (local_split_index >= start_index) {
split_to_file_map[local_split_index] =
tsl::io::JoinPath(repetition_dir, split_file);
}
}
}
TF_RETURN_IF_ERROR(ValidateSplitFiles(split_to_file_map, start_index));
return split_to_file_map;
}
absl::Status SnapshotSplitProvider::ValidateSplitFiles(
const absl::btree_map<int64_t, std::string>& split_files,
int64_t start_index) const {
if (split_files.empty()) {
return absl::OkStatus();
}
if (split_files.cbegin()->first != start_index) {
return absl::InternalError(absl::StrCat("Failed to get split ", start_index,
" for snapshot ",
snapshot_task_.DebugString()));
}
int64_t end_index = split_files.rbegin()->first;
  if (end_index - start_index + 1 !=
      static_cast<int64_t>(split_files.size())) {
return absl::InternalError(absl::StrCat(
"Failed to get split ", start_index, ". Some splits between [",
start_index, ", ", end_index, "] are missing for snapshot ",
snapshot_task_.DebugString()));
}
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::ValidateSplitFiles(
const absl::btree_map<int64_t, std::string>& split_files,
int64_t start_index, int64_t end_index, bool end_of_splits) const {
TF_RETURN_IF_ERROR(ValidateSplitFiles(split_files, start_index));
if (end_index < start_index) {
return absl::InternalError(absl::StrCat(
"The tf.data service worker is expected to read split ", start_index,
", but the dispatcher returns split ", end_index, " for snapshot ",
snapshot_task_.DebugString()));
}
if (end_of_splits) {
end_index = end_index - 1;
}
if (split_files.empty() || split_files.cbegin()->first != start_index ||
split_files.rbegin()->first < end_index) {
return absl::InternalError(absl::StrCat(
"The tf.data service dispatcher has written split ", end_index,
". However, not all splits between [", start_index, ", ", end_index,
"] are found for snapshot ", snapshot_task_.DebugString()));
}
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::Reset() {
mutex_lock l(mu_);
++repetition_index_;
LOG(INFO) << "Reset tf.data snapshot split provider for snapshot "
<< snapshot_task_.ShortDebugString() << ", repetition "
<< repetition_index_ << ".";
return absl::OkStatus();
}
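// Checkpointing persists only the two counters; the split-to-file map is
// rebuilt from disk when the provider is restored.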
absl::Status SnapshotSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kNextSplitIndex), next_split_index_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kRepetitionIndex), repetition_index_));
return absl::OkStatus();
}
absl::Status SnapshotSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) TF_LOCKS_EXCLUDED(mu_) {
int64_t next_split_index = 0;
int64_t repetition_index = 0;
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kNextSplitIndex), &next_split_index));
TF_RETURN_IF_ERROR(
reader->ReadScalar(full_name(kRepetitionIndex), &repetition_index));
mutex_lock l(mu_);
next_split_index_ = next_split_index;
repetition_index_ = repetition_index;
TF_ASSIGN_OR_RETURN(split_to_file_map_, GetSplitsFiles(next_split_index_));
LOG(INFO) << "Restored snapshot split provider for snapshot "
<< snapshot_task_.ShortDebugString() << ", next split "
<< next_split_index_ << ", repetition " << repetition_index_ << ".";
return absl::OkStatus();
}
}
} | #include "tensorflow/core/data/service/snapshot/snapshot_split_provider.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::_;
using testing::CreateDummyDistributedSnapshotMetadata;
using ::testing::DoAll;
using ::testing::HasSubstr;
using testing::LocalTempFilename;
using ::testing::Return;
using ::testing::SetArgReferee;
using tsl::testing::StatusIs;
class MockDispatcherClient : public DataServiceDispatcherClient {
public:
explicit MockDispatcherClient()
: DataServiceDispatcherClient("localhost",
"grpc") {}
MOCK_METHOD(absl::Status, GetSnapshotSplit,
(const std::string& worker_address, const std::string& base_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index, Tensor& split,
int64_t& local_split_index, bool& end_of_splits),
(override));
};
SnapshotTaskDef TestSnapshotTask() {
SnapshotTaskDef snapshot_task;
snapshot_task.set_base_path(LocalTempFilename());
snapshot_task.set_stream_index(0);
snapshot_task.set_num_sources(1);
*snapshot_task.mutable_metadata() = CreateDummyDistributedSnapshotMetadata();
return snapshot_task;
}
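// Writes `num_splits` single-tensor TFRecord files named split_<i>_<i> into
// repetition 0 of source 0, mimicking split files committed by a dispatcher.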
absl::Status WriteSplits(const SnapshotTaskDef& snapshot_task,
int64_t num_splits) {
std::string source_dir =
RepetitionDirectory(snapshot_task.base_path(),
snapshot_task.stream_index(), 0,
0);
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(source_dir));
for (int64_t i = 0; i < num_splits; ++i) {
std::string split_filename = absl::StrCat("split_", i, "_", i);
std::string split_path = tsl::io::JoinPath(source_dir, split_filename);
Tensor split(int64_t{i});
TF_RETURN_IF_ERROR(AtomicallyWriteTFRecords(
split_path, {split}, tsl::io::compression::kNone, Env::Default()));
}
return absl::OkStatus();
}
TEST(SnapshotSplitProviderTest, GetSplitFromDispatcher) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{0});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
.WillOnce(DoAll(SetArgReferee<5>(split),
SetArgReferee<6>(0),
SetArgReferee<7>(false),
Return(absl::OkStatus())));
Tensor result;
bool end_of_splits = false;
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, split);
EXPECT_FALSE(end_of_splits);
}
TEST(SnapshotSplitProviderTest, GetSplitFromFile) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{9});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
.WillOnce(DoAll(SetArgReferee<5>(split),
SetArgReferee<6>(9),
SetArgReferee<7>(false),
Return(absl::OkStatus())));
TF_ASSERT_OK(WriteSplits(snapshot_task, 10));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
for (int64_t i = 0; i < 10; ++i) {
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, Tensor(int64_t{i}));
EXPECT_FALSE(end_of_splits);
}
}
TEST(SnapshotSplitProviderTest, EndOfSplits) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
.WillOnce(DoAll(SetArgReferee<6>(0),
SetArgReferee<7>(true),
Return(absl::OkStatus())));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
TEST(SnapshotSplitProviderTest, SplitNotFound) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{10});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
.WillOnce(DoAll(SetArgReferee<5>(split),
SetArgReferee<6>(10),
SetArgReferee<7>(false),
Return(absl::OkStatus())));
TF_ASSERT_OK(WriteSplits(snapshot_task, 0));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
Tensor result;
bool end_of_splits = false;
EXPECT_THAT(split_provider.GetNext(&result, &end_of_splits),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("not all splits between [0, 10] are found")));
}
std::string full_name(const std::string& name) {
return FullName("test", name);
}
TEST(SnapshotSplitProviderTest, SaveRestore) {
const SnapshotTaskDef snapshot_task = TestSnapshotTask();
Tensor split(int64_t{9});
auto mock_dispatcher_ptr = std::make_unique<MockDispatcherClient>();
MockDispatcherClient* mock_dispatcher = mock_dispatcher_ptr.get();
EXPECT_CALL(*mock_dispatcher, GetSnapshotSplit(_, _, _, _, _, _, _, _))
.WillOnce(DoAll(SetArgReferee<5>(split),
SetArgReferee<6>(9),
SetArgReferee<7>(false),
Return(absl::OkStatus())));
TF_ASSERT_OK(WriteSplits(snapshot_task, 10));
SnapshotSplitProvider split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::move(mock_dispatcher_ptr),
Env::Default());
for (int64_t i = 0; i < 5; ++i) {
Tensor result;
bool end_of_splits = false;
TF_EXPECT_OK(split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, Tensor(int64_t{i}));
EXPECT_FALSE(end_of_splits);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(split_provider.Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
SnapshotSplitProvider restored_split_provider(
"worker_address", snapshot_task, 0,
absl::Seconds(10), std::make_unique<MockDispatcherClient>(),
Env::Default());
TF_ASSERT_OK(restored_split_provider.Restore(full_name, &reader));
  // The remaining splits should be read from the restored provider.
  for (int64_t i = 5; i <= 9; ++i) {
    Tensor result;
    bool end_of_splits = false;
    TF_EXPECT_OK(restored_split_provider.GetNext(&result, &end_of_splits));
test::ExpectTensorEqual<int64_t>(result, Tensor(int64_t{i}));
EXPECT_FALSE(end_of_splits);
}
}
}
}
} |
1,754 | cpp | tensorflow/tensorflow | path_utils | tensorflow/core/data/service/snapshot/path_utils.cc | tensorflow/core/data/service/snapshot/path_utils_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_PATH_UTILS_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_PATH_UTILS_H_
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
namespace tensorflow {
namespace data {
std::string StreamsDirectory(absl::string_view snapshot_path);
std::string StreamDirectory(absl::string_view snapshot_path,
int64_t stream_index);
std::string SplitsDirectory(absl::string_view snapshot_path,
int64_t stream_index);
std::string SourceDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index);
std::string RepetitionDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index);
std::string SplitPath(absl::string_view snapshot_path, int64_t stream_index,
int64_t source_index, int64_t repetition_index,
int64_t local_index, int64_t global_index);
absl::StatusOr<int64_t> ParseStreamDirectoryName(
absl::string_view stream_directory_name);
absl::StatusOr<int64_t> ParseSourceDirectoryName(
absl::string_view source_directory_name);
absl::StatusOr<int64_t> ParseRepetitionDirectoryName(
absl::string_view repetition_directory_name);
absl::StatusOr<std::pair<int64_t, int64_t>> ParseSplitFilename(
absl::string_view split_filename);
absl::StatusOr<std::pair<int64_t, int64_t>> ParseCheckpointFilename(
absl::string_view checkpoint_filename);
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> ParseChunkFilename(
absl::string_view chunk_filename);
std::string StreamDoneFilePath(absl::string_view snapshot_path,
int64_t stream_index);
std::string StreamWorkerFilePath(absl::string_view snapshot_path,
int64_t stream_index);
std::string StreamWorkerFilePath(absl::string_view stream_path);
std::string SnapshotDoneFilePath(absl::string_view snapshot_path);
std::string SnapshotErrorFilePath(absl::string_view snapshot_path);
std::string SnapshotMetadataFilePath(absl::string_view snapshot_path);
std::string DatasetDefFilePath(absl::string_view snapshot_path);
std::string DatasetSpecFilePath(absl::string_view snapshot_path);
std::string CheckpointsDirectory(absl::string_view snapshot_path,
int64_t stream_index);
std::string CommittedChunksDirectory(absl::string_view snapshot_path);
std::string UncommittedChunksDirectory(absl::string_view snapshot_path,
int64_t stream_index);
}
}
#endif
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include <cstdint>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/path.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kDoneFileName[] = "DONE";
constexpr const char kErrorFileName[] = "ERROR";
constexpr const char kWorkerFileName[] = "owner_worker";
constexpr const char kSnapshotMetadataFileName[] = "snapshot.metadata";
constexpr const char kDatasetDefFileName[] = "dataset_def.proto";
constexpr const char kDatasetSpecFileName[] = "dataset_spec.pb";
constexpr const char kStreamsDirectoryName[] = "streams";
constexpr const char kSplitsDirectoryName[] = "splits";
constexpr const char kCheckpointsDirectoryName[] = "checkpoints";
constexpr const char kCommittedChunksDirectoryName[] = "chunks";
constexpr const char kUncommittedChunksDirectoryName[] = "uncommitted_chunks";
constexpr int64_t kUnknownNumElements = -1;
}
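// The helpers below build and parse paths in the snapshot directory tree.
// For an illustrative root "/snapshot", SplitPath("/snapshot", 0, 1, 2, 3, 4)
// produces:
//   /snapshot/streams/stream_0/splits/source_1/repetition_2/split_3_4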
std::string StreamsDirectory(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kStreamsDirectoryName);
}
std::string StreamDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamsDirectory(snapshot_path),
absl::StrCat("stream_", stream_index));
}
std::string SplitsDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kSplitsDirectoryName);
}
std::string SourceDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index) {
return tsl::io::JoinPath(SplitsDirectory(snapshot_path, stream_index),
absl::StrCat("source_", source_index));
}
std::string RepetitionDirectory(absl::string_view snapshot_path,
int64_t stream_index, int64_t source_index,
int64_t repetition_index) {
return tsl::io::JoinPath(
SourceDirectory(snapshot_path, stream_index, source_index),
absl::StrCat("repetition_", repetition_index));
}
std::string SplitPath(absl::string_view snapshot_path, int64_t stream_index,
int64_t source_index, int64_t repetition_index,
int64_t local_index, int64_t global_index) {
return tsl::io::JoinPath(
RepetitionDirectory(snapshot_path, stream_index, source_index,
repetition_index),
absl::StrCat("split_", local_index, "_", global_index));
}
absl::StatusOr<int64_t> ParseStreamDirectoryName(
absl::string_view stream_directory_name) {
std::vector<std::string> tokens = absl::StrSplit(stream_directory_name, '_');
int64_t stream_index = 0;
if (tokens.size() != 2 || tokens[0] != "stream" ||
!absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid stream directory name: ", stream_directory_name,
". Expected stream_<stream_index>."));
}
return stream_index;
}
absl::StatusOr<int64_t> ParseSourceDirectoryName(
absl::string_view source_directory_name) {
std::vector<std::string> tokens = absl::StrSplit(source_directory_name, '_');
int64_t source_index = 0;
if (tokens.size() != 2 || tokens[0] != "source" ||
!absl::SimpleAtoi(tokens[1], &source_index) || source_index < 0) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid source directory name: ", source_directory_name,
". Expected source_<source_index>."));
}
return source_index;
}
absl::StatusOr<int64_t> ParseRepetitionDirectoryName(
absl::string_view repetition_directory_name) {
std::vector<std::string> tokens =
absl::StrSplit(repetition_directory_name, '_');
int64_t repetition_index = 0;
if (tokens.size() != 2 || tokens[0] != "repetition" ||
!absl::SimpleAtoi(tokens[1], &repetition_index) || repetition_index < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid repetition directory name: ", repetition_directory_name,
". Expected repetition_<repetition_index>."));
}
return repetition_index;
}
absl::StatusOr<std::pair<int64_t, int64_t>> ParseSplitFilename(
absl::string_view split_filename) {
std::vector<std::string> tokens =
absl::StrSplit(tsl::io::Basename(split_filename), '_');
int64_t local_split_index = 0, global_split_index = 0;
if (tokens.size() != 3 || tokens[0] != "split" ||
!absl::SimpleAtoi(tokens[1], &local_split_index) ||
local_split_index < 0 ||
!absl::SimpleAtoi(tokens[2], &global_split_index) ||
global_split_index < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid split file name: ", split_filename,
". Expected split_<local_split_index>_<global_split_index>."));
}
if (local_split_index > global_split_index) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid split file name: ", split_filename, ". The local split index ",
local_split_index, " exceeds the global split index ",
global_split_index, "."));
}
return std::make_pair(local_split_index, global_split_index);
}
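// For checkpoint and chunk filenames, an element count equal to
// kUnknownNumElements (-1) is accepted and means the count is unknown.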
absl::StatusOr<std::pair<int64_t, int64_t>> ParseCheckpointFilename(
absl::string_view checkpoint_filename) {
std::vector<std::string> tokens = absl::StrSplit(checkpoint_filename, '_');
int64_t checkpoint_index = 0, checkpoint_num_elements = 0;
if (tokens.size() != 3 || tokens[0] != "checkpoint" ||
!absl::SimpleAtoi(tokens[1], &checkpoint_index) || checkpoint_index < 0 ||
!absl::SimpleAtoi(tokens[2], &checkpoint_num_elements) ||
(checkpoint_num_elements < 0 &&
checkpoint_num_elements != kUnknownNumElements)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid checkpoint file name: ", checkpoint_filename,
". Expected checkpoint_<checkpoint_index>_<checkpoint_num_elements>."));
}
return std::make_pair(checkpoint_index, checkpoint_num_elements);
}
absl::StatusOr<std::tuple<int64_t, int64_t, int64_t>> ParseChunkFilename(
absl::string_view chunk_filename) {
std::vector<std::string> tokens = absl::StrSplit(chunk_filename, '_');
int64_t stream_index = 0, stream_chunk_index = 0, chunk_num_elements = 0;
if (tokens.size() != 4 || tokens[0] != "chunk" ||
!absl::SimpleAtoi(tokens[1], &stream_index) || stream_index < 0 ||
!absl::SimpleAtoi(tokens[2], &stream_chunk_index) ||
stream_chunk_index < 0 ||
!absl::SimpleAtoi(tokens[3], &chunk_num_elements) ||
(chunk_num_elements < 0 && chunk_num_elements != kUnknownNumElements)) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid chunk file name: ", chunk_filename,
". Expected "
"chunk_<stream_index>_<stream_chunk_index>_<chunk_num_elements>."));
}
return std::make_tuple(stream_index, stream_chunk_index, chunk_num_elements);
}
std::string SnapshotMetadataFilePath(absl::string_view snapshot_path) {
  return tsl::io::JoinPath(snapshot_path, kSnapshotMetadataFileName);
}
std::string DatasetDefFilePath(absl::string_view snapshot_path) {
  return tsl::io::JoinPath(snapshot_path, kDatasetDefFileName);
}
std::string DatasetSpecFilePath(absl::string_view snapshot_path) {
  return tsl::io::JoinPath(snapshot_path, kDatasetSpecFileName);
}
std::string StreamDoneFilePath(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kDoneFileName);
}
std::string StreamWorkerFilePath(absl::string_view snapshot_path,
int64_t stream_index) {
return StreamWorkerFilePath(StreamDirectory(snapshot_path, stream_index));
}
std::string StreamWorkerFilePath(absl::string_view stream_path) {
return tsl::io::JoinPath(stream_path, kWorkerFileName);
}
std::string SnapshotDoneFilePath(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kDoneFileName);
}
std::string SnapshotErrorFilePath(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kErrorFileName);
}
std::string CheckpointsDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kCheckpointsDirectoryName);
}
std::string CommittedChunksDirectory(absl::string_view snapshot_path) {
return tsl::io::JoinPath(snapshot_path, kCommittedChunksDirectoryName);
}
std::string UncommittedChunksDirectory(absl::string_view snapshot_path,
int64_t stream_index) {
return tsl::io::JoinPath(StreamDirectory(snapshot_path, stream_index),
kUncommittedChunksDirectoryName);
}
}
} | #include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::FieldsAre;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
using ::testing::Pair;
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(PathUtilsTest, StreamsDirectory) {
EXPECT_THAT(StreamsDirectory("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.streams"));
}
TEST(PathUtilsTest, StreamDirectory) {
EXPECT_THAT(StreamDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0"));
}
TEST(PathUtilsTest, SplitsDirectory) {
EXPECT_THAT(SplitsDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.splits"));
}
TEST(PathUtilsTest, SourceDirectory) {
EXPECT_THAT(
SourceDirectory("/path/to/snapshot", 0,
1),
MatchesRegex("/path/to/snapshot.streams.stream_0.splits.source_1"));
}
TEST(PathUtilsTest, RepetitionDirectory) {
EXPECT_THAT(
RepetitionDirectory("/path/to/snapshot", 0,
1, 2),
MatchesRegex(
"/path/to/snapshot.streams.stream_0.splits.source_1.repetition_2"));
}
TEST(PathUtilsTest, SplitPath) {
EXPECT_THAT(
SplitPath("/path/to/snapshot", 0, 1,
2, 3, 4),
MatchesRegex(
"/path/to/"
"snapshot.streams.stream_0.splits.source_1.repetition_2.split_3_4"));
}
TEST(PathUtilsTest, ParseStreamDirectoryName) {
EXPECT_THAT(ParseStreamDirectoryName("stream_1"), IsOkAndHolds(1));
}
TEST(PathUtilsTest, ParseSourceDirectoryName) {
EXPECT_THAT(ParseSourceDirectoryName("source_1"), IsOkAndHolds(1));
EXPECT_THAT(ParseSourceDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
EXPECT_THAT(ParseSourceDirectoryName("source_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
EXPECT_THAT(ParseSourceDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected source_<source_index>")));
}
TEST(PathUtilsTest, ParseRepetitionDirectoryName) {
EXPECT_THAT(ParseRepetitionDirectoryName("repetition_1"), IsOkAndHolds(1));
EXPECT_THAT(ParseRepetitionDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
EXPECT_THAT(ParseRepetitionDirectoryName("repetition_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
EXPECT_THAT(ParseRepetitionDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected repetition_<repetition_index>")));
}
TEST(PathUtilsTest, InvalidStreamDirectoryName) {
EXPECT_THAT(ParseStreamDirectoryName(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
EXPECT_THAT(ParseStreamDirectoryName("stream_-1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
EXPECT_THAT(ParseStreamDirectoryName("chunk_1"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected stream_<stream_index>")));
}
TEST(PathUtilsTest, ParseSplitFilename) {
EXPECT_THAT(ParseSplitFilename("split_0_1"), IsOkAndHolds(Pair(0, 1)));
}
TEST(PathUtilsTest, InvalidSplitFilename) {
EXPECT_THAT(
ParseSplitFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_123"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_-1_(-1)"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("chunk_1_2"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected split_<local_split_index>_<global_split_index>")));
EXPECT_THAT(
ParseSplitFilename("split_5_0"),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"The local split index 5 exceeds the global split index 0")));
}
TEST(PathUtilsTest, ParseCheckpointFilename) {
EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_1"),
IsOkAndHolds(Pair(0, 1)));
EXPECT_THAT(ParseCheckpointFilename("checkpoint_0_-1"),
IsOkAndHolds(Pair(0, -1)));
}
TEST(PathUtilsTest, InvalidCheckpointFilename) {
EXPECT_THAT(
ParseCheckpointFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("checkpoint_123"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("checkpoint_-1_(-1)"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
EXPECT_THAT(
ParseCheckpointFilename("chunk_1_2"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr(
"Expected "
"checkpoint_<checkpoint_index>_<checkpoint_num_elements>")));
}
TEST(PathUtilsTest, ParseChunkFilename) {
EXPECT_THAT(ParseChunkFilename("chunk_0_1_2"),
IsOkAndHolds(FieldsAre(0, 1, 2)));
EXPECT_THAT(ParseChunkFilename("chunk_0_1_-1"),
IsOkAndHolds(FieldsAre(0, 1, -1)));
}
TEST(PathUtilsTest, InvalidChunkFilename) {
EXPECT_THAT(ParseChunkFilename(""),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("chunk_123_0"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("chunk_-1_(-1)_0"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
EXPECT_THAT(ParseChunkFilename("split_1_2_3"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Expected "
"chunk_<stream_index>_<stream_chunk_index>_<"
"chunk_num_elements>")));
}
TEST(PathUtilsTest, StreamDoneFilePath) {
EXPECT_THAT(StreamDoneFilePath("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.DONE"));
}
TEST(PathUtilsTest, StreamWorkerFilePath) {
EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker"));
EXPECT_THAT(StreamWorkerFilePath("/path/to/snapshot/streams/stream_0"),
MatchesRegex("/path/to/snapshot.streams.stream_0.owner_worker"));
}
TEST(PathUtilsTest, SnapshotDoneFilePath) {
EXPECT_THAT(SnapshotDoneFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.DONE"));
}
TEST(PathUtilsTest, SnapshotErrorFilePath) {
EXPECT_THAT(SnapshotErrorFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.ERROR"));
}
TEST(PathUtilsTest, SnapshotMetadataFilePath) {
EXPECT_THAT(SnapshotMetadataFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.snapshot.metadata"));
}
TEST(PathUtilsTest, DatasetDefFilePath) {
EXPECT_THAT(DatasetDefFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.dataset_def.proto"));
}
TEST(PathUtilsTest, DatasetSpecFilePath) {
EXPECT_THAT(DatasetSpecFilePath("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.dataset_spec.pb"));
}
TEST(PathUtilsTest, CheckpointsDirectory) {
EXPECT_THAT(CheckpointsDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.checkpoints"));
}
TEST(PathUtilsTest, CommittedChunksDirectory) {
EXPECT_THAT(CommittedChunksDirectory("/path/to/snapshot"),
MatchesRegex("/path/to/snapshot.chunks"));
}
TEST(PathUtilsTest, UncommittedChunksDirectory) {
EXPECT_THAT(
UncommittedChunksDirectory("/path/to/snapshot", 0),
MatchesRegex("/path/to/snapshot.streams.stream_0.uncommitted_chunks"));
}
}
}
} |
1,755 | cpp | tensorflow/tensorflow | prefetched_split_provider | tensorflow/core/data/service/snapshot/prefetched_split_provider.cc | tensorflow/core/data/service/snapshot/prefetched_split_provider_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_PREFETCHED_SPLIT_PROVIDER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_PREFETCHED_SPLIT_PROVIDER_H_
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace data {
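// Wraps a SplitProvider and prefetches its splits with a pool of writer
// threads. Each prefetched split is written to a TFRecord file under
// `directory`, so GetNext can return the split tensor and atomically move
// the corresponding file to the caller-supplied path.
//
// Minimal usage sketch (paths are hypothetical; error handling elided):
//
//   PrefetchedSplitProvider provider(std::move(inner_provider),
//                                    "/tmp/prefetched", tsl::Env::Default());
//   absl::StatusOr<std::optional<Tensor>> split =
//       provider.GetNext("/tmp/snapshot/split_0");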
class PrefetchedSplitProvider {
public:
explicit PrefetchedSplitProvider(
std::unique_ptr<SplitProvider> split_provider,
const std::string& directory, tsl::Env* env,
size_t num_write_threads = 20, size_t buffer_size_per_thread = 5);
virtual ~PrefetchedSplitProvider();
PrefetchedSplitProvider(const PrefetchedSplitProvider&) = delete;
PrefetchedSplitProvider& operator=(const PrefetchedSplitProvider&) = delete;
absl::StatusOr<std::optional<Tensor>> GetNext(const std::string& split_path);
absl::Status Reset();
void Cancel();
private:
struct SplitAndIndex {
Tensor split;
size_t index = 0;
std::string SplitPath(const std::string& directory) const {
return tsl::io::JoinPath(directory,
absl::StrCat("split_", index, ".tfrecord"));
}
friend bool operator<(const SplitAndIndex& lhs, const SplitAndIndex& rhs) {
return lhs.index < rhs.index;
}
};
absl::Status InitDirs();
std::unique_ptr<tsl::thread::ThreadPool> RunPrefetchThreads();
void PrefetchLoop();
bool ShouldPrefetchSplit() const;
absl::StatusOr<bool> PrefetchSplit();
absl::StatusOr<std::optional<SplitAndIndex>> GetSplitFromProvider();
void UpdateStatus(absl::Status status);
tsl::Env* const env_;
const std::string directory_;
const size_t num_write_threads_;
const size_t buffer_size_;
mutable absl::Mutex mu_;
mutable absl::CondVar ready_to_push_;
mutable absl::CondVar ready_to_pop_;
std::unique_ptr<SplitProvider> split_provider_;
absl::Status status_ ABSL_GUARDED_BY(mu_);
bool reset_ ABSL_GUARDED_BY(mu_) = false;
size_t split_index_to_read_ ABSL_GUARDED_BY(mu_) = 0;
size_t split_index_to_write_ ABSL_GUARDED_BY(mu_) = 0;
size_t finished_threads_ ABSL_GUARDED_BY(mu_) = 0;
absl::btree_set<SplitAndIndex> buffer_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<tsl::thread::ThreadPool> thread_pool_ ABSL_GUARDED_BY(mu_);
};
}
}
#endif
#include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace data {
PrefetchedSplitProvider::PrefetchedSplitProvider(
std::unique_ptr<SplitProvider> split_provider, const std::string& directory,
tsl::Env* env, size_t num_write_threads, size_t buffer_size_per_thread)
: env_(env),
directory_(directory),
num_write_threads_(num_write_threads),
buffer_size_(num_write_threads_ * buffer_size_per_thread),
split_provider_(std::move(split_provider)) {
absl::Status status = InitDirs();
if (!status.ok()) {
UpdateStatus(std::move(status));
return;
}
absl::MutexLock l(&mu_);
thread_pool_ = RunPrefetchThreads();
}
PrefetchedSplitProvider::~PrefetchedSplitProvider() { Cancel(); }
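// Blocks until the split with index `split_index_to_read_` reaches the front
// of the ordered buffer (or all writer threads finish), then renames its
// prefetched file to `split_path` and returns the tensor.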
absl::StatusOr<std::optional<Tensor>> PrefetchedSplitProvider::GetNext(
const std::string& split_path) ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() &&
(buffer_.empty() || buffer_.begin()->index != split_index_to_read_) &&
(finished_threads_ < num_write_threads_ || reset_)) {
ready_to_pop_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (buffer_.empty()) {
return std::nullopt;
}
if (buffer_.begin()->index != split_index_to_read_) {
return absl::InternalError(absl::StrCat(
"Failed to get tf.data snapshot split. Expected split ",
split_index_to_read_, ", got split ", buffer_.begin()->index,
". This is likely a tf.data bug."));
}
auto it = buffer_.begin();
SplitAndIndex split = std::move(*it);
buffer_.erase(it);
TF_RETURN_IF_ERROR(env_->RenameFile(split.SplitPath(directory_), split_path));
++split_index_to_read_;
ready_to_push_.Signal();
return std::move(split.split);
}
std::unique_ptr<tsl::thread::ThreadPool>
PrefetchedSplitProvider::RunPrefetchThreads() {
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "tf_data_prefetch_splits_thread",
num_write_threads_);
for (size_t i = 0; i < num_write_threads_; ++i) {
thread_pool->Schedule([this]() { PrefetchLoop(); });
}
return thread_pool;
}
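// Body of each writer thread: prefetch until cancelled, reset, or the
// underlying provider is exhausted. The last thread to finish wakes any
// consumers blocked in GetNext.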
void PrefetchedSplitProvider::PrefetchLoop() ABSL_LOCKS_EXCLUDED(mu_) {
while (ShouldPrefetchSplit()) {
absl::StatusOr<bool> has_next = PrefetchSplit();
if (!has_next.status().ok()) {
UpdateStatus(has_next.status());
break;
}
if (!*has_next) {
break;
}
}
absl::MutexLock l(&mu_);
if (++finished_threads_ >= num_write_threads_) {
ready_to_pop_.SignalAll();
}
}
bool PrefetchedSplitProvider::ShouldPrefetchSplit() const
ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
return status_.ok() && !reset_;
}
absl::StatusOr<bool> PrefetchedSplitProvider::PrefetchSplit()
ABSL_LOCKS_EXCLUDED(mu_) {
TF_ASSIGN_OR_RETURN(std::optional<SplitAndIndex> split,
GetSplitFromProvider());
if (!split.has_value()) {
return false;
}
TF_RETURN_IF_ERROR(
AtomicallyWriteTFRecords(split->SplitPath(directory_), {split->split},
tsl::io::compression::kNone, env_));
absl::MutexLock l(&mu_);
buffer_.insert(std::move(*split));
ready_to_pop_.Signal();
return true;
}
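// Pulls the next split from the wrapped provider, waiting while the buffer
// is full; returns std::nullopt on reset or when the provider runs out of
// splits.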
absl::StatusOr<std::optional<PrefetchedSplitProvider::SplitAndIndex>>
PrefetchedSplitProvider::GetSplitFromProvider() ABSL_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
while (status_.ok() && buffer_.size() >= buffer_size_ && !reset_) {
ready_to_push_.Wait(&mu_);
}
TF_RETURN_IF_ERROR(status_);
if (reset_) {
return std::nullopt;
}
Tensor split;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, &end_of_splits));
if (end_of_splits) {
return std::nullopt;
}
return SplitAndIndex{split, split_index_to_write_++};
}
absl::Status PrefetchedSplitProvider::Reset() ABSL_LOCKS_EXCLUDED(mu_) {
std::unique_ptr<tsl::thread::ThreadPool> thread_pool;
{
absl::MutexLock l(&mu_);
reset_ = true;
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
thread_pool = std::move(thread_pool_);
}
thread_pool.reset();
TF_RETURN_IF_ERROR(split_provider_->Reset());
absl::MutexLock l(&mu_);
TF_RETURN_IF_ERROR(status_);
reset_ = false;
split_index_to_read_ = 0;
split_index_to_write_ = 0;
finished_threads_ = 0;
buffer_.clear();
TF_RETURN_IF_ERROR(InitDirs());
thread_pool_ = RunPrefetchThreads();
return absl::OkStatus();
}
void PrefetchedSplitProvider::Cancel() {
UpdateStatus(
absl::CancelledError("tf.data prefetched split provider is shut down."));
std::unique_ptr<tsl::thread::ThreadPool> thread_pool;
{
absl::MutexLock l(&mu_);
thread_pool = std::move(thread_pool_);
}
}
absl::Status PrefetchedSplitProvider::InitDirs() {
if (env_->FileExists(directory_).ok()) {
int64_t undeleted_files, undeleted_dirs;
TF_RETURN_IF_ERROR(
env_->DeleteRecursively(directory_, &undeleted_files, &undeleted_dirs));
}
return env_->RecursivelyCreateDir(directory_);
}
void PrefetchedSplitProvider::UpdateStatus(absl::Status status)
ABSL_LOCKS_EXCLUDED(mu_) {
if (status.ok()) {
return;
}
absl::MutexLock l(&mu_);
status_.Update(std::move(status));
ready_to_push_.SignalAll();
ready_to_pop_.SignalAll();
}
}
} | #include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsSupersetOf;
using ::testing::UnorderedElementsAreArray;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
absl::StatusOr<std::vector<std::string>> TestDirs(size_t num_dirs) {
std::vector<std::string> test_dirs;
std::string base_dir;
if (!tsl::Env::Default()->LocalTempFilename(&base_dir)) {
return absl::FailedPreconditionError("Failed to create local temp file.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(base_dir));
for (size_t i = 0; i < num_dirs; ++i) {
std::string test_dir =
tsl::io::JoinPath(base_dir, absl::StrCat("test_dir_", i));
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(test_dir));
test_dirs.push_back(std::move(test_dir));
}
return test_dirs;
}
absl::StatusOr<std::unique_ptr<SplitProvider>> RangeSplitProvider(
int64_t range) {
DatasetDef range_dataset = testing::RangeDataset(range);
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(range_dataset, split_providers));
if (split_providers.size() != 1) {
return absl::InternalError(
absl::StrCat("Range dataset should have one split provider, got ",
split_providers.size(), "."));
}
return std::move(split_providers[0]);
}
template <class T>
T GetValue(const Tensor& tensor) {
return tensor.unaligned_flat<T>().data()[0];
}
template <class T>
absl::StatusOr<T> GetValueFromFile(const std::string& filename) {
snapshot_util::TFRecordReaderImpl reader(filename,
tsl::io::compression::kNone);
TF_RETURN_IF_ERROR(reader.Initialize(tsl::Env::Default()));
TF_ASSIGN_OR_RETURN(std::vector<Tensor> tensors, reader.GetTensors());
if (tensors.size() != 1) {
return absl::InternalError(absl::StrCat(
"A snapshot split file is expected to contain 1 tensor. Got ",
tensors.size(), " tensors from ", filename, "."));
}
return GetValue<T>(tensors[0]);
}
template <class T>
absl::StatusOr<std::vector<T>> GetSplits(
PrefetchedSplitProvider& prefetched_split_provider,
const std::string& test_dir) {
std::vector<T> splits;
for (size_t i = 0;; ++i) {
std::string target_split_path =
tsl::io::JoinPath(test_dir, absl::StrCat("split_", i));
TF_ASSIGN_OR_RETURN(std::optional<Tensor> split,
prefetched_split_provider.GetNext(target_split_path));
if (!split.has_value()) {
return splits;
}
T split_value = GetValue<T>(*split);
TF_ASSIGN_OR_RETURN(T split_from_file,
GetValueFromFile<T>(target_split_path));
if (split_value != split_from_file) {
return absl::InternalError(
absl::StrCat("Inconsistent splits. From buffer: ", split_value,
", from file: ", split_from_file, "."));
}
splits.push_back(split_value);
}
return splits;
}
std::vector<int64_t> Range(int64_t range) {
std::vector<int64_t> result(range);
std::iota(result.begin(), result.end(), 0);
return result;
}
class PrefetchedSplitProviderParamTest
: public ::testing::TestWithParam<
std::tuple<int64_t, size_t, size_t, size_t>> {
protected:
int64_t NumElements() const { return std::get<0>(GetParam()); }
size_t NumClients() const { return std::get<1>(GetParam()); }
size_t NumWriteThreads() const { return std::get<2>(GetParam()); }
size_t BufferSizePerThread() const { return std::get<3>(GetParam()); }
};
TEST_P(PrefetchedSplitProviderParamTest, GetSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
EXPECT_THAT(GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
IsOkAndHolds(ElementsAreArray(Range(NumElements()))));
}
TEST_P(PrefetchedSplitProviderParamTest, ConcurrentGetSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(1 + NumClients()));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
absl::Mutex mu;
std::vector<int64_t> splits;
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
  for (size_t i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[i, &prefetched_split_provider, &splits, &test_dirs, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> splits_per_thread,
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1 + i]));
EXPECT_TRUE(absl::c_is_sorted(splits_per_thread));
absl::MutexLock l(&mu);
absl::c_move(splits_per_thread, std::back_inserter(splits));
})));
}
client_threads.clear();
EXPECT_THAT(splits, UnorderedElementsAreArray(Range(NumElements())));
}
TEST_P(PrefetchedSplitProviderParamTest, Reset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
for (int i = 0; i < 3; ++i) {
EXPECT_THAT(GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
IsOkAndHolds(ElementsAreArray(Range(NumElements()))));
TF_EXPECT_OK(prefetched_split_provider.Reset());
}
}
TEST_P(PrefetchedSplitProviderParamTest, ConcurrentGetSplitsAndReset) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(NumElements()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(1 + NumClients()));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
NumWriteThreads(), BufferSizePerThread());
absl::Mutex mu;
std::vector<int64_t> splits;
std::vector<std::unique_ptr<tsl::Thread>> client_threads;
  for (size_t i = 0; i < NumClients(); ++i) {
client_threads.push_back(absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, absl::StrCat("Client_", i),
[i, &prefetched_split_provider, &splits, &test_dirs, &mu]() {
TF_ASSERT_OK_AND_ASSIGN(
std::vector<int64_t> splits_per_thread,
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1 + i]));
absl::MutexLock l(&mu);
absl::c_move(splits_per_thread, std::back_inserter(splits));
})));
}
TF_EXPECT_OK(prefetched_split_provider.Reset());
client_threads.clear();
EXPECT_THAT(splits, IsSupersetOf(Range(NumElements())));
}
INSTANTIATE_TEST_SUITE_P(
PrefetchedSplitProviderParams, PrefetchedSplitProviderParamTest,
::testing::Combine(
::testing::Values(0, 10, 1000),
::testing::Values(1, 5),
::testing::Values(1, 10),
::testing::Values(1, 10000)));
TEST(PrefetchedSplitProviderTest, Cancellation) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(999999));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default(),
2, 1);
std::unique_ptr<tsl::Thread> client_thread =
absl::WrapUnique(tsl::Env::Default()->StartThread(
{}, "client_thread",
[&prefetched_split_provider, &test_dirs]() {
EXPECT_THAT(
GetSplits<int64_t>(prefetched_split_provider, test_dirs[1]),
StatusIs(absl::StatusCode::kCancelled));
}));
prefetched_split_provider.Cancel();
client_thread.reset();
}
TEST(PrefetchedSplitProviderTest, ShutdownWithUnreadSplits) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<SplitProvider> split_provider,
RangeSplitProvider(100));
TF_ASSERT_OK_AND_ASSIGN(std::vector<std::string> test_dirs,
TestDirs(2));
PrefetchedSplitProvider prefetched_split_provider(
std::move(split_provider), test_dirs[0], tsl::Env::Default());
TF_EXPECT_OK(prefetched_split_provider.Reset());
}
}
}
} |
1,756 | cpp | tensorflow/tensorflow | snapshot_manager | tensorflow/core/data/service/snapshot/snapshot_manager.cc | tensorflow/core/data/service/snapshot/snapshot_manager_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_MANAGER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_SNAPSHOT_SNAPSHOT_MANAGER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
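// Tracks which snapshot streams are assigned to which workers and enforces a
// per-worker cap (`worker_max_concurrent_snapshots`) on concurrent snapshot
// assignments.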
class SnapshotAssignmentManager {
public:
explicit SnapshotAssignmentManager(int64_t worker_max_concurrent_snapshots)
: worker_max_concurrent_snapshots_(worker_max_concurrent_snapshots) {}
absl::StatusOr<bool> TryAddAssignment(absl::string_view snapshot_path,
absl::string_view worker_address,
int64_t stream_index);
void RemoveAssignment(absl::string_view snapshot_path,
absl::string_view worker_address, int64_t stream_index);
void AddSnapshot(absl::string_view snapshot_path);
std::vector<std::string> LoadBalanceSnapshots(
absl::string_view worker_address);
int64_t worker_max_concurrent_snapshots() const {
return worker_max_concurrent_snapshots_;
}
private:
struct Assignment {
std::string snapshot_path;
int64_t stream_index;
template <typename H>
friend H AbslHashValue(H h, const Assignment& a) {
return H::combine(std::move(h), a.snapshot_path, a.stream_index);
}
friend bool operator==(const Assignment& lhs, const Assignment& rhs) {
return lhs.snapshot_path == rhs.snapshot_path &&
lhs.stream_index == rhs.stream_index;
}
std::string DebugString() const {
return absl::Substitute(
"Assignment { snapshot_path: $0, stream_index: $1 }", snapshot_path,
stream_index);
}
};
absl::flat_hash_map<std::string, absl::flat_hash_set<Assignment>> assignments_
TF_GUARDED_BY(mu_);
absl::flat_hash_map<std::string, int64_t> snapshot_assignment_counts_
TF_GUARDED_BY(mu_);
const int64_t worker_max_concurrent_snapshots_;
mutable tsl::mutex mu_;
};
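// Coordinates the writing of one distributed snapshot on the dispatcher:
// Start() begins a fresh snapshot, Resume() recovers state from an existing
// snapshot directory, and worker heartbeats drive stream assignment and
// split handout.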
class SnapshotManager {
public:
static absl::StatusOr<std::unique_ptr<SnapshotManager>> Start(
const SnapshotRequest& request,
SnapshotAssignmentManager& assignment_manager, Env* env);
static absl::StatusOr<std::unique_ptr<SnapshotManager>> Resume(
absl::string_view path, SnapshotAssignmentManager& assignment_manager,
Env* env);
absl::Status WorkerHeartbeat(const WorkerHeartbeatRequest& request,
WorkerHeartbeatResponse& response);
absl::Status GetSnapshotSplit(const GetSnapshotSplitRequest& request,
GetSnapshotSplitResponse& response);
absl::Status GetSnapshotStreams(GetSnapshotStreamsResponse& response);
void Cancel();
private:
SnapshotManager(absl::string_view path,
SnapshotAssignmentManager& assignment_manager, Env* env)
: path_(path),
env_(env),
last_progress_log_time_(absl::FromUnixMicros(env->NowMicros())),
assignment_manager_(assignment_manager) {}
absl::Status Start(const SnapshotRequest& request);
absl::Status WriteOnDiskSkeleton();
absl::Status WriteOnDiskMetadata(const SnapshotRequest& request);
absl::Status Resume();
absl::Status ReadOnDiskMetadata();
absl::Status ReadOnDiskStreams();
absl::StatusOr<std::optional<std::pair<int64_t, bool>>>
MaybeGetOrCreateStreamAssignment(
absl::string_view worker_address,
const SnapshotTaskProgress* snapshot_progress);
absl::Status HandleStreamCompletion(int64_t stream_index,
absl::string_view worker_address);
void ReassignPreviouslyAssignedStream(int64_t stream_index,
absl::string_view worker_address);
std::optional<int64_t> MaybeAssignOrphanStream(
absl::string_view worker_address);
absl::StatusOr<std::optional<int64_t>> MaybeCreateAndAssignNewStream(
absl::string_view worker_address);
absl::Status HandleStreamError(absl::string_view worker_address,
const StatusProto& status_proto);
mutable tsl::mutex mu_;
mutable tsl::mutex get_split_mu_;
const std::string path_;
tsl::Env* const env_;
experimental::DistributedSnapshotMetadata metadata_ TF_GUARDED_BY(mu_);
absl::Time last_progress_log_time_ TF_GUARDED_BY(mu_);
absl::flat_hash_set<std::string> dead_workers_ TF_GUARDED_BY(mu_);
struct Stream {
explicit Stream(int64_t num_sources)
: num_assigned_splits_per_source(num_sources) {}
enum class State {
kActive,
kDone,
};
std::vector<int64_t> num_assigned_splits_per_source;
    int64_t num_assigned_splits() const {
      // Accumulate with an int64_t seed so the sum is not truncated to int.
      return absl::c_accumulate(num_assigned_splits_per_source, int64_t{0});
    }
State state = State::kActive;
};
struct Source {
Source(std::unique_ptr<PrefetchedSplitProvider> split_provider,
int64_t repetition_index, int64_t cardinality)
: split_provider(std::move(split_provider)),
repetition_index(repetition_index),
cardinality(cardinality) {}
std::unique_ptr<PrefetchedSplitProvider> split_provider;
int64_t repetition_index = 0;
const int64_t cardinality;
};
class StreamRestorer {
public:
explicit StreamRestorer(tsl::Env* env, absl::string_view path,
int64_t stream_index, int64_t num_sources,
SnapshotAssignmentManager& assignment_manager)
: env_(env),
path_(path),
stream_index_(stream_index),
num_sources_(num_sources),
assignment_manager_(assignment_manager) {}
absl::Status ReadOnDiskStream();
const std::optional<Stream>& GetStream() const { return restored_stream_; }
int64_t StreamIndex() const { return stream_index_; }
const std::string& WorkerAddress() const { return worker_address_; }
const absl::flat_hash_set<int64_t>& GlobalSplitIndices() const {
return global_split_indices_;
}
private:
absl::StatusOr<std::string> OwnerWorkerAddress() const;
absl::Status ReadOnDiskSource(int64_t source_index);
absl::Status ReadOnDiskSplit(int64_t source_index,
const std::vector<std::string>& split_files,
const std::string& split_file);
absl::Status SkipSplit(SplitProvider& split_provider);
tsl::Env* const env_;
const std::string path_;
const int64_t stream_index_;
const int64_t num_sources_;
SnapshotAssignmentManager& assignment_manager_;
std::string worker_address_;
std::optional<Stream> restored_stream_;
absl::flat_hash_set<int64_t> global_split_indices_;
};
absl::Status RestoreFrom(
const StreamRestorer& stream_restorer,
const std::vector<std::string>& stream_directories,
std::vector<std::unique_ptr<SplitProvider>>& split_providers,
std::vector<int64_t>& repetition_indices,
absl::flat_hash_set<int64_t>& global_split_indices);
Stream& GetStream(int64_t stream_index);
absl::Status InitStreamDirectory(
int64_t stream_index, const std::string& worker_address,
const std::vector<int64_t>& repetitions_per_source);
std::vector<Source> sources_ TF_GUARDED_BY(mu_);
absl::StatusOr<std::vector<Source>> CreateSources(
const DatasetDef& dataset_def) const;
  absl::StatusOr<int64_t> GetSplitsCardinality();
absl::Status ResetSource(Source& source, int64_t source_index);
int64_t num_sources() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return sources_.size();
}
absl::btree_map<int64_t, Stream> streams_ TF_GUARDED_BY(mu_);
int64_t num_completed_streams_ TF_GUARDED_BY(mu_) = 0;
absl::flat_hash_map<std::string, int64_t> assignments_ TF_GUARDED_BY(mu_);
SnapshotAssignmentManager& assignment_manager_ TF_GUARDED_BY(mu_);
int64_t num_assigned_splits_ TF_GUARDED_BY(mu_) = 0;
int64_t num_total_splits_ TF_GUARDED_BY(mu_) = 0;
enum class Mode {
kActive,
kWindingDown,
kDone,
kError,
};
Mode mode_ TF_GUARDED_BY(mu_) = Mode::kActive;
absl::Status status_ TF_GUARDED_BY(mu_);
};
}
}
#endif
#include "tensorflow/core/data/service/snapshot/snapshot_manager.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/file_utils.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/snapshot/prefetched_split_provider.h"
#include "tensorflow/core/data/service/split_provider.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/lib/io/compression.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/platform/threadpool.h"
#include "tsl/protobuf/error_codes.pb.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
const absl::Duration kProgressLoggingInterval = absl::Minutes(1);
absl::StatusOr<int64_t> CountSplits(SplitProvider& split_provider) {
if (split_provider.Cardinality() != kUnknownCardinality) {
return split_provider.Cardinality();
}
int64_t num_splits = 0;
Tensor tensor;
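  // GetNext() is also called once for the final end_of_splits signal, so the
  // loop overcounts by one; the decrement below corrects for that.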
for (bool end_of_splits = false; !end_of_splits; ++num_splits) {
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
}
--num_splits;
TF_RETURN_IF_ERROR(split_provider.Reset());
return num_splits;
}
absl::Status SkipSplit(SplitProvider& split_provider,
int64_t& repetition_index) {
Tensor tensor;
bool end_of_splits = false;
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
while (end_of_splits) {
++repetition_index;
TF_RETURN_IF_ERROR(split_provider.Reset());
TF_RETURN_IF_ERROR(split_provider.GetNext(&tensor, &end_of_splits));
}
return absl::OkStatus();
}
std::string PrefetchedSplitDir(const std::string& snapshot_path,
int64_t source_index) {
return tsl::io::JoinPath(snapshot_path, "prefetched_splits",
absl::StrCat("source_", source_index));
}
}
absl::StatusOr<bool> SnapshotAssignmentManager::TryAddAssignment(
absl::string_view snapshot_path, absl::string_view worker_address,
int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (assignments_[worker_address].size() >=
worker_max_concurrent_snapshots()) {
return false;
}
Assignment assignment{std::string(snapshot_path), stream_index};
auto [unused, success] = assignments_[worker_address].insert(assignment);
if (!success) {
return absl::InternalError(absl::StrCat("Worker ", worker_address,
" already had an assignment for ",
assignment.DebugString()));
}
++snapshot_assignment_counts_[snapshot_path];
return true;
}
void SnapshotAssignmentManager::RemoveAssignment(
absl::string_view snapshot_path, absl::string_view worker_address,
int64_t stream_index) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
auto num_erased = assignments_[worker_address].erase(
{std::string(snapshot_path), stream_index});
if ((snapshot_assignment_counts_[snapshot_path] -= num_erased) <= 0) {
snapshot_assignment_counts_.erase(snapshot_path);
}
}
void SnapshotAssignmentManager::AddSnapshot(absl::string_view snapshot_path)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!snapshot_assignment_counts_.contains(snapshot_path)) {
snapshot_assignment_counts_[snapshot_path] = 0;
}
}
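// Returns the snapshots the worker should keep working on (its current
// assignments), plus at most one additional snapshot: the unassigned snapshot
// with the fewest assignments overall. A worker already at its concurrency
// limit gets only its current assignments back.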
std::vector<std::string> SnapshotAssignmentManager::LoadBalanceSnapshots(
absl::string_view worker_address) TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::string> result;
tsl::mutex_lock l(mu_);
result.reserve(snapshot_assignment_counts_.size());
const auto it = assignments_.find(worker_address);
if (it != assignments_.end()) {
for (const Assignment& assignment : it->second) {
result.push_back(assignment.snapshot_path);
}
}
if (result.size() >= worker_max_concurrent_snapshots()) {
return result;
}
absl::btree_multimap<size_t, std::string> snapshots_by_count;
for (const auto& [snapshot, count] : snapshot_assignment_counts_) {
snapshots_by_count.emplace(count, snapshot);
}
for (const auto& [_, snapshot] : snapshots_by_count) {
if (absl::c_find(result, snapshot) == result.end()) {
result.push_back(snapshot);
return result;
}
}
return result;
}
absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Start(
const SnapshotRequest& request,
SnapshotAssignmentManager& assignment_manager, Env* env) {
std::unique_ptr<SnapshotManager> snapshot_manager{
new SnapshotManager{request.path(), assignment_manager, env}};
TF_RETURN_IF_ERROR(snapshot_manager->Start(request));
return snapshot_manager;
}
absl::Status SnapshotManager::Start(const SnapshotRequest& request)
TF_LOCKS_EXCLUDED(mu_) {
LOG(INFO) << "Starting to write tf.data snapshot at " << request.path();
if (env_->FileExists(request.path()).ok()) {
return errors::AlreadyExists("tf.data snapshot at ", request.path(),
" already exists.");
}
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(WriteOnDiskSkeleton());
TF_RETURN_IF_ERROR(WriteOnDiskMetadata(request));
TF_ASSIGN_OR_RETURN(sources_, CreateSources(request.dataset()));
TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());
metadata_ = request.metadata();
LOG(INFO) << "Started writing tf.data distributed snapshot at " << path_;
return absl::OkStatus();
}
absl::StatusOr<std::vector<SnapshotManager::Source>>
SnapshotManager::CreateSources(const DatasetDef& dataset_def) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
std::vector<SnapshotManager::Source> sources;
sources.reserve(split_providers.size());
for (size_t i = 0; i < split_providers.size(); ++i) {
    TF_ASSIGN_OR_RETURN(int64_t cardinality, CountSplits(*split_providers[i]));
sources.emplace_back(
std::make_unique<PrefetchedSplitProvider>(
std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),
0, cardinality);
}
return sources;
}
absl::StatusOr<int64_t> SnapshotManager::GetSplitsCardinality()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  return absl::c_accumulate(sources_, int64_t{0},
                            [](int64_t cardinality, const Source& source) {
                              return cardinality + source.cardinality;
                            });
}
absl::Status SnapshotManager::WriteOnDiskSkeleton()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(
env_->RecursivelyCreateDir(CommittedChunksDirectory(path_)));
TF_RETURN_IF_ERROR(env_->RecursivelyCreateDir(StreamsDirectory(path_)));
return absl::OkStatus();
}
absl::Status SnapshotManager::WriteOnDiskMetadata(
const SnapshotRequest& request) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_RETURN_IF_ERROR(AtomicallyWriteTextProto(SnapshotMetadataFilePath(path_),
request.metadata(), env_));
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(
DatasetSpecFilePath(path_), request.metadata().element_spec(), env_));
TF_RETURN_IF_ERROR(AtomicallyWriteBinaryProto(DatasetDefFilePath(path_),
request.dataset(), env_));
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<SnapshotManager>> SnapshotManager::Resume(
absl::string_view path, SnapshotAssignmentManager& assignment_manager,
Env* env) {
SnapshotManager* snapshot_manager =
new SnapshotManager(path, assignment_manager, env);
TF_RETURN_IF_ERROR(snapshot_manager->Resume());
return absl::WrapUnique(snapshot_manager);
}
absl::Status SnapshotManager::Resume() TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!env_->FileExists(path_).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recover tf.data snapshot at ", path_,
": the snapshot path doesn't exist."));
}
if (env_->FileExists(SnapshotDoneFilePath(path_)).ok()) {
mode_ = Mode::kDone;
LOG(INFO) << "Recovered finished tf.data snapshot at " << path_;
return absl::OkStatus();
}
if (env_->FileExists(SnapshotErrorFilePath(path_)).ok()) {
mode_ = Mode::kError;
StatusProto status_proto;
TF_RETURN_IF_ERROR(
ReadTextProto(env_, SnapshotErrorFilePath(path_), &status_proto));
status_ = tsl::StatusFromProto(status_proto);
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(ReadOnDiskMetadata());
TF_RETURN_IF_ERROR(ReadOnDiskStreams());
LOG(INFO) << "Resumed writing tf.data distributed snapshot at " << path_;
return absl::OkStatus();
}
absl::Status SnapshotManager::ReadOnDiskMetadata()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!env_->FileExists(SnapshotMetadataFilePath(path_)).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recover snapshot at ", path_,
": snapshot has no snapshot.metadata"));
}
TF_RETURN_IF_ERROR(
ReadTextProto(env_, SnapshotMetadataFilePath(path_), &metadata_));
if (!env_->FileExists(DatasetDefFilePath(path_)).ok()) {
return absl::InternalError(
absl::StrCat("Failed to recovery snapshot at ", path_,
": snapshot has no dataset_def.proto"));
}
return absl::OkStatus();
}
absl::Status SnapshotManager::ReadOnDiskStreams()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
std::string streams_path = StreamsDirectory(path_);
TF_ASSIGN_OR_RETURN(const std::vector<std::string> stream_directories,
GetChildren(streams_path, env_));
DatasetDef dataset_def;
TF_RETURN_IF_ERROR(
tsl::ReadBinaryProto(env_, DatasetDefFilePath(path_), &dataset_def));
std::vector<std::unique_ptr<SplitProvider>> split_providers;
TF_RETURN_IF_ERROR(CreateSplitProviders(dataset_def, split_providers));
std::vector<int64_t> repetition_indices(split_providers.size(), 0);
std::vector<int64_t> cardinalities;
for (size_t i = 0; i < split_providers.size(); ++i) {
TF_ASSIGN_OR_RETURN(int64_t cardinality, CountSplits(*split_providers[i]));
cardinalities.push_back(cardinality);
}
tsl::mutex mu;
absl::Status resume_status;
absl::flat_hash_set<int64_t> global_split_indices;
auto thread_pool = std::make_unique<tsl::thread::ThreadPool>(
env_, tsl::ThreadOptions{}, "restore_snapshot_stream_thread",
std::max(size_t{1}, stream_directories.size()));
for (const auto& stream_directory : stream_directories) {
std::string stream_path = tsl::io::JoinPath(streams_path, stream_directory);
std::vector<std::string> tokens = absl::StrSplit(stream_directory, '_');
int64_t stream_index;
if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &stream_index) ||
stream_index < 0) {
return absl::InternalError(absl::StrCat(
"Can't parse tf.data snapshot stream directory ", stream_path,
": filename must have the format stream_<stream_index>."));
}
thread_pool->Schedule([this, &stream_directories, stream_index,
&split_providers, &repetition_indices,
&global_split_indices, &resume_status,
&mu]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
StreamRestorer stream_restorer(env_, path_, stream_index,
split_providers.size(),
assignment_manager_);
absl::Status s = stream_restorer.ReadOnDiskStream();
tsl::mutex_lock l(mu);
resume_status.Update(s);
resume_status.Update(RestoreFrom(stream_restorer, stream_directories,
split_providers, repetition_indices,
global_split_indices));
});
}
thread_pool.reset();
TF_RETURN_IF_ERROR(resume_status);
for (int64_t i = 0; i < split_providers.size(); ++i) {
sources_.emplace_back(
std::make_unique<PrefetchedSplitProvider>(
std::move(split_providers[i]), PrefetchedSplitDir(path_, i), env_),
repetition_indices[i], cardinalities[i]);
}
TF_ASSIGN_OR_RETURN(num_total_splits_, GetSplitsCardinality());
for (int64_t i = 0; i < global_split_indices.size(); ++i) {
if (!global_split_indices.contains(i)) {
return absl::InternalError(
absl::StrCat("Failed to restore tf.data snapshot at ", path_,
": Found missing global split index ", i, "."));
}
}
num_assigned_splits_ = global_split_indices.size();
if (!streams_.empty() && absl::c_all_of(streams_, [](const auto& stream) {
return stream.second.state == Stream::State::kDone;
})) {
mode_ = Mode::kDone;
TF_RETURN_IF_ERROR(AtomicallyWriteStringToFile(SnapshotDoneFilePath(path_),
std::string(), env_));
LOG(INFO) << "Finished writing tf.data distributed snapshot at " << path_;
}
return absl::OkStatus();
}
absl::StatusOr<std::string>
SnapshotManager::StreamRestorer::OwnerWorkerAddress() const {
std::string worker_address;
TF_RETURN_IF_ERROR(
env_->FileExists(StreamWorkerFilePath(path_, stream_index_)));
TF_RETURN_IF_ERROR(tsl::ReadFileToString(
env_, StreamWorkerFilePath(path_, stream_index_), &worker_address));
return worker_address;
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskStream() {
absl::StatusOr<std::string> worker_address = OwnerWorkerAddress();
if (!worker_address.ok()) {
return absl::OkStatus();
}
worker_address_ = *worker_address;
restored_stream_.emplace(num_sources_);
std::string splits_path = SplitsDirectory(path_, stream_index_);
TF_ASSIGN_OR_RETURN(std::vector<std::string> source_directories,
GetChildren(splits_path, env_));
for (const auto& source_directory : source_directories) {
std::string source_path = tsl::io::JoinPath(splits_path, source_directory);
std::vector<std::string> tokens = absl::StrSplit(source_directory, '_');
int64_t source_index = 0;
if (tokens.size() != 2 || !absl::SimpleAtoi(tokens[1], &source_index) ||
source_index < 0) {
return absl::InternalError(absl::StrCat(
"Can't parse tf.data snapshot source directory ", source_path,
": filename must have the format source_<source_index>."));
}
if (source_index >= num_sources_) {
return absl::InternalError(
absl::StrCat("Found conflict between the number of sources, ",
num_sources_, ", and the filename of ", source_path));
}
TF_RETURN_IF_ERROR(ReadOnDiskSource(source_index));
}
if (env_->FileExists(StreamDoneFilePath(path_, stream_index_)).ok()) {
restored_stream_->state = Stream::State::kDone;
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(bool assignment_added,
assignment_manager_.TryAddAssignment(
path_, *worker_address, stream_index_));
if (!assignment_added) {
return absl::InternalError(absl::StrCat(
"Failed to recover tf.data snapshot dispatcher: Worker ",
*worker_address, " was assigned too many streams. At most ",
assignment_manager_.worker_max_concurrent_snapshots(),
" streams are allowed."));
}
return absl::OkStatus();
}
absl::Status SnapshotManager::StreamRestorer::ReadOnDiskSource(
int64_t source_index) {
std::string source_directory =
SourceDirectory(path_, stream_index_, source_index);
TF_ASSIGN_OR_RETURN(std::vector<std::string> repetition_directories, | #include "tensorflow/core/data/service/snapshot/snapshot_manager.h"
#include <memory>
#include <string>
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/status_to_from_proto.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
#include "tsl/protobuf/status.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
template <class T>
T GetValue(const Tensor& tensor) {
return tensor.unaligned_flat<T>().data()[0];
}
TEST(SnapshotManagerTest, CreateStreamAssignment) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).base_path(), snapshot_path);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).stream_index(), 0);
EXPECT_EQ(heartbeat_response.snapshot_tasks(0).num_sources(), 1);
}
TEST(SnapshotManagerTest, GetSnapshotSplit) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
GetSnapshotSplitRequest get_split_request;
GetSnapshotSplitResponse get_split_response;
get_split_request.set_worker_address("localhost");
get_split_request.set_base_path(task.base_path());
get_split_request.set_stream_index(task.stream_index());
get_split_request.set_source_index(0);
for (int64_t i = 0; i < 10; ++i) {
TF_ASSERT_OK(snapshot_manager->GetSnapshotSplit(get_split_request,
get_split_response));
Tensor tensor;
ASSERT_TRUE(tensor.FromProto(get_split_response.split()));
EXPECT_EQ(GetValue<int64_t>(tensor), i);
}
}
TEST(SnapshotManagerTest, HandleStreamCompletion) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost:1");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:2");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_EQ(heartbeat_response.snapshot_tasks().size(), 1);
const SnapshotTaskDef& snapshot_task = heartbeat_response.snapshot_tasks(0);
EXPECT_EQ(snapshot_task.base_path(), snapshot_path);
EXPECT_EQ(snapshot_task.stream_index(), 1);
EXPECT_EQ(snapshot_task.num_sources(), 1);
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:1");
SnapshotTaskProgress progress;
*progress.mutable_snapshot_task() = snapshot_task;
progress.set_completed(true);
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
progress;
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());
heartbeat_request.Clear();
heartbeat_response.Clear();
heartbeat_request.set_worker_address("localhost:1");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_TRUE(heartbeat_response.snapshot_tasks().empty());
}
TEST(SnapshotManagerTest, Resume) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager_1(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager_1,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
heartbeat_response.Clear();
SnapshotAssignmentManager snapshot_assignment_manager_2(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> resumed_manager,
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,
Env::Default()));
TF_EXPECT_OK(
resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
}
TEST(SnapshotManagerTest, SnapshotStreamError) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest snapshot_request;
*snapshot_request.mutable_dataset() = testing::RangeDataset(10);
snapshot_request.set_path(snapshot_path);
*snapshot_request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(snapshot_request, snapshot_assignment_manager,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
heartbeat_response.Clear();
SnapshotTaskProgress snapshot_task_progress;
*snapshot_task_progress.mutable_snapshot_task() = task;
*snapshot_task_progress.mutable_status() =
tsl::StatusToProto(errors::NotFound("Not found"));
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
snapshot_task_progress;
TF_EXPECT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
TF_ASSERT_OK(
Env::Default()->FileExists(SnapshotErrorFilePath(snapshot_path)));
StatusProto status_proto;
TF_ASSERT_OK(ReadTextProto(
Env::Default(), SnapshotErrorFilePath(snapshot_path), &status_proto));
EXPECT_THAT(tsl::StatusFromProto(status_proto),
StatusIs(error::NOT_FOUND, "Not found"));
}
TEST(SnapshotManagerTest, ResumeFromError) {
std::string snapshot_path = testing::LocalTempFilename();
SnapshotRequest request;
*request.mutable_dataset() = testing::RangeDataset(10);
request.set_path(snapshot_path);
*request.mutable_metadata() =
testing::CreateDummyDistributedSnapshotMetadata();
SnapshotAssignmentManager snapshot_assignment_manager_1(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> snapshot_manager,
SnapshotManager::Start(request, snapshot_assignment_manager_1,
Env::Default()));
WorkerHeartbeatRequest heartbeat_request;
WorkerHeartbeatResponse heartbeat_response;
heartbeat_request.set_worker_address("localhost");
TF_ASSERT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
ASSERT_THAT(heartbeat_response.snapshot_tasks(), SizeIs(1));
const SnapshotTaskDef& task = heartbeat_response.snapshot_tasks(0);
heartbeat_response.Clear();
SnapshotTaskProgress snapshot_task_progress;
*snapshot_task_progress.mutable_snapshot_task() = task;
*snapshot_task_progress.mutable_status() =
tsl::StatusToProto(errors::NotFound("Not found"));
(*heartbeat_request.mutable_snapshot_task_progress())[snapshot_path] =
snapshot_task_progress;
TF_EXPECT_OK(
snapshot_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
heartbeat_response.Clear();
SnapshotAssignmentManager snapshot_assignment_manager_2(
2);
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SnapshotManager> resumed_manager,
SnapshotManager::Resume(snapshot_path, snapshot_assignment_manager_2,
Env::Default()));
TF_EXPECT_OK(
resumed_manager->WorkerHeartbeat(heartbeat_request, heartbeat_response));
EXPECT_THAT(heartbeat_response.snapshot_tasks(), IsEmpty());
}
TEST(SnapshotAssignmentManagerTest, LoadBalanceSnapshots) {
SnapshotAssignmentManager snapshot_assignment_manager(
2);
snapshot_assignment_manager.AddSnapshot("snapshot_1");
snapshot_assignment_manager.AddSnapshot("snapshot_2");
snapshot_assignment_manager.AddSnapshot("snapshot_3");
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_3", "worker_1", 0),
IsOkAndHolds(true));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_3", _));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre(Not("snapshot_3")));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_2", "worker_1", 0),
IsOkAndHolds(true));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
UnorderedElementsAre("snapshot_2", "snapshot_3"));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_1"));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_1", "worker_1", 0),
IsOkAndHolds(false));
EXPECT_THAT(snapshot_assignment_manager.TryAddAssignment(
"snapshot_2", "worker_2", 0),
IsOkAndHolds(true));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
UnorderedElementsAre("snapshot_2", "snapshot_3"));
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
snapshot_assignment_manager.RemoveAssignment("snapshot_2", "worker_1",
0);
EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_3", "snapshot_1"));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
snapshot_assignment_manager.RemoveAssignment("snapshot_3", "worker_1",
0);
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
ElementsAre("snapshot_1"));
ASSERT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_2"),
ElementsAre("snapshot_2", "snapshot_1"));
}
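// A small additional check, sketched from the behavior exercised above: with
// no assignments yet, a worker is offered exactly one snapshot to start on.
TEST(SnapshotAssignmentManagerTest, OffersOneSnapshotToIdleWorker) {
  SnapshotAssignmentManager snapshot_assignment_manager(
      /*worker_max_concurrent_snapshots=*/2);
  snapshot_assignment_manager.AddSnapshot("snapshot_1");
  snapshot_assignment_manager.AddSnapshot("snapshot_2");
  EXPECT_THAT(snapshot_assignment_manager.LoadBalanceSnapshots("worker_1"),
              SizeIs(1));
}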
}
}
} |
1,757 | cpp | tensorflow/tensorflow | data_service_client | tensorflow/core/data/service/client/data_service_client.cc | tensorflow/core/data/service/client/data_service_client_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_CLIENT_DATA_SERVICE_CLIENT_H_
#define TENSORFLOW_CORE_DATA_SERVICE_CLIENT_DATA_SERVICE_CLIENT_H_
#include <functional>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/worker_client.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
class DataServiceContext {
public:
virtual ~DataServiceContext() = default;
virtual std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) = 0;
virtual void RecordBufferEnqueue(const std::vector<Tensor>& element) = 0;
virtual void RecordBufferDequeue(const std::vector<Tensor>& element) = 0;
virtual double GetTargetProcessingTimeNsec() const = 0;
virtual int64_t UpdateMaxOutstandingRequests(
int64_t max_outstanding_requests,
int64_t requested_outstanding_requests) = 0;
};
using DataServiceContextFactory =
std::function<std::unique_ptr<DataServiceContext>()>;
class DataServiceClient {
public:
explicit DataServiceClient(const DataServiceParams& params);
virtual ~DataServiceClient();
DataServiceClient(const DataServiceClient&) = delete;
DataServiceClient& operator=(const DataServiceClient&) = delete;
Status Initialize(
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator);
virtual absl::StatusOr<GetNextResult> GetNext(
DataServiceContextFactory context_factory);
void Cancel();
TraceMeMetadata GetTraceMeMetadata() const;
private:
struct Task {
Task(const TaskInfo& info, std::unique_ptr<DataServiceWorkerClient> worker)
: info(info), worker(std::move(worker)) {}
const TaskInfo info;
std::unique_ptr<DataServiceWorkerClient> worker;
int64_t round = 0;
bool removed = false;
bool skipped_previous_round = false;
bool in_use TF_GUARDED_BY(&DataServiceClient::mu_) = false;
bool end_of_sequence TF_GUARDED_BY(&DataServiceClient::mu_) = false;
int64_t num_retries = 0;
};
struct Result {
Result() = default;
Result(Result&&) = default;
Result& operator=(Result&&) = default;
Result(const Result&) = delete;
Result& operator=(const Result&) = delete;
bool ready TF_GUARDED_BY(&DataServiceClient::mu_) = false;
std::vector<Tensor> element TF_GUARDED_BY(&DataServiceClient::mu_);
int64_t element_index TF_GUARDED_BY(&DataServiceClient::mu_) = -1;
int64_t task_id TF_GUARDED_BY(&DataServiceClient::mu_) = -1;
bool end_of_sequence TF_GUARDED_BY(&DataServiceClient::mu_) = false;
bool skip TF_GUARDED_BY(&DataServiceClient::mu_) = false;
};
void EnsureThreadsStarted();
void CancelThreads();
bool Finished() const;
bool ShouldWaitForNext() const;
void DeleteLocalWorkerTasks();
bool ShouldDeleteLocalTask(const TaskInfo& task) const;
void TaskThreadManager();
void TryBlockRound(int64_t round) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void UpdateIterationFinished(bool iteration_finished);
Status AddTask(const TaskInfo& task_info);
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> CreateWorkerClient(
const TaskInfo& task_info);
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> CreateWorkerClient(
const std::string& protocol, const TaskInfo& task_info);
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
CreateGrpcWorkerClient(const TaskInfo& task_info);
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
CreateAlternativeWorkerClientWithGrpcFallback(
const DataTransferServerInfo& transfer_server, const TaskInfo& task_info);
void Heartbeat();
void UpdateTasks(const ClientHeartbeatResponse& resp);
bool ShouldReadFromTask(const TaskInfo& task) const;
void RecordTFMetrics(const ClientHeartbeatResponse& resp);
void UpdateBufferSize();
void UpdateWorkerThreads();
void RunWorkerThread(std::function<void()> done);
bool ShouldProcessTask();
std::shared_ptr<Task> GetTaskToProcess();
void AdvanceTaskIndex();
Status TryGetElement(const Task& task, bool allow_skip,
GetElementResult& result);
void ProcessGetElementResponse(bool enqueue_result,
GetElementResult& get_element_result,
std::shared_ptr<Result> result, Task& task);
Status GetElementTraced(Task* task, int64_t deadline_micros,
bool enqueue_result, bool allow_skip,
std::shared_ptr<Result> result);
Status MaybeRemoveTask(Task& task, int64_t deadline_micros, Result& result);
Status GetElement(Task* task, int64_t deadline_micros, bool enqueue_result,
bool allow_skip, std::shared_ptr<Result> result);
bool ResultReady() const;
std::shared_ptr<Result> PopNextResult();
bool IsCoordinatedRead() const;
std::string DebugString() const;
const DataServiceParams params_;
mutable mutex mu_;
condition_variable get_next_cv_ TF_GUARDED_BY(mu_);
condition_variable worker_thread_cv_ TF_GUARDED_BY(mu_);
condition_variable manager_thread_cv_ TF_GUARDED_BY(mu_);
bool cancelled_ TF_GUARDED_BY(mu_) = false;
int64_t outstanding_requests_ TF_GUARDED_BY(mu_) = 0;
int64_t max_outstanding_requests_ TF_GUARDED_BY(mu_);
int64_t num_running_worker_threads_ TF_GUARDED_BY(mu_) = 0;
int64_t next_task_index_ TF_GUARDED_BY(mu_) = 0;
int64_t finished_tasks_ TF_GUARDED_BY(mu_) = 0;
std::vector<std::shared_ptr<Task>> tasks_ TF_GUARDED_BY(mu_);
int64_t current_round_ TF_GUARDED_BY(mu_) = 0;
std::optional<int64_t> round_robin_round_limit_ TF_GUARDED_BY(mu_);
Status status_ TF_GUARDED_BY(mu_) = absl::OkStatus();
std::queue<std::shared_ptr<Result>> results_ TF_GUARDED_BY(mu_);
bool initialized_ = false;
std::unique_ptr<DataServiceContext> ctx_ TF_GUARDED_BY(mu_);
int64_t job_id_;
int64_t iteration_client_id_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_;
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info_;
Allocator* allocator_;
int64_t get_next_index_ TF_GUARDED_BY(mu_) = 0;
bool iteration_finished_ TF_GUARDED_BY(mu_) = false;
bool should_finish_iteration_ TF_GUARDED_BY(mu_) = true;
absl::flat_hash_set<int64_t> worker_uids_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<Thread>> worker_threads_ TF_GUARDED_BY(mu_);
std::unique_ptr<Thread> task_thread_manager_ TF_GUARDED_BY(mu_);
};
}
}
#endif
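// Illustrative only (not part of this translation unit): a typical consumer
// drives this class roughly as follows, assuming `params` describes a
// reachable dispatcher and `context_factory` is a DataServiceContextFactory.
//
//   DataServiceClient client(params);
//   TF_RETURN_IF_ERROR(client.Initialize(/*accelerator_device_info=*/nullptr,
//                                        /*allocator=*/nullptr));
//   while (true) {
//     TF_ASSIGN_OR_RETURN(GetNextResult result,
//                         client.GetNext(context_factory));
//     if (result.end_of_sequence) break;
//     // Consume result.tensors.
//   }
//   client.Cancel();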
#include "tensorflow/core/data/service/client/data_service_client.h"
#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/ascii.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/client/validate_utils.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/dispatcher_client.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/data/service/worker_client.h"
#include "tensorflow/core/data/service/worker_impl.h"
#include "tensorflow/core/data/utils.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tsl/platform/host_info.h"
#include "tsl/platform/retrying_utils.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
bool IsColocatedTask(const TaskInfo& task) {
return absl::c_any_of(task.worker_tags(), [](std::string_view worker_tag) {
return absl::AsciiStrToUpper(worker_tag) == kColocatedWorkerTag;
});
}
absl::StatusOr<DataTransferServerInfo> GetTransferServer(
const std::string& protocol, const TaskInfo& task_info) {
for (const auto& transfer_server : task_info.transfer_servers()) {
if (transfer_server.protocol() == protocol) {
return transfer_server;
}
}
return errors::NotFound("protocol ", protocol,
" is not available for worker ",
task_info.worker_address());
}
}
DataServiceClient::DataServiceClient(const DataServiceParams& params)
: params_(params),
max_outstanding_requests_(params.max_outstanding_requests) {}
DataServiceClient::~DataServiceClient() {
VLOG(2) << "Destroying data service client for iteration id "
<< iteration_client_id_;
task_thread_manager_.reset();
if (initialized_) {
Status s = dispatcher_->ReleaseIterationClient(iteration_client_id_);
if (!s.ok()) {
LOG(WARNING) << "Failed to release iteration client id: " << s;
}
}
for (auto& worker_thread : worker_threads_) {
worker_thread.reset();
}
DeleteLocalWorkerTasks();
VLOG(2) << "Destroyed data service dataset iterator for iteration id "
<< iteration_client_id_;
}
Status DataServiceClient::Initialize(
const DeviceBase::AcceleratorDeviceInfo* accelerator_device_info,
Allocator* allocator) {
accelerator_device_info_ = accelerator_device_info;
allocator_ = allocator;
TF_RETURN_IF_ERROR(ValidateDataServiceParams(params_));
VLOG(3) << "Connecting to " << params_.address
<< " in tf.data service client.";
dispatcher_ = std::make_unique<DataServiceDispatcherClient>(params_.address,
params_.protocol);
int64_t deadline_micros = kint64max;
std::optional<std::string> job_name;
if (!params_.job_name.empty()) {
job_name = params_.job_name;
}
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
return dispatcher_->GetOrCreateJob(
params_.dataset_id, params_.processing_mode, job_name,
params_.num_consumers,
params_.cross_trainer_cache_options.has_value(),
params_.target_workers, job_id_);
},
strings::StrCat("get or create job with dispatcher at ", params_.address),
deadline_micros));
TF_RETURN_IF_ERROR(grpc_util::Retry(
[&]() {
return dispatcher_->GetOrCreateIteration(job_id_, params_.repetition,
iteration_client_id_);
},
strings::StrCat("get or create iteration with dispatcher at ",
params_.address),
deadline_micros));
initialized_ = true;
return absl::OkStatus();
}
absl::StatusOr<GetNextResult> DataServiceClient::GetNext(
DataServiceContextFactory context_factory) TF_LOCKS_EXCLUDED(mu_) {
VLOG(3) << "Getting the next element from tf.data service client.";
mutex_lock l(mu_);
if (ctx_ == nullptr) {
ctx_ = context_factory();
}
EnsureThreadsStarted();
std::shared_ptr<Result> result;
do {
while (!ResultReady() && !Finished() && !cancelled_ && status_.ok()) {
VLOG(3) << "Blocking in GetNext: " << DebugString();
get_next_cv_.wait(l);
}
if (cancelled_) {
VLOG(3) << "Returning from GetNext due to cancellation";
return errors::Cancelled("Data service iterator was cancelled");
}
if (!status_.ok()) {
VLOG(3) << "Returning from GetNext with error " << status_;
return status_;
}
if (results_.empty()) {
VLOG(3) << "Returning from GetNext with end_of_sequence";
return GetNextResult::EndOfSequence();
}
if (!ResultReady()) {
VLOG(3) << "Returning from GetNext with internal error";
return errors::Internal("Expected a result to be ready, but none were.");
}
result = PopNextResult();
worker_thread_cv_.notify_one();
if (result->skip) {
VLOG(3) << "Skipping result from task " << result->task_id;
}
} while (result->skip);
GetNextResult next;
next.end_of_sequence = result->end_of_sequence;
if (next.end_of_sequence) {
VLOG(1) << "Returning end_of_sequence";
return next;
}
VLOG(1) << "Returning the next element from data service dataset's "
<< "Iterator: task " << result->task_id << ", element "
<< result->element_index;
if (IsCoordinatedRead()) {
VLOG(1) << "Consumer " << *params_.consumer_index << ": Result "
<< get_next_index_++;
}
next.tensors.swap(result->element);
return next;
}
void DataServiceClient::Cancel() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
for (const auto& task : tasks_) {
task->worker->TryCancel();
}
cancelled_ = true;
worker_thread_cv_.notify_all();
manager_thread_cv_.notify_all();
get_next_cv_.notify_all();
}
TraceMeMetadata DataServiceClient::GetTraceMeMetadata() const {
TraceMeMetadata result;
int64_t num_tasks = -1;
int64_t autotuned_max_outstanding_requests = model::kAutotune;
if (mu_.try_lock()) {
num_tasks = tasks_.size() - finished_tasks_;
autotuned_max_outstanding_requests = max_outstanding_requests_;
mu_.unlock();
}
result.push_back(std::make_pair(
"num_tasks",
num_tasks == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(num_tasks))));
result.push_back(std::make_pair("job_name", params_.job_name));
result.push_back(std::make_pair(
"max_outstanding_requests",
strings::Printf(
"%lld", static_cast<long long>(params_.max_outstanding_requests))));
if (params_.max_outstanding_requests == model::kAutotune) {
result.push_back(std::make_pair(
"autotuned_max_outstanding_requests",
strings::Printf("%lld", static_cast<long long>(
autotuned_max_outstanding_requests))));
}
return result;
}
void DataServiceClient::EnsureThreadsStarted()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!task_thread_manager_ && !cancelled_) {
task_thread_manager_ = ctx_->StartThread("task-thread-manager",
[this]() { TaskThreadManager(); });
}
}
bool DataServiceClient::Finished() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return num_running_worker_threads_ == 0 && !ShouldWaitForNext();
}
bool DataServiceClient::ShouldWaitForNext() const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (should_finish_iteration_) {
return !iteration_finished_;
}
return tasks_.empty() || finished_tasks_ < tasks_.size();
}
void DataServiceClient::DeleteLocalWorkerTasks() TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::shared_ptr<Task>> tasks;
{
mutex_lock l(mu_);
tasks = tasks_;
}
for (const std::shared_ptr<Task>& task : tasks) {
std::shared_ptr<DataServiceWorkerImpl> worker =
LocalWorkers::Get(task->info.worker_address());
if (worker && ShouldDeleteLocalTask(task->info)) {
worker->DeleteLocalTask(task->info);
}
}
}
bool DataServiceClient::ShouldDeleteLocalTask(const TaskInfo& task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (IsCoordinatedRead()) {
return false;
}
if (params_.target_workers == TARGET_WORKERS_LOCAL) {
return true;
}
return params_.target_workers == TARGET_WORKERS_AUTO && IsColocatedTask(task);
}
void DataServiceClient::TaskThreadManager() TF_LOCKS_EXCLUDED(mu_) {
auto cleanup =
gtl::MakeCleanup([] { VLOG(1) << "Task thread manager exiting"; });
VLOG(1) << "Starting task thread manager";
uint64 next_check = Env::Default()->NowMicros();
while (true) {
{
mutex_lock l(mu_);
while (!cancelled_ && Env::Default()->NowMicros() < next_check) {
int64_t remaining_time = next_check - Env::Default()->NowMicros();
VLOG(4) << "Task thread manager waiting for " << remaining_time << "us";
manager_thread_cv_.wait_for(l,
std::chrono::microseconds(remaining_time));
}
if (cancelled_) {
VLOG(3) << "Task thread manager finished";
return;
}
}
Heartbeat();
UpdateBufferSize();
UpdateWorkerThreads();
next_check = Env::Default()->NowMicros() +
absl::ToInt64Microseconds(params_.task_refresh_interval);
}
}
void DataServiceClient::TryBlockRound(int64_t round)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (round_robin_round_limit_.has_value() &&
round_robin_round_limit_.value() == round) {
return;
}
if (current_round_ >= round) {
VLOG(1) << "Rejecting request to block round " << round
<< ", because processing has already begun for round "
<< current_round_;
return;
}
VLOG(1) << "Accepting request to block round " << round;
round_robin_round_limit_ = round;
}
void DataServiceClient::UpdateIterationFinished(bool iteration_finished)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!iteration_finished) {
return;
}
iteration_finished_ = true;
get_next_cv_.notify_all();
worker_thread_cv_.notify_all();
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateWorkerClient(const std::string& protocol,
const TaskInfo& task_info) {
TF_ASSIGN_OR_RETURN(DataTransferServerInfo transfer_server,
GetTransferServer(protocol, task_info));
return CreateDataServiceWorkerClient(params_.protocol, transfer_server,
accelerator_device_info_, allocator_);
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateGrpcWorkerClient(const TaskInfo& task_info) {
return CreateWorkerClient(kGrpcTransferProtocol, task_info);
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateAlternativeWorkerClientWithGrpcFallback(
const DataTransferServerInfo& transfer_server, const TaskInfo& task_info) {
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>> worker =
CreateDataServiceWorkerClient(params_.protocol, transfer_server,
accelerator_device_info_, allocator_);
if (worker.ok()) {
LOG(INFO) << "Successfully started client for data transfer protocol '"
<< transfer_server.protocol() << "' for worker '"
<< task_info.worker_address() << "'.";
return worker;
}
LOG(INFO) << "Failed to start client for data transfer protocol '"
<< transfer_server.protocol() << "' for worker '"
<< task_info.worker_address() << "'; falling back to grpc. "
<< "Original error: " << worker.status();
metrics::RecordTFDataServiceDataTransferProtocolFallback(
transfer_server.protocol(),
static_cast<error::Code>(worker.status().raw_code()),
std::string(worker.status().message()));
return CreateGrpcWorkerClient(task_info);
}
absl::StatusOr<std::unique_ptr<DataServiceWorkerClient>>
DataServiceClient::CreateWorkerClient(const TaskInfo& task_info) {
if (params_.data_transfer_protocol == kLocalTransferProtocol ||
(tsl::port::JobUid() != -1 &&
LocalWorkers::Get(task_info.worker_address()) != nullptr)) {
DataTransferServerInfo info;
info.set_protocol(kLocalTransferProtocol);
info.set_address(task_info.worker_address());
return CreateDataServiceWorkerClient(params_.protocol, info,
accelerator_device_info_, allocator_);
}
if (!params_.data_transfer_protocol.empty()) {
TF_ASSIGN_OR_RETURN(
DataTransferServerInfo transfer_server,
GetTransferServer(params_.data_transfer_protocol, task_info));
return CreateAlternativeWorkerClientWithGrpcFallback(transfer_server,
task_info);
}
if (std::string default_protocol = DefaultDataTransferProtocol();
default_protocol != kGrpcTransferProtocol) {
absl::StatusOr<DataTransferServerInfo> transfer_server =
GetTransferServer(default_protocol, task_info);
if (transfer_server.ok()) {
return CreateAlternativeWorkerClientWithGrpcFallback(*transfer_server,
task_info);
}
VLOG(1) << "Failed to find transfer server for default data transfer "
"protocol '"
<< default_protocol << "' for worker '"
<< task_info.worker_address()
<< "'; falling back to grpc. Original error: "
<< transfer_server.status();
metrics::RecordTFDataServiceDataTransferProtocolFallback(
default_protocol, error::Code::NOT_FOUND,
"Failed to find transfer server for default protocol");
}
return CreateGrpcWorkerClient(task_info);
}
Status DataServiceClient::AddTask(const TaskInfo& task_info)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<DataServiceWorkerClient> worker,
CreateWorkerClient(task_info));
metrics::RecordTFDataServiceDataTransferProtocolUsed(
worker->GetDataTransferProtocol(),
!params_.data_transfer_protocol.empty());
tasks_.push_back(std::make_shared<Task>(task_info, std::move(worker)));
worker_thread_cv_.notify_one();
if (IsCoordinatedRead()) {
VLOG(1) << "Consumer " << params_.consumer_index.value() << " adding task "
<< task_info.task_id() << " to read from worker "
<< task_info.worker_address()
<< ". Task starting round: " << task_info.starting_round();
DCHECK_LE(current_round_, task_info.starting_round());
if (current_round_ == task_info.starting_round()) {
DCHECK_EQ(next_task_index_, 0);
}
}
if (!IsCoordinatedRead()) {
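    // Shuffle the task list so that this client does not read from workers in
    // registration order. (The default-seeded std::mt19937 yields the same
    // permutation in every process for a given task count; presumably that is
    // acceptable here since task arrival order already differs across
    // clients.)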
std::mt19937 rng;
std::shuffle(tasks_.begin(), tasks_.end(), rng);
}
return absl::OkStatus();
}
void DataServiceClient::Heartbeat() TF_LOCKS_EXCLUDED(mu_) {
ClientHeartbeatRequest req;
req.set_iteration_client_id(iteration_client_id_);
if (IsCoordinatedRead()) {
mutex_lock l(mu_);
req.set_current_round(current_round_);
if (round_robin_round_limit_.has_value()) {
req.set_blocked_round(round_robin_round_limit_.value());
}
}
{
mutex_lock l(mu_);
double target_processing_time_nsec = ctx_->GetTargetProcessingTimeNsec();
req.set_target_processing_time_nsec(target_processing_time_nsec);
}
ClientHeartbeatResponse resp;
Status s = dispatcher_->ClientHeartbeat(req, resp);
if (!s.ok()) {
if (IsPreemptedError(s)) {
LOG(WARNING)
<< "Failed to heartbeat to dispatcher from iteration client id "
<< iteration_client_id_ << ". Dispatcher address: " << params_.address
<< ". Error: " << s;
return;
}
mutex_lock l(mu_);
    status_ = s;
    get_next_cv_.notify_all();
    return;
  }
mutex_lock l(mu_);
UpdateIterationFinished(resp.iteration_finished());
if (resp.optional_block_round_case() ==
ClientHeartbeatResponse::kBlockRound) {
TryBlockRound(resp.block_round());
} else {
round_robin_round_limit_ = std::nullopt;
worker_thread_cv_.notify_all();
}
UpdateTasks(resp);
RecordTFMetrics(resp);
}
void DataServiceClient::UpdateTasks(const ClientHeartbeatResponse& resp)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
absl::flat_hash_map<int64_t, TaskInfo> task_id_to_task;
for (auto& task : resp.task_info()) {
task_id_to_task[task.task_id()] = task;
}
if (iteration_finished_) {
return;
}
int index = 0;
while (index < tasks_.size()) {
std::shared_ptr<Task> task = tasks_[index];
if (task_id_to_task.contains(task->info.task_id())) {
task_id_to_task.erase(task->info.task_id());
++index;
} else {
if (task->end_of_sequence) {
finished_tasks_--;
}
tasks_.erase(tasks_.begin() + index);
if (index < next_task_index_) {
next_task_index_--;
}
if (!tasks_.empty() && next_task_index_ >= tasks_.size()) {
AdvanceTaskIndex();
}
}
}
for (auto& task : resp.task_info()) {
auto it = task_id_to_task.find(task.task_id());
if (it == task_id_to_task.end()) {
continue;
}
if (!ShouldReadFromTask(task)) {
VLOG(3) << "Skipping untargeted worker task " << task.task_id();
should_finish_iteration_ = false;
continue;
}
Status s = AddTask(it->second);
if (!s.ok()) {
status_ = s;
get_next_cv_.notify_all();
break;
}
}
}
bool DataServiceClient::ShouldReadFromTask(const TaskInfo& task) const
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (IsCoordinatedRead()) {
return true;
}
const bool is_local_task =
(LocalWorkers::Get(task.worker_address()) != nullptr);
if (params_.target_workers == TARGET_WORKERS_LOCAL && !is_local_task) {
return false;
}
const bool is_cross_tf_host_read = !is_local_task && IsColocatedTask(task);
if (params_.target_workers == TARGET_WORKERS_AUTO && is_cross_tf_host_read) {
return false;
}
return true;
}
void DataServiceClient::RecordTFMetrics(const ClientHeartbeatResponse& resp)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
for (const auto& task : resp.task_info()) {
if (worker_uids_.contains(task.worker_uid())) {
continue;
}
metrics::RecordTFDataServiceClientIterators(
task.worker_uid(), resp.deployment_mode(), params_.processing_mode,
IsCoordinatedRead());
worker_uids_.insert(task.worker_uid());
}
}
void DataServiceClient::UpdateBufferSize() TF_LOCKS_EXCLUDED(mu_) {
if (params_.max_outstanding_requests == model::kAutotune) {
mutex_lock l(mu_) | #include "tensorflow/core/data/service/client/data_service_client.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/time/time.h"
#include "tensorflow/core/data/service/client/common.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::testing::RangeDataset;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::_;
using ::testing::AtLeast;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::UnorderedElementsAreArray;
DataServiceParams GetDataServiceParams(
const std::string& dataset_id, const std::string& data_service_address,
const ProcessingModeDef::ShardingPolicy sharding_policy) {
DataServiceParams params;
params.dataset_id = dataset_id;
params.processing_mode.set_sharding_policy(sharding_policy);
params.address = data_service_address;
params.protocol = "grpc";
params.data_transfer_protocol = "grpc";
params.job_name = "test_job";
params.repetition = 0;
params.max_outstanding_requests = 100;
params.task_refresh_interval = absl::Milliseconds(100);
return params;
}
std::vector<int64_t> Range(const int64_t range) {
std::vector<int64_t> result;
for (int64_t i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
class TestDataServiceContext : public DataServiceContext {
public:
TestDataServiceContext() = default;
~TestDataServiceContext() override = default;
std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) override {
return absl::WrapUnique(
Env::Default()->StartThread({}, name, std::move(fn)));
}
MOCK_METHOD(void, RecordBufferEnqueue, (const std::vector<Tensor>& element),
(override));
MOCK_METHOD(void, RecordBufferDequeue, (const std::vector<Tensor>& element),
(override));
double GetTargetProcessingTimeNsec() const override { return 1.0e6; }
int64_t UpdateMaxOutstandingRequests(int64_t max_outstanding_requests,
int64_t new_size) override {
return new_size;
}
};
std::unique_ptr<TestDataServiceContext> GetTestDataServiceContext() {
return std::make_unique<TestDataServiceContext>();
}
template <class T>
StatusOr<std::vector<T>> GetResults(DataServiceClient& client) {
std::vector<T> results;
while (true) {
TF_ASSIGN_OR_RETURN(GetNextResult next,
client.GetNext(GetTestDataServiceContext));
if (next.end_of_sequence) {
return results;
}
results.push_back(next.tensors[0].unaligned_flat<T>().data()[0]);
}
return results;
}
template <class T>
StatusOr<T> GetNext(DataServiceClient& client) {
TF_ASSIGN_OR_RETURN(GetNextResult next,
client.GetNext(GetTestDataServiceContext));
if (next.end_of_sequence) {
return errors::OutOfRange(
"The tf.data service has reached the end of sequence");
}
return next.tensors[0].unaligned_flat<T>().data()[0];
}
TEST(DataServiceClientTest, NoSharding) {
TestCluster test_cluster(1);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> test_dataset(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
test_dataset.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF);
DataServiceClient client(params);
TF_ASSERT_OK(client.Initialize(nullptr,
nullptr));
EXPECT_THAT(GetResults<int64_t>(client),
IsOkAndHolds(ElementsAreArray(Range(10))));
client.Cancel();
}
TEST(DataServiceClientTest, DynamicSharding) {
TestCluster test_cluster(3);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> test_dataset(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
test_dataset.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::DYNAMIC);
DataServiceClient client(params);
TF_ASSERT_OK(client.Initialize(nullptr,
nullptr));
EXPECT_THAT(GetResults<int64_t>(client),
IsOkAndHolds(UnorderedElementsAreArray(Range(10))));
client.Cancel();
}
TEST(DataServiceClientTest, StaticSharding) {
TestCluster test_cluster(3);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> dataset_client(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
dataset_client.RegisterDataset(RangeDataset(10)));
DataServiceParams params =
GetDataServiceParams(dataset_id, test_cluster.DispatcherAddress(),
ProcessingModeDef::FILE_OR_DATA);
DataServiceClient client(params);
TF_ASSERT_OK(client.Initialize(nullptr,
nullptr));
EXPECT_THAT(GetResults<int64_t>(client),
IsOkAndHolds(UnorderedElementsAreArray(Range(10))));
client.Cancel();
}
TEST(DataServiceClientTest, RecordBufferEvents) {
TestCluster test_cluster(1);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> test_dataset(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
test_dataset.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF);
DataServiceClient client(params);
TF_ASSERT_OK(client.Initialize(nullptr,
nullptr));
auto mock_context = std::make_unique<TestDataServiceContext>();
TestDataServiceContext* ctx = mock_context.get();
EXPECT_CALL(*ctx, RecordBufferEnqueue(_)).Times(AtLeast(1));
EXPECT_CALL(*ctx, RecordBufferDequeue(_)).Times(AtLeast(1));
TF_ASSERT_OK_AND_ASSIGN(GetNextResult next, client.GetNext([&mock_context]() {
return std::move(mock_context);
}));
client.Cancel();
}
TEST(DataServiceClientTest, Cancel) {
TestCluster test_cluster(1);
TF_ASSERT_OK(test_cluster.Initialize());
DatasetClient<int64_t> dataset_client(test_cluster);
TF_ASSERT_OK_AND_ASSIGN(std::string dataset_id,
dataset_client.RegisterDataset(RangeDataset(10)));
DataServiceParams params = GetDataServiceParams(
dataset_id, test_cluster.DispatcherAddress(), ProcessingModeDef::OFF);
DataServiceClient client(params);
TF_ASSERT_OK(client.Initialize(nullptr,
nullptr));
client.Cancel();
EXPECT_THAT(client.GetNext(GetTestDataServiceContext),
StatusIs(error::CANCELLED));
}
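// A lightweight sketch of a metadata check: GetTraceMeMetadata() should
// surface the configured job name without requiring a live cluster. The exact
// set of keys reported beyond "job_name" is an implementation detail.
TEST(DataServiceClientTest, TraceMeMetadataReportsJobName) {
  DataServiceParams params = GetDataServiceParams(
      "dataset_id", "tf_data_service_address", ProcessingModeDef::OFF);
  DataServiceClient client(params);
  TraceMeMetadata metadata = client.GetTraceMeMetadata();
  bool found_job_name = false;
  for (const auto& entry : metadata) {
    if (entry.first == "job_name") {
      EXPECT_EQ(entry.second, "test_job");
      found_job_name = true;
    }
  }
  EXPECT_TRUE(found_job_name);
}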
TEST(DataServiceClientTest, ValidationError) {
DataServiceParams params = GetDataServiceParams(
"dataset_id", "tf_data_service_address", ProcessingModeDef::OFF);
params.target_workers = TARGET_WORKERS_LOCAL;
DataServiceClient client(params);
EXPECT_THAT(
      client.Initialize(nullptr, nullptr),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Local reads require local tf.data workers, but no local worker "
"is found.")));
}
}
}
} |
1,758 | cpp | tensorflow/tensorflow | ts_op_gen | tensorflow/js/ops/ts_op_gen.cc | tensorflow/js/ops/ts_op_gen_test.cc | #ifndef TENSORFLOW_JS_OPS_TS_OP_GEN_H_
#define TENSORFLOW_JS_OPS_TS_OP_GEN_H_
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
void WriteTSOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& ts_filename);
}
#endif
#include "tensorflow/js/ops/ts_op_gen.h"
#include <memory>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
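// An argument maps to a TypeScript Tensor[] when the op declares it through a
// type-list attr or a number attr; otherwise it is rendered as a single Tensor.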
static bool IsListAttr(const OpDef_ArgDef& arg) {
return !arg.type_list_attr().empty() || !arg.number_attr().empty();
}
struct ArgDefs {
ArgDefs(const OpDef::ArgDef& op_def_arg, const ApiDef::Arg& api_def_arg)
: op_def_arg(op_def_arg), api_def_arg(api_def_arg) {}
const OpDef::ArgDef& op_def_arg;
const ApiDef::Arg& api_def_arg;
};
struct OpAttrs {
OpAttrs(const OpDef::AttrDef& op_def_attr, const ApiDef::Attr& api_def_attr)
: op_def_attr(op_def_attr), api_def_attr(api_def_attr) {}
const OpDef::AttrDef& op_def_attr;
const ApiDef::Attr& api_def_attr;
};
class GenTypeScriptOp {
public:
GenTypeScriptOp(const OpDef& op_def, const ApiDef& api_def);
~GenTypeScriptOp();
string Code();
private:
void ProcessArgs();
void ProcessAttrs();
void AddAttrForArg(const string& attr, int arg_index);
string InputForAttr(const OpDef::AttrDef& op_def_attr);
void AddMethodSignature();
void AddOpAttrs();
void AddMethodReturnAndClose();
const OpDef& op_def_;
const ApiDef& api_def_;
string result_;
std::vector<ArgDefs> input_op_args_;
std::vector<OpAttrs> op_attrs_;
typedef std::unordered_map<string, std::vector<int>> AttrArgIdxMap;
AttrArgIdxMap attr_arg_idx_map_;
int num_outputs_;
};
GenTypeScriptOp::GenTypeScriptOp(const OpDef& op_def, const ApiDef& api_def)
: op_def_(op_def), api_def_(api_def), num_outputs_(0) {}
GenTypeScriptOp::~GenTypeScriptOp() = default;
string GenTypeScriptOp::Code() {
ProcessArgs();
ProcessAttrs();
AddMethodSignature();
AddOpAttrs();
AddMethodReturnAndClose();
strings::StrAppend(&result_, "\n");
return result_;
}
void GenTypeScriptOp::ProcessArgs() {
for (int i = 0; i < api_def_.arg_order_size(); i++) {
auto op_def_arg = FindInputArg(api_def_.arg_order(i), op_def_);
if (op_def_arg == nullptr) {
LOG(WARNING) << "Could not find OpDef::ArgDef for "
<< api_def_.arg_order(i);
continue;
}
auto api_def_arg = FindInputArg(api_def_.arg_order(i), api_def_);
if (api_def_arg == nullptr) {
LOG(WARNING) << "Could not find ApiDef::Arg for "
<< api_def_.arg_order(i);
continue;
}
if (!op_def_arg->type_attr().empty()) {
AddAttrForArg(op_def_arg->type_attr(), i);
} else if (!op_def_arg->type_list_attr().empty()) {
AddAttrForArg(op_def_arg->type_list_attr(), i);
}
if (!op_def_arg->number_attr().empty()) {
AddAttrForArg(op_def_arg->number_attr(), i);
}
input_op_args_.push_back(ArgDefs(*op_def_arg, *api_def_arg));
}
num_outputs_ = api_def_.out_arg_size();
}
void GenTypeScriptOp::ProcessAttrs() {
for (int i = 0; i < op_def_.attr_size(); i++) {
op_attrs_.push_back(OpAttrs(op_def_.attr(i), api_def_.attr(i)));
}
}
void GenTypeScriptOp::AddAttrForArg(const string& attr, int arg_index) {
auto iter = attr_arg_idx_map_.find(attr);
if (iter == attr_arg_idx_map_.end()) {
attr_arg_idx_map_.insert(AttrArgIdxMap::value_type(attr, {arg_index}));
} else {
iter->second.push_back(arg_index);
}
}
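// Concatenates (without separator) the names of the input args associated
// with `op_def_attr`; used when emitting the attr's value expression.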
string GenTypeScriptOp::InputForAttr(const OpDef::AttrDef& op_def_attr) {
string inputs;
auto arg_list = attr_arg_idx_map_.find(op_def_attr.name());
if (arg_list != attr_arg_idx_map_.end()) {
for (auto iter = arg_list->second.begin(); iter != arg_list->second.end();
++iter) {
strings::StrAppend(&inputs, input_op_args_[*iter].op_def_arg.name());
}
}
return inputs;
}
void GenTypeScriptOp::AddMethodSignature() {
strings::StrAppend(&result_, "export function ", api_def_.endpoint(0).name(),
"(");
bool is_first = true;
for (auto& in_arg : input_op_args_) {
if (is_first) {
is_first = false;
} else {
strings::StrAppend(&result_, ", ");
}
auto op_def_arg = in_arg.op_def_arg;
strings::StrAppend(&result_, op_def_arg.name(), ": ");
if (IsListAttr(op_def_arg)) {
strings::StrAppend(&result_, "tfc.Tensor[]");
} else {
strings::StrAppend(&result_, "tfc.Tensor");
}
}
if (num_outputs_ == 1) {
strings::StrAppend(&result_, "): tfc.Tensor {\n");
} else {
strings::StrAppend(&result_, "): tfc.Tensor[] {\n");
}
}
void GenTypeScriptOp::AddOpAttrs() {
strings::StrAppend(&result_, " const opAttrs = [\n");
bool is_first = true;
for (auto& attr : op_attrs_) {
if (is_first) {
is_first = false;
} else {
strings::StrAppend(&result_, ",\n");
}
strings::StrAppend(&result_, " ");
if (attr.op_def_attr.type() == "type") {
strings::StrAppend(&result_, "createTensorsTypeOpAttr('",
attr.op_def_attr.name(), "', ",
InputForAttr(attr.op_def_attr), ")");
} else if (attr.op_def_attr.type() == "int") {
strings::StrAppend(&result_, "{name: '", attr.op_def_attr.name(), "', ");
strings::StrAppend(&result_, "type: nodeBackend().binding.TF_ATTR_INT, ");
strings::StrAppend(&result_, "value: ", InputForAttr(attr.op_def_attr),
".length}");
}
}
strings::StrAppend(&result_, "\n ];\n");
}
void GenTypeScriptOp::AddMethodReturnAndClose() {
strings::StrAppend(&result_, " return null;\n}\n");
}
void WriteTSOp(const OpDef& op_def, const ApiDef& api_def, WritableFile* ts) {
  GenTypeScriptOp ts_op(op_def, api_def);
  TF_CHECK_OK(ts->Append(ts_op.Code()));
}
void StartFile(WritableFile* ts_file) {
const string header =
R"header(
import * as tfc from '@tensorflow/tfjs-core';
import {createTensorsTypeOpAttr, nodeBackend} from './op_utils';
)header";
TF_CHECK_OK(ts_file->Append(header));
}
}
void WriteTSOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& ts_filename) {
Env* env = Env::Default();
std::unique_ptr<WritableFile> ts_file = nullptr;
TF_CHECK_OK(env->NewWritableFile(ts_filename, &ts_file));
StartFile(ts_file.get());
for (const auto& op_def : ops.op()) {
if (op_def.has_deprecation() &&
op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) {
continue;
}
const auto* api_def = api_def_map.GetApiDef(op_def.name());
if (api_def->visibility() == ApiDef::VISIBLE) {
WriteTSOp(op_def, *api_def, ts_file.get());
}
}
TF_CHECK_OK(ts_file->Close());
}
} | #include "tensorflow/js/ops/ts_op_gen.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
void ExpectContainsStr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectDoesNotContainStr(StringPiece s, StringPiece expected) {
  EXPECT_FALSE(absl::StrContains(s, expected))
      << "'" << s << "' unexpectedly contains '" << expected << "'";
}
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "images"
type_attr: "T"
number_attr: "N"
description: "Images to process."
}
input_arg {
name: "dim"
description: "Description for dim."
type: DT_FLOAT
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for images"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
default_value {
i: 1
}
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
void GenerateTsOpFileText(const string& op_def_str, const string& api_def_str,
string* ts_file_text) {
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(
op_def_str.empty() ? kBaseOpDef : op_def_str, &op_defs);
ApiDefMap api_def_map(op_defs);
if (!api_def_str.empty()) {
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def_str));
}
const string& tmpdir = testing::TmpDir();
const auto ts_file_path = io::JoinPath(tmpdir, "test.ts");
WriteTSOps(op_defs, api_def_map, ts_file_path);
TF_ASSERT_OK(ReadFileToString(env, ts_file_path, ts_file_text));
}
TEST(TsOpGenTest, TestImports) {
string ts_file_text;
GenerateTsOpFileText("", "", &ts_file_text);
const string expected = R"(
import * as tfc from '@tensorflow/tfjs-core';
import {createTensorsTypeOpAttr, nodeBackend} from './op_utils';
)";
ExpectContainsStr(ts_file_text, expected);
}
TEST(TsOpGenTest, InputSingleAndList) {
const string api_def = R"pb(
op { graph_op_name: "Foo" arg_order: "dim" arg_order: "images" }
)pb";
string ts_file_text;
GenerateTsOpFileText("", api_def, &ts_file_text);
const string expected = R"(
export function Foo(dim: tfc.Tensor, images: tfc.Tensor[]): tfc.Tensor {
)";
ExpectContainsStr(ts_file_text, expected);
}
TEST(TsOpGenTest, TestVisibility) {
const string api_def = R"(
op {
graph_op_name: "Foo"
visibility: HIDDEN
}
)";
string ts_file_text;
GenerateTsOpFileText("", api_def, &ts_file_text);
const string expected = R"(
export function Foo(images: tfc.Tensor[], dim: tfc.Tensor): tfc.Tensor {
)";
ExpectDoesNotContainStr(ts_file_text, expected);
}
TEST(TsOpGenTest, SkipDeprecated) {
const string op_def = R"(
op {
name: "DeprecatedFoo"
input_arg {
name: "input"
type_attr: "T"
description: "Description for input."
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for input"
allowed_values {
list {
type: DT_FLOAT
}
}
}
deprecation {
explanation: "Deprecated."
}
}
)";
string ts_file_text;
GenerateTsOpFileText(op_def, "", &ts_file_text);
ExpectDoesNotContainStr(ts_file_text, "DeprecatedFoo");
}
TEST(TsOpGenTest, MultiOutput) {
const string op_def = R"(
op {
name: "MultiOutputFoo"
input_arg {
name: "input"
description: "Description for input."
type_attr: "T"
}
output_arg {
name: "output1"
description: "Description for output 1."
type: DT_FLOAT
}
output_arg {
name: "output2"
description: "Description for output 2."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for input"
allowed_values {
list {
type: DT_FLOAT
}
}
}
summary: "Summary for op MultiOutputFoo."
description: "Description for op MultiOutputFoo."
}
)";
string ts_file_text;
GenerateTsOpFileText(op_def, "", &ts_file_text);
const string expected = R"(
export function MultiOutputFoo(input: tfc.Tensor): tfc.Tensor[] {
)";
ExpectContainsStr(ts_file_text, expected);
}
TEST(TsOpGenTest, OpAttrs) {
string ts_file_text;
GenerateTsOpFileText("", "", &ts_file_text);
const string expectedFooAttrs = R"(
const opAttrs = [
createTensorsTypeOpAttr('T', images),
{name: 'N', type: nodeBackend().binding.TF_ATTR_INT, value: images.length}
];
)";
ExpectContainsStr(ts_file_text, expectedFooAttrs);
}
}
} |
1,759 | cpp | tensorflow/tensorflow | object | tensorflow/cc/experimental/libtf/object.cc | tensorflow/cc/experimental/libtf/tests/object_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_OBJECT_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_OBJECT_H_
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tf {
namespace libtf {
using TaggedValue = impl::TaggedValue;
class Handle;
template <class T>
Handle Convert(T value);
class Handle {
public:
Handle() : value_(TaggedValue::None()) {}
public:
explicit Handle(TaggedValue value) : value_(std::move(value)) {}
protected:
TaggedValue value_;
friend class Integer;
friend class Float;
friend class String;
friend class Object;
friend class List;
friend class Dictionary;
friend class Tuple;
friend class Callable;
friend class Tensor;
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
template <typename Fn, class TRET, class... ArgsOut>
friend class UneraseCallHelper;
};
template <class T>
tensorflow::StatusOr<T> Cast(Handle handle);
class None final : public Handle {
public:
None() : Handle(TaggedValue::None()) {}
private:
explicit None(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class String final : public Handle {
public:
explicit String(const char* s) : Handle(TaggedValue(s)) {}
const char* get() const { return value_.s(); }
private:
explicit String(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
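// A dictionary-backed object. Attribute lookup falls back to the handle
// stored under ParentKey(), giving prototype-style inheritance.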
class Object : public Handle {
public:
Object() : Handle(TaggedValue::Dict()) {}
static const String& ParentKey();
template <class T = Handle>
tensorflow::StatusOr<T> Get(const String& key) {
auto& dict = value_.dict();
auto it = dict.find(key.value_);
if (it != dict.end()) {
return Cast<T>(Handle(it->second));
} else {
auto it_class = dict.find(ParentKey().value_);
if (it_class != dict.end()) {
auto& class_dict_maybe = it_class->second;
if (class_dict_maybe.type() == TaggedValue::DICT) {
auto& dict = class_dict_maybe.dict();
auto it = dict.find(key.value_);
if (it != dict.end()) {
return Cast<T>(Handle(it->second));
}
}
}
}
return absl::NotFoundError("Key not in dictionary.");
}
void Set(const String& key, Handle h) {
value_.dict()[key.value_] = std::move(h.value_);
}
void Unset(const String& key) { value_.dict().erase(key.value_); }
private:
explicit Object(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class Dictionary final : public Handle {
public:
Dictionary() : Handle(TaggedValue::Dict()) {}
template <class T>
tensorflow::StatusOr<T> Get(const Handle& key) {
auto it = value_.dict().find(key.value_);
if (it != value_.dict().end()) return Cast<T>(Handle(it->second));
return absl::NotFoundError("Key not in dictionary.");
}
void Set(const String& key, Handle value) {
value_.dict()[key.value_] = std::move(value.value_);
}
void Set(const Handle& key, Handle value) {
value_.dict()[key.value_] = std::move(value.value_);
}
size_t size() const { return value_.dict().size(); }
private:
explicit Dictionary(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class Integer final : public Handle {
public:
explicit Integer(Handle h) : Handle(h.value_) {}
explicit Integer(int64_t i) : Handle(TaggedValue(i)) {}
int64_t get() const { return value_.i64().get(); }
private:
explicit Integer(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class Float final : public Handle {
public:
explicit Float(Handle h) : Handle(h.value_) {}
explicit Float(float i) : Handle(TaggedValue(i)) {}
float get() const { return value_.f32().get(); }
private:
explicit Float(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class Tensor final : public Handle {
public:
explicit Tensor(Handle h) : Handle(h.value_) {}
template <class T>
tensorflow::Status GetValue(absl::Span<T> data) const;
private:
explicit Tensor(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
template <class T>
tensorflow::Status Tensor::GetValue(absl::Span<T> data) const {
tensorflow::AbstractTensorPtr t;
{
const auto abstract_t = value_.tensor().get();
if (!tensorflow::ImmediateExecutionTensorHandle::classof(abstract_t)) {
return absl::InvalidArgumentError(
"Attempting to get value of non eager tensor.");
}
auto imm_t =
static_cast<tensorflow::ImmediateExecutionTensorHandle*>(abstract_t);
tensorflow::Status status;
t.reset(imm_t->Resolve(&status));
if (!status.ok()) {
return status;
}
}
if (data.size() != t->NumElements()) {
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Mismatched number of elements: \n", "Expected: ", data.size(), "\n",
"Actual: ", t->NumElements(), "\n"));
}
memcpy(data.data(), t->Data(), t->ByteSize());
return ::tensorflow::OkStatus();
}
class Tuple : public Handle {
public:
template <class... T>
explicit Tuple(T... args) : Handle(TaggedValue::Tuple()) {
add(args...);
}
template <class T>
tensorflow::StatusOr<T> Get(size_t i) {
if (i >= value_.tuple().size())
return absl::InvalidArgumentError("Out of bounds index.");
return Cast<T>(Handle(value_.tuple()[i]));
}
size_t size() const { return value_.tuple().size(); }
private:
void add() {}
template <class T, class... T2>
void add(T arg, T2... args) {
value_.tuple().emplace_back(Convert(arg).value_);
add(args...);
}
explicit Tuple(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class List final : public Handle {
public:
  template <class... T>
  explicit List(T... args) : Handle(TaggedValue::List()) {
    // Forward each constructor argument into the list (fold over append).
    (append(args), ...);
  }
template <class T>
tensorflow::StatusOr<T> Get(size_t i) {
if (i >= size()) {
return absl::InvalidArgumentError("Out of bounds index.");
}
return Cast<T>(Handle(value_.list()[i]));
}
tensorflow::Status Set(size_t i, Handle h) {
if (i >= size()) {
return absl::InvalidArgumentError("Out of bounds index.");
}
value_.list()[i] = std::move(h.value_);
return ::tensorflow::OkStatus();
}
template <class T>
void append(T arg) {
value_.list().emplace_back(Convert(arg).value_);
}
size_t size() const { return value_.list().size(); }
private:
explicit List(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
class KeywordArg {
public:
explicit KeywordArg(const char* s) : key_(String(s)), value_() {}
template <class T>
KeywordArg& operator=(const T obj) {
value_ = Convert(obj);
return *this;
}
friend class Callable;
private:
String key_;
Handle value_;
};
class Callable final : public Handle {
private:
void CollectArgs(Tuple& args, Dictionary& kwargs, int idx) {}
template <typename T, typename... Types>
void CollectArgs(Tuple& args, Dictionary& kwargs, int idx, T v,
Types... vars) {
const Handle& o = Convert(v);
args.value_.tuple().emplace_back(o.value_);
CollectArgs(args, kwargs, idx + 1, vars...);
}
template <typename... Types>
void CollectArgs(Tuple& args, Dictionary& kwargs, int idx, KeywordArg v,
Types... vars) {
kwargs.Set(v.key_, v.value_);
CollectArgs(args, kwargs, idx + 1, vars...);
}
public:
template <typename TReturn = Handle, typename... Types>
tensorflow::StatusOr<TReturn> Call(Types... vars) {
Dictionary kwargs = Dictionary();
Tuple args;
CollectArgs(args, kwargs, 0, vars...);
auto maybe_value =
value_.func()(std::move(args.value_), std::move(kwargs.value_));
if (!maybe_value.ok()) {
return maybe_value.status();
}
return Cast<TReturn>(Handle(maybe_value.value()));
}
public:
explicit Callable(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> Cast(Handle handle);
};
namespace internal {
class Capsule final : public Handle {
public:
template <class T>
T cast() {
return static_cast<T>(value_.capsule());
}
private:
explicit Capsule(TaggedValue v) : Handle(std::move(v)) {}
template <class T>
friend tensorflow::StatusOr<T> tf::libtf::Cast(Handle handle);
};
}
// Maps a handle type to its TaggedValue type tag. Only the specializations
// below are defined, so instantiating this primary template for an
// unsupported type is a link-time error rather than undefined behavior from
// falling off the end of a non-void function.
template <class T>
TaggedValue::Type TypeToTaggedType();
template <>
inline TaggedValue::Type TypeToTaggedType<Handle>() {
return TaggedValue::Type::NONE;
}
template <>
inline TaggedValue::Type TypeToTaggedType<None>() {
return TaggedValue::Type::NONE;
}
template <>
inline TaggedValue::Type TypeToTaggedType<String>() {
return TaggedValue::Type::STRING;
}
template <>
inline TaggedValue::Type TypeToTaggedType<Callable>() {
return TaggedValue::Type::FUNC;
}
template <>
inline TaggedValue::Type TypeToTaggedType<Integer>() {
return TaggedValue::Type::INT64;
}
template <>
inline TaggedValue::Type TypeToTaggedType<Float>() {
return TaggedValue::Type::FLOAT32;
}
template <>
inline TaggedValue::Type TypeToTaggedType<Object>() {
return TaggedValue::Type::DICT;
}
template <>
inline TaggedValue::Type TypeToTaggedType<Dictionary>() {
return TaggedValue::Type::DICT;
}
template <>
inline TaggedValue::Type TypeToTaggedType<List>() {
return TaggedValue::Type::LIST;
}
template <>
inline TaggedValue::Type TypeToTaggedType<Tensor>() {
return TaggedValue::Type::TENSOR;
}
template <>
inline TaggedValue::Type TypeToTaggedType<internal::Capsule>() {
return TaggedValue::Type::CAPSULE;
}
template <class T>
tensorflow::StatusOr<T> Cast(Handle handle) {
if (handle.value_.type() == TypeToTaggedType<T>() ||
std::is_same<T, Handle>::value)
return T((std::move(handle.value_)));
return absl::InvalidArgumentError("Incompatible cast.");
}
template <>
inline Handle Convert(const char* value) {
return String(value);
}
template <>
inline Handle Convert(int32_t value) {
return Integer(value);
}
template <>
inline Handle Convert(int64_t value) {
return Integer(value);
}
template <>
inline Handle Convert(float value) {
return Float(value);
}
template <class T>
inline Handle Convert(T value) {
return Handle(std::move(value));
}
template <typename TF, typename TReturn, typename... TFuncArgs>
class CallableWrapper;
template <typename TLambda>
class CallableWrapperUnpackArgs
: public CallableWrapperUnpackArgs<decltype(&TLambda::operator())> {
public:
CallableWrapperUnpackArgs(TLambda fn, const char* name)
: CallableWrapperUnpackArgs<decltype(&TLambda::operator())>(fn, name) {}
};
template <typename TReturn, typename... TFuncArgs>
class CallableWrapperUnpackArgs<TReturn (*)(TFuncArgs...)>
: public CallableWrapper<TReturn (*)(TFuncArgs...), TReturn, TFuncArgs...> {
using Fn = TReturn (*)(TFuncArgs...);
public:
CallableWrapperUnpackArgs(Fn fn, const char* name)
: CallableWrapper<Fn, TReturn, TFuncArgs...>(fn, name) {}
};
template <typename TClass, typename TReturn, typename... TFuncArgs>
class CallableWrapperUnpackArgs<TReturn (TClass::*)(TFuncArgs...) const>
: public CallableWrapper<TClass, TReturn, TFuncArgs...> {
using Fn = TClass;
public:
CallableWrapperUnpackArgs(Fn fn, const char* name)
: CallableWrapper<Fn, TReturn, TFuncArgs...>(fn, name) {}
};
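// UneraseCallHelper peels one argument per recursion step: it casts the next
// TaggedValue to the corresponding signature type and appends it to the typed
// argument pack, until the base case finally invokes the functor.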
template <class Fn, typename TReturn, class... ArgsOut>
class UneraseCallHelper;
template <class Fn, typename TReturn>
class UneraseCallHelper<Fn, TReturn> {
public:
template <typename... ArgsOut>
static tensorflow::StatusOr<TaggedValue> Call(const char* name, Fn functor_,
int argument_index,
const TaggedValue& args_in,
ArgsOut... args) {
TReturn ret = functor_(args...);
return ret.value_;
}
};
template <class Fn, typename TReturn, class TSignatureArg,
class... TSignatureRest>
class UneraseCallHelper<Fn, TReturn, TSignatureArg, TSignatureRest...> {
public:
template <typename... TArgsOut>
static tensorflow::StatusOr<TaggedValue> Call(const char* name, Fn fn,
int argument_index,
TaggedValue& args_in,
TArgsOut... args) {
Handle h(std::move(args_in.tuple()[argument_index]));
tensorflow::StatusOr<TSignatureArg> x = Cast<TSignatureArg>(std::move(h));
if (!x.ok())
      return absl::InvalidArgumentError(
          absl::StrCat("Function ", name, " Arg ", argument_index,
                       " cannot be cast to desired signature type "));
return UneraseCallHelper<Fn, TReturn, TSignatureRest...>::template Call(
name, fn, argument_index + 1, args_in, args..., *x);
}
};
template <class Fn, typename TReturn, typename... TFuncArgs>
class CallableWrapper {
private:
Fn functor_;
const char* name_;
public:
explicit CallableWrapper(Fn fn, const char* name)
: functor_(fn), name_(name) {}
tensorflow::StatusOr<TaggedValue> operator()(TaggedValue args,
TaggedValue kwargs) {
constexpr size_t argument_count = sizeof...(TFuncArgs);
if (argument_count != args.tuple().size())
      return absl::InvalidArgumentError(absl::StrCat(
          "Function ", name_, " expected ", argument_count, " args."));
return UneraseCallHelper<Fn, TReturn, TFuncArgs...>::Call(name_, functor_,
0, args);
}
};
#define TFLIB_CALLABLE_ADAPTOR(x) ::tf::libtf::CreateCallableAdaptor(x, #x)
template <class TF>
TaggedValue CreateCallableAdaptor(TF x, const char* name) {
return TaggedValue((CallableWrapperUnpackArgs<TF>(x, name)));
}
}
}
#endif
#include "tensorflow/cc/experimental/libtf/object.h"
#include <type_traits>
namespace tf {
namespace libtf {
const String& Object::ParentKey() {
static const String* key = new String("__parent__");
return *key;
}
}
} | #include "tensorflow/cc/experimental/libtf/object.h"
#include <cstdint>
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/cc/experimental/libtf/value_iostream.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
TEST(ObjectTest, TestDictionary) {
Dictionary foo;
foo.Set(String("a"), Integer(33));
foo.Set(String("b"), Integer(33));
EXPECT_EQ(foo.Get<Integer>(String("b"))->get(), 33);
}
TEST(ObjectTest, TestTuple) {
Tuple foo(String("a"), Integer(33), Float(10.f));
EXPECT_EQ(foo.size(), 3);
EXPECT_EQ(foo.Get<Integer>(1)->get(), 33);
}
TEST(ObjectTest, TestList) {
List l;
EXPECT_EQ(l.size(), 0);
l.append(Integer(3));
EXPECT_EQ(l.Get<Integer>(0)->get(), 3);
EXPECT_EQ(l.size(), 1);
}
TaggedValue AddIntegers(TaggedValue args_, TaggedValue kwargs_) {
auto& args = args_.tuple();
return TaggedValue(args[0].i64() + args[1].i64());
}
TEST(ObjectTest, TestCast) {
Integer i(3);
auto result = Cast<String>(i);
ASSERT_TRUE(!result.ok());
}
TEST(ObjectTest, TestCall) {
TaggedValue add_func(AddIntegers);
Callable add(add_func);
TF_ASSERT_OK_AND_ASSIGN(Integer i,
add.Call<Integer>(Integer(1), Integer(10)));
EXPECT_EQ(i.get(), 11);
TF_ASSERT_OK_AND_ASSIGN(
Integer i2, add.Call<Integer>(1, Integer(10), KeywordArg("foo") = 3));
EXPECT_EQ(i2.get(), 11);
}
TEST(ObjectTest, MakeObject) {
Object parent;
parent.Set(String("test3"), Integer(3));
Object child;
child.Set(String("test1"), Integer(1));
child.Set(String("test2"), Integer(2));
child.Set(Object::ParentKey(), parent);
EXPECT_EQ(child.Get<Integer>(String("test1"))->get(), 1);
EXPECT_EQ(child.Get<Integer>(String("test2"))->get(), 2);
EXPECT_EQ(child.Get<Integer>(String("test3"))->get(), 3);
ASSERT_FALSE(child.Get<Integer>(String("test4")).status().ok());
TF_ASSERT_OK(child.Get(String("test3")).status());
}
TEST(ObjectTest, CallFunctionOnObject) {
Object module;
module.Set(String("add"), Callable(TaggedValue(AddIntegers)));
TF_ASSERT_OK_AND_ASSIGN(Callable method, module.Get<Callable>(String("add")));
TF_ASSERT_OK_AND_ASSIGN(Integer val, method.Call<Integer>(1, 2));
EXPECT_EQ(val.get(), 3);
}
TEST(ObjectTest, Capsule) {
Object obj;
int* hundred = new int(100);
Handle capsule =
Handle(TaggedValue::Capsule(static_cast<void*>(hundred), [](void* p) {
delete static_cast<int*>(p);
}));
obj.Set(String("hundred"), capsule);
EXPECT_EQ(*static_cast<int*>(
obj.Get<internal::Capsule>(String("hundred"))->cast<int*>()),
100);
}
None AppendIntegerToList(List a, Integer b) {
a.append(b);
return None();
}
Integer AddIntegersTyped(Integer a, Integer b) {
return Integer(a.get() + b.get());
}
Integer ReturnFive() { return Integer(5); }
TEST(TypeUneraseCallTest, TestCallable) {
Callable add(TFLIB_CALLABLE_ADAPTOR(AddIntegersTyped));
auto res = add.Call<Integer>(Integer(3), Integer(1));
EXPECT_EQ(res->get(), 4);
}
TEST(TypeUneraseCallTest, TestAppend) {
Callable append(TFLIB_CALLABLE_ADAPTOR(AppendIntegerToList));
List l;
TF_ASSERT_OK(append.Call<None>(l, Integer(3)).status());
TF_ASSERT_OK(append.Call<None>(l, Integer(6)).status());
EXPECT_EQ(l.size(), 2);
EXPECT_EQ(l.Get<Integer>(0)->get(), 3);
EXPECT_EQ(l.Get<Integer>(1)->get(), 6);
}
TEST(TypeUneraseCallTest, TestCallableWrongArgs) {
Callable append(TFLIB_CALLABLE_ADAPTOR(AddIntegersTyped));
ASSERT_FALSE(append.Call<None>(Object(), Integer(3)).ok());
ASSERT_FALSE(append.Call<None>(Object(), Object()).ok());
ASSERT_FALSE(append.Call().ok());
ASSERT_FALSE(append.Call(Integer(3)).ok());
ASSERT_FALSE(append.Call(Integer(3), Integer(4), Integer(5)).ok());
}
Handle Polymorph(Handle a) {
auto i = Cast<Integer>(a);
if (i.ok()) {
return Integer(i->get() * 2);
}
auto f = Cast<Float>(a);
if (f.ok()) {
return Float(f->get() * 2.f);
}
return None();
}
TEST(TypeUneraseCallTest, TestCallableGeneric) {
Callable f(TFLIB_CALLABLE_ADAPTOR(Polymorph));
EXPECT_EQ(f.Call<Float>(Float(.2))->get(), .4f);
EXPECT_EQ(Cast<Float>(*f.Call(Float(.2)))->get(), .4f);
EXPECT_EQ(f.Call<Integer>(Integer(3))->get(), 6);
}
TEST(TypeUneraseCallTest, TestLambda) {
Callable c(
TFLIB_CALLABLE_ADAPTOR([](Integer a) { return Integer(a.get() * 2); }));
EXPECT_EQ(c.Call<Integer>(Integer(3))->get(), 6);
int call_count = 0;
Callable f(TFLIB_CALLABLE_ADAPTOR([&call_count](Integer a, Integer b) {
call_count++;
return Integer(a.get() + b.get());
}));
EXPECT_EQ(f.Call<Integer>(Integer(3), Integer(-1))->get(), 2);
EXPECT_EQ(f.Call<Integer>(Integer(3), Integer(-3))->get(), 0);
EXPECT_EQ(call_count, 2);
}
}
} |
1,760 | cpp | tensorflow/tensorflow | module | tensorflow/cc/experimental/libtf/module.cc | tensorflow/cc/experimental/libtf/tests/module_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_MODULE_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_MODULE_H_
#include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/cc/experimental/libtf/runtime/runtime.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
namespace tf {
namespace libtf {
namespace impl {
tensorflow::StatusOr<Handle> BuildSavedUserObject(
tensorflow::SavedObject saved_object_proto);
tensorflow::StatusOr<std::vector<Handle>> BuildObjects(
tensorflow::libexport::TFPackage& tf_package);
tensorflow::StatusOr<Handle> BuildProgram(
runtime::Runtime runtime, tensorflow::libexport::TFPackage& tf_package);
}
}
}
#endif
#include "tensorflow/cc/experimental/libtf/module.h"
#include <string>
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
namespace tf {
namespace libtf {
namespace impl {
using tensorflow::libexport::TFPackage;
using tf::libtf::runtime::Runtime;
tensorflow::StatusOr<std::vector<Handle>> BuildObjects(TFPackage& tf_package) {
std::vector<Handle> objects;
  const tensorflow::SavedObjectGraph& object_graph =
      tf_package.GetObjectGraph();
for (auto& node : object_graph.nodes()) {
if (node.kind_case() == tensorflow::SavedObject::kUserObject) {
tensorflow::StatusOr<Handle> result = BuildSavedUserObject(node);
if (result.ok()) {
objects.push_back(*result);
} else {
return result.status();
}
}
}
return objects;
}
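// Maps a SavedObject's user-object identifier to an in-memory handle; only a
// few container wrappers are recognized so far, anything else is
// Unimplemented.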
tensorflow::StatusOr<Handle> BuildSavedUserObject(
tensorflow::SavedObject saved_object_proto) {
if (saved_object_proto.kind_case() != tensorflow::SavedObject::kUserObject) {
return tensorflow::errors::InvalidArgument("Not a UserObject.");
}
std::string identifier = saved_object_proto.user_object().identifier();
if (identifier == "trackable_list_wrapper") {
tf::libtf::List user_list;
return user_list;
}
if (identifier == "trackable_dict_wrapper") {
tf::libtf::Dictionary user_dict;
return user_dict;
}
if (identifier == "signature_map") {
tf::libtf::Dictionary signature_map;
return signature_map;
}
if (identifier == "_generic_user_object") {
tf::libtf::Dictionary user_object;
return user_object;
}
return tensorflow::errors::Unimplemented(absl::StrCat(
"UserObject with identifier '", identifier, "' not implemented."));
}
tensorflow::Status RegisterConcreteFunctions(Runtime runtime,
TFPackage tf_package) {
return tensorflow::errors::Unimplemented("Not implemented.");
}
tensorflow::Status InitializeVariables(Runtime runtime, TFPackage tf_package,
std::vector<Handle> objects) {
return tensorflow::errors::Unimplemented("Not implemented.");
}
tensorflow::Status SetupPolymorphicFunctions(Runtime runtime,
TFPackage tf_package,
std::vector<Handle> objects) {
return tensorflow::errors::Unimplemented("Not implemented.");
}
tensorflow::Status SetupFunctionCaptures(Runtime runtime, TFPackage tf_package,
std::vector<Handle> objects) {
return tensorflow::errors::Unimplemented("Not implemented.");
}
tensorflow::StatusOr<Handle> BuildObjectHierarchy(TFPackage tf_package,
std::vector<Handle> objects) {
return tensorflow::errors::Unimplemented("Not implemented.");
}
tensorflow::StatusOr<Handle> BuildProgram(Runtime runtime,
TFPackage& tf_package) {
return tensorflow::errors::Unimplemented("Not implemented.");
}
}
}
} | #include "tensorflow/cc/experimental/libtf/module.h"
#include <string>
#include "tensorflow/cc/experimental/libtf/runtime/core/core.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
namespace tf {
namespace libtf {
namespace impl {
using ::tensorflow::libexport::TFPackage;
using ::tensorflow::testing::StatusIs;
using ::tf::libtf::runtime::Runtime;
TEST(ModuleTest, TestStubbedFunctions) {
Runtime runtime = runtime::core::Runtime();
TFPackage tf_package;
tensorflow::StatusOr<Handle> result = BuildProgram(runtime, tf_package);
ASSERT_FALSE(result.status().ok());
}
TEST(ModuleTest, TestBuildObjectsDataStructures) {
const std::string path = tensorflow::GetDataDependencyFilepath(
"tensorflow/cc/experimental/libtf/tests/testdata/data-structure-model");
TF_ASSERT_OK_AND_ASSIGN(TFPackage tf_package, TFPackage::Load(path));
TF_ASSERT_OK_AND_ASSIGN(std::vector<Handle> objects,
BuildObjects(tf_package));
EXPECT_EQ(objects.size(), 7);
TF_ASSERT_OK_AND_ASSIGN(tf::libtf::Dictionary node,
Cast<tf::libtf::Dictionary>(objects.front()));
for (unsigned int i = 1; i < 4; i++) {
TF_ASSERT_OK_AND_ASSIGN(tf::libtf::List node,
Cast<tf::libtf::List>(objects.at(i)));
}
for (unsigned int i = 4; i < 7; i++) {
TF_ASSERT_OK_AND_ASSIGN(tf::libtf::Dictionary node,
Cast<tf::libtf::Dictionary>(objects.at(i)));
}
}
TEST(ModuleTest, TestBuildEmptyList) {
tensorflow::SavedObject saved_object_proto;
const std::string pb_txt = R"pb(
user_object {
identifier: "trackable_list_wrapper"
version { producer: 1 min_consumer: 1 }
}
)pb";
ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
pb_txt, &saved_object_proto));
TF_ASSERT_OK_AND_ASSIGN(Handle result,
BuildSavedUserObject(saved_object_proto));
EXPECT_EQ(Cast<tf::libtf::List>(result)->size(), 0);
}
TEST(ModuleTest, TestBuildEmptyDict) {
tensorflow::SavedObject saved_object_proto;
const std::string pb_txt = R"pb(
user_object {
identifier: "trackable_dict_wrapper"
version { producer: 1 min_consumer: 1 }
}
)pb";
ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
pb_txt, &saved_object_proto));
TF_ASSERT_OK_AND_ASSIGN(Handle result,
BuildSavedUserObject(saved_object_proto));
EXPECT_EQ(Cast<tf::libtf::Dictionary>(result)->size(), 0);
}
TEST(ModuleTest, TestBuildSignatureMap) {
tensorflow::SavedObject saved_object_proto;
const std::string pb_txt = R"pb(
user_object {
identifier: "signature_map"
version { producer: 1 min_consumer: 1 }
}
)pb";
ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
pb_txt, &saved_object_proto));
TF_ASSERT_OK_AND_ASSIGN(Handle result,
BuildSavedUserObject(saved_object_proto));
EXPECT_EQ(Cast<tf::libtf::Dictionary>(result)->size(), 0);
}
TEST(ModuleTest, TestUnimplementedUserObject) {
tensorflow::SavedObject saved_object_proto;
const std::string pb_txt = R"pb(
user_object {
identifier: "foo"
version { producer: 1 min_consumer: 1 }
}
)pb";
ASSERT_TRUE(::tensorflow::protobuf::TextFormat::ParseFromString(
pb_txt, &saved_object_proto));
EXPECT_THAT(
BuildSavedUserObject(saved_object_proto),
StatusIs(tensorflow::error::UNIMPLEMENTED, ::testing::HasSubstr("foo")));
}
}
}
} |
1,761 | cpp | tensorflow/tensorflow | mlir_transform | tensorflow/cc/experimental/libtf/mlir/mlir_transform.cc | tensorflow/cc/experimental/libtf/tests/mlir_transform_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_MLIR_MLIR_TRANSFORM_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_MLIR_MLIR_TRANSFORM_H_
#include "tensorflow/cc/experimental/libtf/object.h"
namespace tf {
namespace libtf {
Object MLIR();
}
}
#endif
#include "tensorflow/cc/experimental/libtf/mlir/mlir_transform.h"
#include <string>
#include <utility>
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/cc/experimental/libtf/value.h"
#include "tensorflow/cc/saved_model/bundle_v2.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
namespace tf {
namespace libtf {
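// Loads a SavedModel V2 bundle and imports it into MLIR. Returns None on any
// failure; on success returns an Object holding the module behind a capsule,
// plus a ToString callable that prints the MLIR text.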
Handle LoadModule(Object self, String saved_model) {
tensorflow::SavedModelV2Bundle bundle;
tensorflow::Status status =
tensorflow::SavedModelV2Bundle::Load(saved_model.get(), &bundle);
if (!status.ok()) {
return None();
}
auto* context = self.Get<internal::Capsule>(String("_context"))
->cast<mlir::MLIRContext*>();
absl::Span<std::string> exported_names(nullptr, 0);
auto module_or =
tensorflow::ConvertSavedModelToMlir(&bundle, context, exported_names);
if (!module_or.status().ok()) {
return None();
}
Object obj;
obj.Set(
String("_module"),
Handle(impl::TaggedValue::Capsule(new mlir::OwningOpRef<mlir::ModuleOp>(
std::move(module_or).value()))));
auto get_string = [](Object self) {
auto ref = self.Get<internal::Capsule>(String("_module"))
->cast<mlir::OwningOpRef<mlir::ModuleOp>*>();
return String(tensorflow::MlirModuleToString(ref->get(), false).c_str());
};
obj.Set(String("ToString"), Callable(TFLIB_CALLABLE_ADAPTOR(get_string)));
return obj;
}
None SaveModule(Object self, Object module, String directory) {
return None();
}
None Transform(Object self, Object module, List passes) {
return None();
}
Object MLIR() {
Object obj;
obj.Set(String("LoadSavedModel"),
Callable(TFLIB_CALLABLE_ADAPTOR(LoadModule)));
obj.Set(String("SaveSavedModel"),
Callable(TFLIB_CALLABLE_ADAPTOR(SaveModule)));
obj.Set(String("_context"),
Handle(impl::TaggedValue::Capsule(new mlir::MLIRContext())));
return obj;
}
}
} | #include "tensorflow/cc/experimental/libtf/mlir/mlir_transform.h"
#include <iostream>
#include <string>
#include "tensorflow/cc/experimental/libtf/object.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
TEST(TransformTest, LoadSavedModel) {
Object mlir = MLIR();
TF_ASSERT_OK_AND_ASSIGN(Callable load,
mlir.Get<Callable>(String("LoadSavedModel")));
TF_ASSERT_OK_AND_ASSIGN(
Handle model_bad,
load.Call<Handle>(mlir, String("/error/doesnotexist___31284382")));
TF_ASSERT_OK(Cast<None>(model_bad).status());
const std::string model_good_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/cc/experimental/libtf/tests/testdata/simple-model");
TF_ASSERT_OK_AND_ASSIGN(
Object model_good,
load.Call<Object>(mlir, String(model_good_path.c_str())));
TF_ASSERT_OK_AND_ASSIGN(Callable to_string,
model_good.Get<Callable>(String("ToString")));
TF_ASSERT_OK_AND_ASSIGN(String s, to_string.Call<String>(model_good));
ASSERT_GT(strlen(s.get()), 0);
}
}
} |
1,762 | cpp | tensorflow/tensorflow | string | tensorflow/cc/experimental/libtf/impl/string.cc | tensorflow/cc/experimental/libtf/impl/string_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_IMPL_STRING_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_IMPL_STRING_H_
#include <iosfwd>
#include <string>
namespace tf {
namespace libtf {
namespace impl {
class String final {
public:
explicit String(const char* s);
String() : String("") {}
String(const String& s) : value_(s.value_) {}
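  // Interning guarantees that equal strings share storage, so comparing the
  // underlying pointers is equivalent to comparing values.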
bool operator==(const String& other) const { return value_ == other.value_; }
const std::string& str() const { return *value_; }
template <typename H>
friend H AbslHashValue(H h, const String& s) {
return H::combine(std::move(h), *s.value_);
}
private:
const std::string* value_;
};
std::ostream& operator<<(std::ostream& o, const String& str);
}
}
}
#endif
#include "tensorflow/cc/experimental/libtf/impl/string.h"
#include <unordered_set>
using StringTable = std::unordered_set<std::string>;
namespace tf {
namespace libtf {
namespace impl {
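// Interns the string: all String instances with the same contents point at
// the same entry in a process-wide table, which is intentionally leaked.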
String::String(const char* s) {
static StringTable* table = new StringTable;
value_ = &*table->insert(s).first;
}
}
}
} | #include "tensorflow/cc/experimental/libtf/impl/string.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
namespace impl {
TEST(StringTest, TestBasicInterning) {
String s1("foo");
String s2("foo");
EXPECT_EQ(&s1.str(), &s2.str());
}
}
}
} |
1,763 | cpp | tensorflow/tensorflow | none | tensorflow/cc/experimental/libtf/impl/none.cc | tensorflow/cc/experimental/libtf/impl/none_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_IMPL_NONE_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_IMPL_NONE_H_
#include <iosfwd>
#include <utility>
namespace tf {
namespace libtf {
namespace impl {
class None final {
public:
static None& GetInstance();
bool operator==(const None& other) const { return true; }
template <typename H>
friend H AbslHashValue(H h, const None& n) {
return H::combine(std::move(h), 34559);
}
private:
None() {}
};
std::ostream& operator<<(std::ostream& o, const None& none);
}
}
}
#endif
#include "tensorflow/cc/experimental/libtf/impl/none.h"
namespace tf {
namespace libtf {
namespace impl {
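// The singleton is heap-allocated and intentionally leaked to avoid
// static-destruction-order problems at shutdown.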
None& None::GetInstance() {
static None* none_inst = new None();
return *none_inst;
}
}
}
} | #include "tensorflow/cc/experimental/libtf/impl/none.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/platform/test.h"
namespace tf {
namespace libtf {
namespace impl {
TEST(NoneTest, TestSingleton) {
None& a = None::GetInstance();
None& b = None::GetInstance();
EXPECT_EQ(&a, &b);
}
TEST(NoneTest, TestSupportsAbslHash) {
absl::flat_hash_set<None> none_set;
None& a = None::GetInstance();
None& b = None::GetInstance();
none_set.insert(a);
none_set.insert(b);
EXPECT_EQ(none_set.size(), 1);
}
}
}
} |
1,764 | cpp | tensorflow/tensorflow | save | tensorflow/cc/experimental/libexport/save.cc | tensorflow/cc/experimental/libexport/save_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBEXPORT_SAVE_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBEXPORT_SAVE_H_
#include <string>
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace libexport {
TF_EXPORT Status Save(const std::string& export_dir);
}
}
#endif
#include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
namespace libexport {
Status Save(const std::string& export_dir) {
TF_RETURN_IF_ERROR(Env::Default()->RecursivelyCreateDir(export_dir));
return absl::OkStatus();
}
}
} | #include "tensorflow/cc/experimental/libexport/save.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
TEST(SaveTest, TestDirectoryStructure) {
const string base_dir = tensorflow::io::JoinPath(
tensorflow::testing::TmpDir(), "test_directory_structure");
TF_ASSERT_OK(Save(base_dir));
TF_ASSERT_OK(Env::Default()->IsDirectory(base_dir));
}
}
}
} |
1,765 | cpp | tensorflow/tensorflow | load | tensorflow/cc/experimental/libexport/load.cc | tensorflow/cc/experimental/libexport/load_test.cc | #ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBEXPORT_LOAD_H_
#define TENSORFLOW_CC_EXPERIMENTAL_LIBEXPORT_LOAD_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace libexport {
class TFPackage {
public:
static tensorflow::StatusOr<TFPackage> Load(const std::string& path);
tensorflow::StatusOr<std::string> GetVariableCheckpointKey(int index);
const SavedObjectGraph& GetObjectGraph();
tensorflow::StatusOr<const tensorflow::NodeDef*> GetGraphDefNode(
std::string name);
const protobuf::RepeatedPtrField<FunctionDef>& GetFunctionDefs();
tensorflow::BundleReader* GetVariableReader() {
return variable_reader_.get();
}
bool HasCheckpoint() { return has_checkpoint_; }
const std::string GetVariablesFilepath() const { return variables_filepath_; }
private:
SavedModel saved_model_proto_;
TrackableObjectGraph trackable_object_graph_;
std::unique_ptr<tensorflow::BundleReader> variable_reader_;
std::string variables_filepath_;
bool has_checkpoint_;
absl::flat_hash_map<std::string, const NodeDef*> graph_def_nodes_by_name_;
};
}
}
#endif
#include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace libexport {
using protobuf::RepeatedPtrField;
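// Loads the SavedModel proto (binary .pb preferred, falling back to .pbtxt),
// then, if a variables/ directory is present, opens the checkpoint and parses
// the trackable object graph stored under kObjectGraphProtoKey.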
tensorflow::StatusOr<TFPackage> TFPackage::Load(const std::string& path) {
TFPackage tf_package;
const string saved_model_pb_path = io::JoinPath(path, kSavedModelFilenamePb);
const string saved_model_pbtxt_path =
io::JoinPath(path, kSavedModelFilenamePbTxt);
if (Env::Default()->FileExists(saved_model_pb_path).ok()) {
TF_RETURN_IF_ERROR(ReadBinaryProto(Env::Default(), saved_model_pb_path,
&tf_package.saved_model_proto_));
} else if (Env::Default()->FileExists(saved_model_pbtxt_path).ok()) {
TF_RETURN_IF_ERROR(ReadTextProto(Env::Default(), saved_model_pbtxt_path,
&tf_package.saved_model_proto_));
} else {
return Status(absl::StatusCode::kNotFound,
"Could not find SavedModel .pb or .pbtxt at supplied export "
"directory path: " +
path);
}
const std::string variables_dir =
tensorflow::io::JoinPath(path, tensorflow::kSavedModelVariablesDirectory);
if (Env::Default()->FileExists(variables_dir).ok()) {
tf_package.has_checkpoint_ = true;
tf_package.variables_filepath_ = tensorflow::io::JoinPath(
variables_dir, tensorflow::kSavedModelVariablesFilename);
tf_package.variable_reader_ = std::make_unique<tensorflow::BundleReader>(
tensorflow::Env::Default(), tf_package.variables_filepath_);
tensorflow::Tensor object_graph_tensor;
TF_RETURN_IF_ERROR(tf_package.variable_reader_->Lookup(
tensorflow::kObjectGraphProtoKey, &object_graph_tensor));
const auto* object_graph_string =
reinterpret_cast<const tensorflow::tstring*>(
object_graph_tensor.tensor_data().data());
tf_package.trackable_object_graph_.ParseFromString(*object_graph_string);
} else {
tf_package.has_checkpoint_ = false;
LOG(INFO)
<< "No checkpoint found, assuming this is a program-only SavedModel";
}
const auto& nodes =
tf_package.saved_model_proto_.meta_graphs(0).graph_def().node();
for (const auto& node : nodes) {
tf_package.graph_def_nodes_by_name_[node.name()] = &node;
}
return tf_package;
}
tensorflow::StatusOr<std::string> TFPackage::GetVariableCheckpointKey(
int index) {
const auto& trackable_object = trackable_object_graph_.nodes(index);
const TrackableObjectGraph::TrackableObject::SerializedTensor*
serialized_tensor = nullptr;
for (auto& maybe_serialized_tensor : trackable_object.attributes()) {
if (maybe_serialized_tensor.name() == "VARIABLE_VALUE") {
serialized_tensor = &maybe_serialized_tensor;
}
}
if (serialized_tensor == nullptr) {
return tensorflow::Status(absl::StatusCode::kInternal,
"Failed to find variable value field.");
}
return serialized_tensor->checkpoint_key();
}
const SavedObjectGraph& TFPackage::GetObjectGraph() {
return saved_model_proto_.mutable_meta_graphs(0)->object_graph_def();
}
tensorflow::StatusOr<const tensorflow::NodeDef*> TFPackage::GetGraphDefNode(
std::string name) {
const auto& iter = graph_def_nodes_by_name_.find(name);
if (iter == graph_def_nodes_by_name_.end()) {
return tensorflow::Status(absl::StatusCode::kInternal,
absl::StrCat("Failed to find node named ", name));
}
return iter->second;
}
const RepeatedPtrField<FunctionDef>& TFPackage::GetFunctionDefs() {
auto& function_library =
saved_model_proto_.mutable_meta_graphs(0)->graph_def().library();
return function_library.function();
}
}
} | #include "tensorflow/cc/experimental/libexport/load.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace libexport {
namespace {
TEST(LoadTest, TestDiskSavedModelLoad) {
StatusOr<TFPackage> result = TFPackage::Load("test");
EXPECT_FALSE(result.status().ok());
}
}
}
} |
1,766 | cpp | tensorflow/tensorflow | freeze_saved_model | tensorflow/cc/tools/freeze_saved_model.cc | tensorflow/cc/tools/freeze_saved_model_test.cc | #ifndef TENSORFLOW_CC_TOOLS_FREEZE_SAVED_MODEL_H_
#define TENSORFLOW_CC_TOOLS_FREEZE_SAVED_MODEL_H_
#include <unordered_set>
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
Status FreezeSavedModel(const SavedModelBundle& saved_model_bundle,
GraphDef* frozen_graph_def,
std::unordered_set<string>* inputs,
std::unordered_set<string>* outputs);
}
#endif
#include "tensorflow/cc/tools/freeze_saved_model.h"
#include <iostream>
#include <queue>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace {
void GetTensorNamesFromTensorInfo(const TensorInfo& tensor_info,
std::unordered_set<string>* tensor_names) {
if (tensor_info.has_coo_sparse()) {
const TensorInfo_CooSparse& coo_sparse = tensor_info.coo_sparse();
tensor_names->insert(coo_sparse.values_tensor_name());
tensor_names->insert(coo_sparse.indices_tensor_name());
tensor_names->insert(coo_sparse.dense_shape_tensor_name());
} else if (tensor_info.has_composite_tensor()) {
for (const auto& component : tensor_info.composite_tensor().components()) {
tensor_names->insert(component.name());
}
} else {
tensor_names->insert(tensor_info.name());
}
}
void GetSignatureDefsInputsAndOutputs(
const SavedModelBundle& saved_model_bundle,
std::unordered_set<string>* inputs, std::unordered_set<string>* outputs) {
for (auto& sigdef_elem : saved_model_bundle.meta_graph_def.signature_def()) {
const SignatureDef& signature_def = sigdef_elem.second;
for (auto& input_elem : signature_def.inputs()) {
GetTensorNamesFromTensorInfo(input_elem.second, inputs);
}
for (auto& output_elem : signature_def.outputs()) {
GetTensorNamesFromTensorInfo(output_elem.second, outputs);
}
}
}
void GetNodeNameToNodeDefMap(
GraphDef* graph_def,
std::unordered_map<string, NodeDef*>* name_to_node_map) {
for (size_t i = 0; i < graph_def->node_size(); i++) {
NodeDef* node = graph_def->mutable_node(i);
(*name_to_node_map)[node->name()] = node;
}
}
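// Recovers the node name from a tensor name by stripping a leading '^'
// (control dependency marker) and any ':<output_index>' suffix.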
const string GetNodeNameFromTensorName(string tensor_name) {
if (tensor_name[0] == '^') {
tensor_name.erase(0, 1);
}
std::vector<string> tensor_name_parts = str_util::Split(tensor_name, ':');
return tensor_name_parts[0];
}
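// Breadth-first search from the nodes feeding the requested outputs,
// collecting every reachable node and recording which of them are variables.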
void GetReachableNodesAndVariables(
GraphDef* graph_def, const std::unordered_set<string>& outputs,
const std::unordered_map<string, NodeDef*>& name_to_node_map,
std::unordered_set<string>* reachable_node_names,
std::unordered_set<string>* variable_node_names) {
static const std::unordered_set<string>* kVariableTypes =
new std::unordered_set<string>({"Variable", "VariableV2", "VarHandleOp"});
std::queue<string> nodes_to_visit;
for (const string& output_tensor_name : outputs) {
nodes_to_visit.push(GetNodeNameFromTensorName(output_tensor_name));
}
while (!nodes_to_visit.empty()) {
const string node_name = nodes_to_visit.front();
nodes_to_visit.pop();
if (reachable_node_names->find(node_name) != reachable_node_names->end()) {
continue;
}
reachable_node_names->insert(node_name);
NodeDef* node = name_to_node_map.at(node_name);
if (kVariableTypes->find(node->op()) != kVariableTypes->end()) {
variable_node_names->insert(node->name());
}
for (const string& input_tensor_name : node->input()) {
nodes_to_visit.push(GetNodeNameFromTensorName(input_tensor_name));
}
}
}
Status GetVariableNameToTensorMap(
Session* session,
const std::unordered_map<string, NodeDef*>& name_to_node_map,
std::unordered_set<string> variable_names_set,
std::unordered_map<string, Tensor>* variable_name_to_value_map) {
if (variable_names_set.empty()) {
return OkStatus();
}
std::vector<string> variable_names;
variable_names.reserve(variable_names_set.size());
std::vector<string> tensor_names;
tensor_names.reserve(variable_names_set.size());
for (const string& node_name : variable_names_set) {
variable_names.push_back(node_name);
NodeDef* node_def = name_to_node_map.at(node_name);
if (node_def->op() == "VarHandleOp") {
tensor_names.push_back(node_name + "/Read/ReadVariableOp:0");
} else {
tensor_names.push_back(node_name + ":0");
}
}
std::vector<Tensor> outputs;
  TF_RETURN_IF_ERROR(session->Run({}, tensor_names, {}, &outputs));
for (size_t i = 0; i < variable_names.size(); i++) {
(*variable_name_to_value_map)[variable_names[i]] = outputs[i];
}
return OkStatus();
}
void ConvertVariableToConstant(const NodeDef& variable_node,
const Tensor& variable_value,
NodeDef* const_node) {
const_node->set_name(variable_node.name());
const_node->set_op("Const");
(*const_node->mutable_attr())["dtype"] = variable_node.attr().at("dtype");
variable_value.AsProtoTensorContent(
(*const_node->mutable_attr())["value"].mutable_tensor());
}
void ConvertReadVariableOpToIdentity(const NodeDef& node,
NodeDef* identity_node) {
identity_node->set_name(node.name());
identity_node->set_op("Identity");
(*identity_node->mutable_attr())["T"] = node.attr().at("dtype");
identity_node->add_input(node.input(0));
}
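// Follows the chain of Identity nodes upward from `node_name`; returns the
// name of the VarHandleOp at the top of the chain, or NotFound otherwise.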
StatusOr<string> GetVarHandleName(
const std::unordered_map<string, NodeDef*>& name_to_node_map,
string node_name) {
const NodeDef* node = name_to_node_map.at(node_name);
while (node->input_size() > 0) {
auto parent = name_to_node_map.find(node->input(0));
if (parent == name_to_node_map.end()) break;
node = parent->second;
if (node->op() != "Identity") {
VLOG(2) << "Stopping at non-identity node " << node->op();
break;
}
}
if (node->op() == "VarHandleOp") {
return node->name();
}
return absl::NotFoundError("No VarHandleOp ancestor found");
}
StatusOr<string> GetHandleNameIfNeedsToFreeze(
const std::unordered_map<string, NodeDef*>& name_to_node_map,
string node_name, const std::unordered_set<string>& variable_node_names) {
StatusOr<string> var_handle_name =
GetVarHandleName(name_to_node_map, node_name);
if (var_handle_name.ok() && variable_node_names.count(*var_handle_name)) {
return var_handle_name;
}
return absl::NotFoundError("No VarHandleOp ancestor found");
}
Status FreezeGraphDef(const SavedModelBundle& saved_model_bundle,
const std::unordered_set<string>& outputs,
GraphDef* frozen_graph_def) {
GraphDef graph_def = saved_model_bundle.meta_graph_def.graph_def();
*frozen_graph_def->mutable_versions() = graph_def.versions();
*frozen_graph_def->mutable_library() = graph_def.library();
if (graph_def.node_size() == 0) {
return OkStatus();
}
std::unordered_map<string, NodeDef*> name_to_node_map;
GetNodeNameToNodeDefMap(&graph_def, &name_to_node_map);
std::unordered_set<string> reachable_node_names;
std::unordered_set<string> variable_node_names;
GetReachableNodesAndVariables(&graph_def, outputs, name_to_node_map,
&reachable_node_names, &variable_node_names);
std::unordered_map<string, Tensor> variable_to_value_map;
TF_RETURN_IF_ERROR(GetVariableNameToTensorMap(
saved_model_bundle.session.get(), name_to_node_map, variable_node_names,
&variable_to_value_map));
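  // Rewrite pass: reachable variables become Const nodes holding their
  // current value, ReadVariableOps on frozen handles become Identity, and
  // Identity nodes forwarding a frozen handle get a concrete T attr.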
for (const NodeDef& node : graph_def.node()) {
if (reachable_node_names.find(node.name()) == reachable_node_names.end()) {
continue;
}
if (variable_node_names.find(node.name()) != variable_node_names.end()) {
ConvertVariableToConstant(node, variable_to_value_map[node.name()],
frozen_graph_def->add_node());
continue;
} else if (node.op() == "ReadVariableOp" &&
GetHandleNameIfNeedsToFreeze(name_to_node_map, node.name(),
variable_node_names)
.ok()) {
ConvertReadVariableOpToIdentity(node, frozen_graph_def->add_node());
continue;
} else if (node.op() == "Identity") {
StatusOr<string> handle_name = GetHandleNameIfNeedsToFreeze(
name_to_node_map, node.name(), variable_node_names);
if (handle_name.ok()) {
NodeDef* new_node = frozen_graph_def->add_node();
*new_node = node;
(*new_node->mutable_attr())["T"] =
name_to_node_map.at(*handle_name)->attr().at("dtype");
continue;
}
}
*frozen_graph_def->add_node() = node;
}
return OkStatus();
}
}  // namespace
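// Freezes the graph in `saved_model_bundle`, writing the result to
// `frozen_graph_def` and filling `inputs`/`outputs` with the tensor names
// collected from the bundle's SignatureDefs.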
Status FreezeSavedModel(const SavedModelBundle& saved_model_bundle,
GraphDef* frozen_graph_def,
std::unordered_set<string>* inputs,
std::unordered_set<string>* outputs) {
GetSignatureDefsInputsAndOutputs(saved_model_bundle, inputs, outputs);
TF_RETURN_IF_ERROR(
FreezeGraphDef(saved_model_bundle, *outputs, frozen_graph_def));
return OkStatus();
}
} | #include "tensorflow/cc/tools/freeze_saved_model.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class FreezeTest : public ::testing::Test {
protected:
void GraphDefEqual(const GraphDef& actual, const GraphDef& expected) {
EXPECT_EQ(actual.ShortDebugString(), expected.ShortDebugString());
}
SignatureDef BuildSignatureDef(const std::unordered_set<string>& inputs,
const std::unordered_set<string>& outputs) {
SignatureDef signature_def;
for (const string& input : inputs) {
(*signature_def.mutable_inputs())[input].set_name(input);
}
for (const string& output : outputs) {
(*signature_def.mutable_outputs())[output].set_name(output);
}
return signature_def;
}
void AddSignatureDefToSavedModelBundle(const SignatureDef& signature_def,
const string& key,
SavedModelBundle* saved_model_bundle) {
MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
(*meta_graph_def->mutable_signature_def())[key] = signature_def;
}
Status InitializeSavedModelBundleSession(
const GraphDef& graph_def, const string& init_node,
SavedModelBundle* saved_model_bundle) {
SessionOptions session_options;
saved_model_bundle->session.reset(NewSession(session_options));
TF_RETURN_IF_ERROR(saved_model_bundle->session->Create(graph_def));
if (!init_node.empty()) {
std::vector<Tensor> outputs;
      return saved_model_bundle->session->Run(
          /* inputs */ {}, /* output_tensor_names */ {}, {init_node}, &outputs);
}
return OkStatus();
}
Status AddGraphDefToSavedModelBundle(const GraphDef& graph_def,
const string& init_node,
SavedModelBundle* saved_model_bundle) {
MetaGraphDef* meta_graph_def = &saved_model_bundle->meta_graph_def;
*meta_graph_def->mutable_graph_def() = graph_def;
return InitializeSavedModelBundleSession(graph_def, init_node,
saved_model_bundle);
}
Status AddGraphDefWithOutputsToSavedModelBundle(
const GraphDef& graph_def, const std::unordered_set<string>& outputs,
const string& init_node, SavedModelBundle* saved_model_bundle) {
SignatureDef signature_def =
BuildSignatureDef(std::unordered_set<string>(), outputs);
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
saved_model_bundle);
return AddGraphDefToSavedModelBundle(graph_def, init_node,
saved_model_bundle);
}
void RunAndCompareFrozenAndUnfrozenGraphs(Session* unfrozen_session,
const GraphDef& frozen_graph_def,
const string& tensor_name) {
std::vector<Tensor> unfrozen_outputs;
    TF_ASSERT_OK(unfrozen_session->Run(/* inputs */ {}, {tensor_name},
                                       /* targets */ {}, &unfrozen_outputs));
SessionOptions session_options;
std::unique_ptr<Session> frozen_session(NewSession(session_options));
TF_ASSERT_OK(frozen_session->Create(frozen_graph_def));
std::vector<Tensor> frozen_outputs;
    TF_ASSERT_OK(frozen_session->Run(/* inputs */ {}, {tensor_name},
                                     /* targets */ {}, &frozen_outputs));
test::ExpectTensorEqual<float>(unfrozen_outputs[0], frozen_outputs[0]);
}
void TestFreezeGraphWithoutDependentVariables(bool use_resource) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
Output read_var = ops::ReadVariableOp(
scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
} else {
Output var =
ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), var, a);
}
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
GraphDef expected_graph_def;
Scope expected_scope = Scope::NewRootScope();
Output expected_a = ops::Const(expected_scope.WithOpName("a"), 10.0f, {});
Output expected_b = ops::Const(expected_scope.WithOpName("b"), 10.0f, {});
Output expected_c =
ops::Mul(expected_scope.WithOpName("c"), expected_a, expected_b);
TF_ASSERT_OK(expected_scope.ToGraphDef(&expected_graph_def));
GraphDefEqual(frozen_graph_def, expected_graph_def);
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
void TestFreezeGraphWithDependentVariables(bool use_resource,
bool use_identity = false) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output read_var;
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
if (use_identity) {
Output identity = ops::Identity(scope.WithOpName("identity"), var);
read_var =
ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
identity, DataType::DT_FLOAT);
} else {
read_var =
ops::ReadVariableOp(scope.WithOpName("var/Read/ReadVariableOp"),
var, DataType::DT_FLOAT);
}
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
} else {
      read_var =
ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
}
Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
size_t expected_nodes = use_resource ? (use_identity ? 5 : 4) : 3;
EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
for (const NodeDef& node : frozen_graph_def.node()) {
EXPECT_NE(node.op(), "Variable") << node.name();
EXPECT_NE(node.op(), "VariableV2") << node.name();
EXPECT_NE(node.op(), "VarHandleOp") << node.name();
EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
}
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
void TestFreezeGraphWithAndWithoutDependentVariables(bool use_resource) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output read_var;
if (use_resource) {
Output var =
ops::VarHandleOp(scope.WithOpName("var"), DataType::DT_FLOAT, {});
read_var = ops::ReadVariableOp(
scope.WithOpName("var/Read/ReadVariableOp"), var, DataType::DT_FLOAT);
auto assign = ops::AssignVariableOp(scope.WithOpName("assign"), var, a);
Output var_1 =
ops::VarHandleOp(scope.WithOpName("var_1"), DataType::DT_FLOAT, {});
Output read_var_1 =
ops::ReadVariableOp(scope.WithOpName("var_1/Read/ReadVariableOp"),
var, DataType::DT_FLOAT);
auto assign_1 =
ops::AssignVariableOp(scope.WithOpName("assign_1"), var_1, a);
} else {
read_var = ops::Variable(scope.WithOpName("var"), {}, DataType::DT_FLOAT);
Output assign = ops::Assign(scope.WithOpName("assign"), read_var, a);
Output var_1 =
ops::Variable(scope.WithOpName("var_1"), {}, DataType::DT_FLOAT);
Output assign_1 = ops::Assign(scope.WithOpName("assign_1"), var_1, a);
}
Output c = ops::Mul(scope.WithOpName("c"), a, read_var);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(
graph_def, {"c:0"}, "assign", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def,
&inputs, &outputs));
size_t expected_nodes = use_resource ? 4 : 3;
EXPECT_EQ(frozen_graph_def.node_size(), expected_nodes);
for (const NodeDef& node : frozen_graph_def.node()) {
EXPECT_NE(node.op(), "Variable") << node.name();
EXPECT_NE(node.op(), "VariableV2") << node.name();
EXPECT_NE(node.op(), "VarHandleOp") << node.name();
EXPECT_NE(node.op(), "ReadVariableOp") << node.name();
}
RunAndCompareFrozenAndUnfrozenGraphs(saved_model_bundle.session.get(),
frozen_graph_def, "c:0");
}
};
TEST_F(FreezeTest, InputsAndOutputsSingleSignatureDef) {
SavedModelBundle saved_model_bundle;
std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
SignatureDef signature_def =
BuildSignatureDef(expected_inputs, expected_outputs);
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, InputsAndOutputsMultipleSignatureDefs) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def_0 = BuildSignatureDef({"input0:0"}, {"output0:0"});
SignatureDef signature_def_1 = BuildSignatureDef({"input1:0"}, {"output1:0"});
AddSignatureDefToSavedModelBundle(signature_def_0, "signature_def_0",
&saved_model_bundle);
AddSignatureDefToSavedModelBundle(signature_def_1, "signature_def_1",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input0:0", "input1:0"};
std::unordered_set<string> expected_outputs = {"output0:0", "output1:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, GraphDefVersionsAndLibrary) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
graph_def.mutable_versions()->set_producer(1234);
graph_def.mutable_versions()->set_min_consumer(1234);
*graph_def.mutable_library()->add_function() = test::function::NonZero();
TF_ASSERT_OK(
AddGraphDefToSavedModelBundle(graph_def, "", &saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithNoVariables) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), 10.0f, {});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithMultiOutputOperation) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output a = ops::Const(scope.WithOpName("a"), {10.0f, 10.0f}, {2});
Output axis = ops::Const(scope.WithOpName("axis"), 0, {});
OutputList split = ops::Split(scope.WithOpName("split"), axis, a, 2).output;
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), split[1], b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithControlDependency) {
SavedModelBundle saved_model_bundle;
GraphDef graph_def;
Scope scope = Scope::NewRootScope();
Output source = ops::Const(scope.WithOpName("source"), 10.0f, {});
Output a = ops::Const(scope.WithOpName("a").WithControlDependencies(source),
{10.0f, 10.0f}, {2});
Output b = ops::Const(scope.WithOpName("b"), 10.0f, {});
Output c = ops::Mul(scope.WithOpName("c"), a, b);
TF_ASSERT_OK(scope.ToGraphDef(&graph_def));
TF_ASSERT_OK(AddGraphDefWithOutputsToSavedModelBundle(graph_def, {"c:0"}, "",
&saved_model_bundle));
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
GraphDefEqual(frozen_graph_def, graph_def);
}
TEST_F(FreezeTest, GraphDefWithoutDependentVariables) {
TestFreezeGraphWithoutDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithoutDependentResourceVariables) {
TestFreezeGraphWithoutDependentVariables(true);
}
TEST_F(FreezeTest, GraphDefWithDependentVariables) {
TestFreezeGraphWithDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithDependentResourceVariables) {
TestFreezeGraphWithDependentVariables(true);
}
TEST_F(FreezeTest, GraphDefWithDependentResourceVariablesAndIdentity) {
TestFreezeGraphWithDependentVariables(true, true);
}
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentVariables) {
TestFreezeGraphWithAndWithoutDependentVariables(false);
}
TEST_F(FreezeTest, GraphDefWithAndWithoutDependentResourceVariables) {
TestFreezeGraphWithAndWithoutDependentVariables(true);
}
TEST_F(FreezeTest, InputsAndOutputsCompositeTensorSignatureDef) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def;
TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
in.mutable_composite_tensor()->add_components()->set_name("input1:0");
in.mutable_composite_tensor()->add_components()->set_name("input2:0");
TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
out.mutable_composite_tensor()->add_components()->set_name("output2:0");
out.mutable_composite_tensor()->add_components()->set_name("output1:0");
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input1:0", "input2:0"};
std::unordered_set<string> expected_outputs = {"output1:0", "output2:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
TEST_F(FreezeTest, InputsAndOutputsSparseCooSignatureDef) {
SavedModelBundle saved_model_bundle;
SignatureDef signature_def;
TensorInfo& in = (*signature_def.mutable_inputs())["input_arg"];
in.mutable_coo_sparse()->set_values_tensor_name("input1:0");
in.mutable_coo_sparse()->set_indices_tensor_name("input2:0");
in.mutable_coo_sparse()->set_dense_shape_tensor_name("input3:0");
TensorInfo& out = (*signature_def.mutable_outputs())["output_arg"];
out.mutable_coo_sparse()->set_values_tensor_name("output1:0");
out.mutable_coo_sparse()->set_indices_tensor_name("output2:0");
out.mutable_coo_sparse()->set_dense_shape_tensor_name("output3:0");
AddSignatureDefToSavedModelBundle(signature_def, "signature_def",
&saved_model_bundle);
GraphDef frozen_graph_def;
std::unordered_set<string> inputs;
std::unordered_set<string> outputs;
TF_ASSERT_OK(FreezeSavedModel(saved_model_bundle, &frozen_graph_def, &inputs,
&outputs));
std::unordered_set<string> expected_inputs = {"input1:0", "input2:0",
"input3:0"};
std::unordered_set<string> expected_outputs = {"output1:0", "output2:0",
"output3:0"};
EXPECT_EQ(expected_inputs, inputs);
EXPECT_EQ(expected_outputs, outputs);
}
}  // namespace
} |
1,767 | cpp | tensorflow/tensorflow | fingerprinting_utils | tensorflow/cc/saved_model/fingerprinting_utils.cc | tensorflow/cc/saved_model/fingerprinting_utils_test.cc | #ifndef TENSORFLOW_CC_SAVED_MODEL_FINGERPRINTING_UTILS_H_
#define TENSORFLOW_CC_SAVED_MODEL_FINGERPRINTING_UTILS_H_
#include <cstdint>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
namespace tensorflow::saved_model::fingerprinting {
namespace fingerprinting_utils_internal {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
absl::StatusOr<int> fieldTagMatches(
const RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>& a,
const RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>& b);
absl::StatusOr<::tensorflow::proto_splitter::ChunkedMessage>
PruneChunkedMessage(
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
std::vector<::tensorflow::proto_splitter::ChunkInfo> chunks_info,
std::vector<RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>>
target_fields_list);
std::string SerializeProto(const Message& message);
absl::StatusOr<uint64_t> HashFields(
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info,
const RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>&
field_tags,
Message* merged_message);
inline RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>
GraphDefFieldTags();
inline RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>
SignatureDefFieldTags();
inline RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>
SavedObjectGraphFieldTags();
absl::StatusOr<tensorflow::SavedModel> PrunedSavedModel(
absl::string_view export_dir,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info,
::tensorflow::proto_splitter::ChunkMetadata& chunk_metadata);
absl::StatusOr<uint64_t> HashMessage(
Message* message,
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info,
const RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>&
field_tags);
absl::StatusOr<uint64_t> HashGraphDef(
tensorflow::GraphDef* graph_def,
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info);
absl::StatusOr<uint64_t> HashSignatureDef(
const Map<std::string, ::tensorflow::SignatureDef>& signature_def_map,
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info);
absl::StatusOr<uint64_t> HashSavedObjectGraph(
tensorflow::SavedObjectGraph* saved_object_graph,
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info);
}  // namespace fingerprinting_utils_internal
uint64_t HashCheckpointIndexFile(absl::string_view model_dir);
absl::StatusOr<FingerprintDef> CreateFingerprintDefCpb(
absl::string_view export_dir, std::string cpb_file);
}  // namespace tensorflow::saved_model::fingerprinting
#endif  // TENSORFLOW_CC_SAVED_MODEL_FINGERPRINTING_UTILS_H_
#include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/merge.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using tools::proto_splitter::Field;
using tools::proto_splitter::FieldType;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetFieldTypes;
using tools::proto_splitter::GetMutableField;
using tools::proto_splitter::GetRiegeliReader;
using tools::proto_splitter::Merger;
using tools::proto_splitter::MutableFieldResult;
using tools::proto_splitter::ReadChunk;
namespace fingerprinting_utils_internal {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
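// Returns the length of the initial run of positions on which the field tags
// in `a` and `b` agree; comparison stops at the first mismatch. Handles field
// numbers, repeated-field indices, and every supported map key type.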
absl::StatusOr<int> fieldTagMatches(const RepeatedPtrField<FieldIndex>& a,
const RepeatedPtrField<FieldIndex>& b) {
int matches = 0;
for (int i = 0; i == matches && i < a.size() && i < b.size(); i++) {
switch (b[i].kind_case()) {
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kField:
if (a.at(i).has_field() && a.at(i).field() == b.at(i).field()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kIndex:
if (a.at(i).has_index() && a.at(i).index() == b.at(i).index()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kMapKey:
if (a.at(i).has_map_key()) {
const ::tensorflow::proto_splitter::FieldIndex_MapKey& key =
b.at(i).map_key();
const ::tensorflow::proto_splitter::FieldIndex_MapKey& chunked_key =
a.at(i).map_key();
switch (key.type_case()) {
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::kS:
if (chunked_key.has_s() && chunked_key.s() == key.s()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kBoolean:
if (chunked_key.has_boolean() &&
chunked_key.boolean() == key.boolean()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kUi32:
if (chunked_key.has_ui32() && chunked_key.ui32() == key.ui32()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kUi64:
if (chunked_key.has_ui64() && chunked_key.ui64() == key.ui64()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kI32:
if (chunked_key.has_i32() && chunked_key.i32() == key.i32()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kI64:
if (chunked_key.has_i64() && chunked_key.i64() == key.i64()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
TYPE_NOT_SET:
default:
return absl::FailedPreconditionError(
"Encountered unknown field_tag.map_key type.");
}
}
break;
case FieldIndex::KindCase::KIND_NOT_SET:
default:
return absl::FailedPreconditionError(
"Encountered unknown field_tag kind.");
}
}
return matches;
}
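// Recursively copies `chunked_message`, keeping only the chunked fields whose
// tag paths are prefixes of one of the target field-tag paths in
// `target_fields_list`.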
absl::StatusOr<::tensorflow::proto_splitter::ChunkedMessage>
PruneChunkedMessage(
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
std::vector<ChunkInfo> chunks_info,
std::vector<RepeatedPtrField<FieldIndex>> target_fields_list) {
::tensorflow::proto_splitter::ChunkedMessage pruned_chunked_message;
if (chunked_message.has_chunk_index()) {
pruned_chunked_message.set_chunk_index(chunked_message.chunk_index());
}
for (const ChunkedField& chunked_field : chunked_message.chunked_fields()) {
for (const auto& target_fields : target_fields_list) {
TF_ASSIGN_OR_RETURN(
int matches,
fieldTagMatches(chunked_field.field_tag(), target_fields));
if (matches == chunked_field.field_tag_size()) {
auto cf = std::make_unique<proto_splitter::ChunkedField>();
cf->mutable_field_tag()->CopyFrom(chunked_field.field_tag());
TF_ASSIGN_OR_RETURN(
*cf->mutable_message(),
PruneChunkedMessage(chunked_field.message(), reader, chunks_info,
target_fields_list));
pruned_chunked_message.mutable_chunked_fields()->AddAllocated(
cf.release());
}
}
}
return pruned_chunked_message;
}
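// Serializes `message` deterministically so that map fields hash identically
// across runs.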
std::string SerializeProto(const Message& message) {
std::string serialized_message;
{
StringOutputStream stream(&serialized_message);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
message.SerializeToCodedStream(&output);
}
return serialized_message;
}
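// Accumulates a Fingerprint64-based checksum over the chunks reachable from
// `chunked_message` whose field tags overlap `field_tags`, parsing
// intermediate chunks into `merged_message` when the target lies deeper
// inside them.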
absl::StatusOr<uint64_t> HashFields(
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
const RepeatedPtrField<FieldIndex>& field_tags, Message* merged_message) {
uint64_t field_checksum = 0;
for (const ChunkedField& chunked_field : chunked_message.chunked_fields()) {
const RepeatedPtrField<FieldIndex> chunked_field_tags =
chunked_field.field_tag();
const ChunkedMessage& chunked_message = chunked_field.message();
TF_ASSIGN_OR_RETURN(int matches,
fieldTagMatches(chunked_field_tags, field_tags));
if (chunked_message.has_chunk_index() && matches == field_tags.size()) {
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
field_checksum = FingerprintCat64(field_checksum, Fingerprint64(chunk));
} else if (matches == field_tags.size()) {
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(chunked_message, reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
} else if (chunked_message.has_chunk_index() &&
matches == chunked_field_tags.size()) {
TF_ASSIGN_OR_RETURN(std::vector<Field> fields,
GetFieldTypes(chunked_field_tags));
for (const auto& field : fields) {
TF_ASSIGN_OR_RETURN(MutableFieldResult mfr,
GetMutableField(merged_message, field));
merged_message =
mfr.parent->GetReflection()->MutableMessage(mfr.parent, mfr.field);
}
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
merged_message->ParseFromString(chunk);
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(chunked_message, reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
} else if (matches == chunked_field_tags.size()) {
for (const ChunkedField& cf : chunked_message.chunked_fields()) {
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(cf.message(), reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
}
}
}
return field_checksum;
}
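// Field tags addressing SavedModel.meta_graphs[0].graph_def; the field
// numbers below come from saved_model.proto and meta_graph.proto.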
inline RepeatedPtrField<FieldIndex> GraphDefFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex graph_def_field_tag;
graph_def_field_tag.set_field(2);
RepeatedPtrField<FieldIndex> graph_def_field_tags;
graph_def_field_tags.Add(FieldIndex(meta_graph_field_tag));
graph_def_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
graph_def_field_tags.Add(FieldIndex(graph_def_field_tag));
return graph_def_field_tags;
}
inline RepeatedPtrField<FieldIndex> SignatureDefFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex signature_def_field_tag;
signature_def_field_tag.set_field(5);
RepeatedPtrField<FieldIndex> signature_def_field_tags;
signature_def_field_tags.Add(FieldIndex(meta_graph_field_tag));
signature_def_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
signature_def_field_tags.Add(FieldIndex(signature_def_field_tag));
return signature_def_field_tags;
}
inline RepeatedPtrField<FieldIndex> SavedObjectGraphFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex saved_object_graph_field_tag;
saved_object_graph_field_tag.set_field(7);
RepeatedPtrField<FieldIndex> saved_object_graph_field_tags;
saved_object_graph_field_tags.Add(FieldIndex(meta_graph_field_tag));
saved_object_graph_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
saved_object_graph_field_tags.Add(FieldIndex(saved_object_graph_field_tag));
return saved_object_graph_field_tags;
}
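// Reads back only the GraphDef, SignatureDef, and SavedObjectGraph portions
// of the chunked SavedModel at `export_dir`, pruning the chunk metadata
// before merging.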
absl::StatusOr<SavedModel> PrunedSavedModel(
absl::string_view export_dir,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info, ChunkMetadata& chunk_metadata) {
SavedModel saved_model;
ChunkMetadata pruned_chunk_metadata;
pruned_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks());
TF_ASSIGN_OR_RETURN(
*pruned_chunk_metadata.mutable_message(),
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{GraphDefFieldTags(), SignatureDefFieldTags(),
SavedObjectGraphFieldTags()}));
TF_RETURN_IF_ERROR(
Merger::ReadPartial(io::JoinPath(export_dir, kSavedModelFilenamePrefix),
pruned_chunk_metadata, &saved_model));
return saved_model;
}
absl::StatusOr<uint64_t> HashMessage(
Message* message, const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
const RepeatedPtrField<FieldIndex>& field_tags) {
uint64_t total_message_hash = Fingerprint64(SerializeProto(*message));
TF_ASSIGN_OR_RETURN(
uint64_t message_hash,
HashFields(chunked_message, reader, chunks_info, field_tags, message));
return FingerprintCat64(total_message_hash, message_hash);
}
absl::StatusOr<uint64_t> HashGraphDef(
::tensorflow::GraphDef* graph_def, const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
return HashMessage(graph_def, chunked_message, reader, chunks_info,
GraphDefFieldTags());
}
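// Hashes the SignatureDef map entries in key-sorted order so the result is
// independent of map iteration order.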
absl::StatusOr<uint64_t> HashSignatureDef(
const Map<std::string, ::tensorflow::SignatureDef>& signature_def_map,
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
uint64_t signature_def_hash = 0;
std::vector<std::pair<std::string, ::tensorflow::SignatureDef>>
signature_def_sorted(signature_def_map.begin(), signature_def_map.end());
std::sort(signature_def_sorted.begin(), signature_def_sorted.end(),
[](const std::pair<std::string, ::tensorflow::SignatureDef>& a,
const std::pair<std::string, ::tensorflow::SignatureDef>& b) {
return a.first < b.first;
});
for (const auto& signature_def : signature_def_sorted) {
uint64_t signature_def_pair_hash =
FingerprintCat64(Fingerprint64(signature_def.first),
Fingerprint64(SerializeProto(signature_def.second)));
signature_def_hash =
FingerprintCat64(signature_def_hash, signature_def_pair_hash);
SignatureDef signature_def_val = signature_def.second;
TF_ASSIGN_OR_RETURN(
uint64_t signature_def_entry_hash,
HashFields(chunked_message, reader, chunks_info,
SignatureDefFieldTags(), &signature_def_val));
signature_def_hash =
FingerprintCat64(signature_def_hash, signature_def_entry_hash);
}
return signature_def_hash;
}
absl::StatusOr<uint64_t> HashSavedObjectGraph(
::tensorflow::SavedObjectGraph* saved_object_graph,
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
return HashMessage(saved_object_graph, chunked_message, reader, chunks_info,
SavedObjectGraphFieldTags());
}
}  // namespace fingerprinting_utils_internal
using fingerprinting_utils_internal::HashFields;
using fingerprinting_utils_internal::HashGraphDef;
using fingerprinting_utils_internal::HashSavedObjectGraph;
using fingerprinting_utils_internal::HashSignatureDef;
using fingerprinting_utils_internal::PrunedSavedModel;
using fingerprinting_utils_internal::SerializeProto;
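// Fingerprints the checkpoint's .index file under the variables directory;
// returns 0 if the file cannot be read (e.g. the model has no checkpoint).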
uint64_t HashCheckpointIndexFile(absl::string_view model_dir) {
std::string meta_filename = MetaFilename(io::JoinPath(
model_dir, kSavedModelVariablesDirectory, kSavedModelVariablesFilename));
std::string data;
absl::Status read_status =
ReadFileToString(Env::Default(), meta_filename, &data);
if (read_status.ok()) {
return tensorflow::Fingerprint64(data);
} else {
return 0;
}
}
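// Computes a FingerprintDef for a chunked (.cpb) SavedModel by hashing the
// full chunked proto, its GraphDef, SignatureDefs, and SavedObjectGraph, plus
// the checkpoint index file.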
absl::StatusOr<FingerprintDef> CreateFingerprintDefCpb(
absl::string_view export_dir, std::string cpb_file) {
const int kFingerprintProducer = 2;
TF_ASSIGN_OR_RETURN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
return absl::FailedPreconditionError(
absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n",
read_metadata.status().ToString()));
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FingerprintDef fingerprint_def;
SavedModel saved_model;
TF_ASSIGN_OR_RETURN(uint64_t saved_model_hash,
HashFields(chunk_metadata.message(), reader, chunks_info,
{}, &saved_model));
saved_model_hash = FingerprintCat64(
saved_model_hash, Fingerprint64(SerializeProto(saved_model)));
fingerprint_def.set_saved_model_checksum(saved_model_hash);
TF_ASSIGN_OR_RETURN(
saved_model,
PrunedSavedModel(export_dir, reader, chunks_info, chunk_metadata));
TF_ASSIGN_OR_RETURN(
uint64_t graph_def_program_hash,
HashGraphDef(saved_model.mutable_meta_graphs(0)->mutable_graph_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_graph_def_program_hash(graph_def_program_hash);
TF_ASSIGN_OR_RETURN(
uint64_t signature_def_hash,
HashSignatureDef(saved_model.meta_graphs(0).signature_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_signature_def_hash(signature_def_hash);
TF_ASSIGN_OR_RETURN(
uint64_t saved_object_graph_hash,
HashSavedObjectGraph(
saved_model.mutable_meta_graphs(0)->mutable_object_graph_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_saved_object_graph_hash(saved_object_graph_hash);
fingerprint_def.set_checkpoint_hash(HashCheckpointIndexFile(export_dir));
reader.Close();
VersionDef* version = fingerprint_def.mutable_version();
version->set_producer(kFingerprintProducer);
return fingerprint_def;
}
} | #include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
using fingerprinting_utils_internal::fieldTagMatches;
using fingerprinting_utils_internal::HashFields;
using fingerprinting_utils_internal::HashGraphDef;
using fingerprinting_utils_internal::HashSavedObjectGraph;
using fingerprinting_utils_internal::HashSignatureDef;
using fingerprinting_utils_internal::PruneChunkedMessage;
using fingerprinting_utils_internal::SerializeProto;
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using ::tensorflow::proto_splitter_testdata::ManyFields;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
using ::tensorflow::protobuf::TextFormat;
using ::tensorflow::protobuf::io::ArrayInputStream;
using ::tensorflow::protobuf::util::MessageDifferencer;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetRiegeliReader;
using tsl::testing::IsOkAndHolds;
using tsl::testing::TensorFlowSrcRoot;
absl::Status ParseTextProto(absl::string_view text_proto,
Message* parsed_proto) {
TextFormat::Parser parser;
ArrayInputStream input_stream(text_proto.data(), text_proto.size());
if (parser.Parse(&input_stream, parsed_proto)) {
return absl::OkStatus();
}
parsed_proto->Clear();
return absl::InvalidArgumentError(
absl::StrCat("Could not parse text proto: ", text_proto));
}
absl::StatusOr<RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>>
ExtractFieldTags(absl::string_view chunked_field_text_proto) {
ChunkedField chunked_field;
TF_RETURN_IF_ERROR(ParseTextProto(chunked_field_text_proto, &chunked_field));
return chunked_field.field_tag();
}
TEST(FingerprintingTest, TestFieldTagMatchesInitialSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.DeleteSubrange(2, 2);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(2));
}
TEST(FingerprintingTest, TestFieldTagMatchesNoninitialSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.DeleteSubrange(0, 2);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(0));
}
TEST(FingerprintingTest, TestFieldTagMatchesIdenticalSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(4));
}
TEST(FingerprintingTest, TestFieldTagMatchesSuperSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.Add()->set_field(6);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(4));
}
TEST(FingerprintingTest, TestPruneChunkedMessageSingleTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FieldIndex field_one_field_tag;
field_one_field_tag.set_field(1);
FieldIndex repeated_field_field_tag;
repeated_field_field_tag.set_field(2);
FieldIndex repeated_field_index_field_tag;
repeated_field_index_field_tag.set_index(1);
RepeatedPtrField<FieldIndex> target_field_tags;
target_field_tags.Add(FieldIndex(field_one_field_tag));
target_field_tags.Add(FieldIndex(repeated_field_field_tag));
target_field_tags.Add(FieldIndex(repeated_field_index_field_tag));
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{target_field_tags}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
chunked_fields {
field_tag { field: 1 }
message { chunk_index: 1 }
}
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestPruneChunkedMessageMultiTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FieldIndex field_one_field_tag;
field_one_field_tag.set_field(1);
FieldIndex repeated_field_field_tag;
repeated_field_field_tag.set_field(2);
FieldIndex repeated_field_index_field_tag;
repeated_field_index_field_tag.set_index(1);
RepeatedPtrField<FieldIndex> target_one_field_tags;
target_one_field_tags.Add(FieldIndex(field_one_field_tag));
target_one_field_tags.Add(FieldIndex(repeated_field_field_tag));
target_one_field_tags.Add(FieldIndex(repeated_field_index_field_tag));
FieldIndex nested_map_bool_field_tag;
nested_map_bool_field_tag.set_field(7);
FieldIndex nested_map_bool_mapkey_field_tag;
nested_map_bool_mapkey_field_tag.mutable_map_key()->set_boolean(true);
FieldIndex string_field_field_tag;
string_field_field_tag.set_field(3);
RepeatedPtrField<FieldIndex> target_two_field_tags;
target_two_field_tags.Add(FieldIndex(nested_map_bool_field_tag));
target_two_field_tags.Add(FieldIndex(nested_map_bool_mapkey_field_tag));
target_two_field_tags.Add(FieldIndex(string_field_field_tag));
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{target_one_field_tags, target_two_field_tags}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
chunked_fields {
field_tag { field: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 7 }
field_tag { map_key { boolean: true } }
message { chunk_index: 2 }
}
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestPruneChunkedMessageNoTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info, {}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestSerializeProto) {
std::string many_fields_text_proto = R"pb(
string_field: "abc123"
)pb";
ManyFields many_fields;
TF_ASSERT_OK(ParseTextProto(many_fields_text_proto, &many_fields));
ASSERT_EQ(SerializeProto(many_fields), many_fields.SerializeAsString());
}
TEST(FingerprintingTest, TestHashFieldsV2) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
ManyFields many_fields;
TF_ASSERT_OK_AND_ASSIGN(uint64_t many_fields_hash,
HashFields(chunk_metadata.message(), reader,
chunks_info, {}, &many_fields));
ASSERT_EQ(many_fields_hash, 14850154939410192811U);
}
TEST(FingerprintingTest, TestHashGraphDef) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
GraphDef graph_def;
EXPECT_THAT(
HashGraphDef(&graph_def, chunk_metadata.message(), reader, chunks_info),
IsOkAndHolds(16782272393894422524U));
}
TEST(FingerprintingTest, TestHashSignatureDef) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
::tensorflow::protobuf::Map<std::string, SignatureDef> signature_def_map;
SignatureDef signature_def;
EXPECT_THAT(HashSignatureDef(signature_def_map, chunk_metadata.message(),
reader, chunks_info),
IsOkAndHolds(0));
}
TEST(FingerprintingTest, TestHashSavedObjectGraph) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
SavedObjectGraph saved_object_graph;
EXPECT_THAT(
HashSavedObjectGraph(&saved_object_graph, chunk_metadata.message(),
reader, chunks_info),
IsOkAndHolds(17454850744699451884U));
}
}  // namespace
}  // namespace tensorflow::saved_model::fingerprinting
1,768 | cpp | tensorflow/tensorflow | bundle_v2 | tensorflow/cc/saved_model/bundle_v2.cc | tensorflow/cc/saved_model/bundle_v2_test.cc | #ifndef TENSORFLOW_CC_SAVED_MODEL_BUNDLE_V2_H_
#define TENSORFLOW_CC_SAVED_MODEL_BUNDLE_V2_H_
#include <functional>
#include <memory>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
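// Represents a version 2 SavedModel loaded without a session: the
// MetaGraphDef, the checkpoint's trackable object graph, a reader for the
// variables bundle, and optional graph debug info.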
class SavedModelV2Bundle {
public:
using RestoreObjectsCallback =
std::function<Status(int, const TrackableObjectGraph::TrackableObject&)>;
static Status Load(const std::string& export_dir, SavedModelV2Bundle* bundle);
MetaGraphDef& meta_graph_def() { return meta_graph_def_; }
const SavedObjectGraph& saved_object_graph() {
return meta_graph_def().object_graph_def();
}
TrackableObjectGraph& trackable_object_graph() {
return trackable_object_graph_;
}
BundleReader* variable_reader() { return variable_reader_.get(); }
GraphDebugInfo* debug_info() { return debug_info_.get(); }
Status VisitObjectsToRestore(RestoreObjectsCallback callback);
private:
Status RecurseObjectsToRestore(
const SavedObject* saved_object, int saved_object_node_id,
const TrackableObjectGraph::TrackableObject* trackable_object,
std::string object_name,
absl::flat_hash_set<int>* seen_trackable_node_ids,
RestoreObjectsCallback callback);
MetaGraphDef meta_graph_def_;
TrackableObjectGraph trackable_object_graph_;
std::unique_ptr<BundleReader> variable_reader_;
std::unique_ptr<GraphDebugInfo> debug_info_;
};
}  // namespace tensorflow
#endif  // TENSORFLOW_CC_SAVED_MODEL_BUNDLE_V2_H_
#include "tensorflow/cc/saved_model/bundle_v2.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/fingerprinting.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/strcat.h"
namespace tensorflow {
namespace {
using strings::StrCat;
constexpr char kCCLoadBundleV2Label[] = "cc_load_bundle_v2";
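// Reads the TrackableObjectGraph proto stored under the reserved
// kObjectGraphProtoKey entry of the variables checkpoint.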
absl::Status ReadCheckpointObjectGraph(BundleReader* bundle_reader,
TrackableObjectGraph* object_graph) {
Tensor object_graph_tensor;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
bundle_reader->Lookup(kObjectGraphProtoKey, &object_graph_tensor),
"SavedModel checkpoint does not contain object graph.");
if (object_graph_tensor.dtype() != DT_STRING ||
object_graph_tensor.dims() != 0 ||
object_graph_tensor.NumElements() != 1) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"SavedModel checkpoint object graph was not the correct type.");
}
const tstring* object_graph_string = reinterpret_cast<const tstring*>(
object_graph_tensor.tensor_data().data());
if (!object_graph->ParseFromString(*object_graph_string)) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"SavedModel checkpoint object graph could not be deserialized.");
}
return absl::OkStatus();
}
}  // namespace
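// Loads the SavedModel proto, its debug info, and (when present) the
// variables checkpoint and object graph from `export_dir`, recording read
// metrics and the fingerprint along the way.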
absl::Status SavedModelV2Bundle::Load(const std::string& export_dir,
SavedModelV2Bundle* const bundle) {
metrics::SavedModelReadApi(kCCLoadBundleV2Label).IncrementBy(1);
SavedModel saved_model_proto;
TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
metrics::SavedModelReadPath().Set(export_dir);
if (saved_model_proto.meta_graphs_size() != 1) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
strings::StrCat(
"SavedModelV2 should have exactly one MetaGraphDef but actually ",
"contains ", saved_model_proto.meta_graphs_size()));
}
bundle->meta_graph_def_ =
std::move(*saved_model_proto.mutable_meta_graphs(0));
if (!port::kLittleEndian) {
TF_RETURN_IF_ERROR(
ByteSwapTensorContentInMetaGraphDef(&(bundle->meta_graph_def_)));
}
TF_RETURN_IF_ERROR(
ReadSavedModelDebugInfoIfPresent(export_dir, &bundle->debug_info_));
const std::string variables_dir =
io::JoinPath(export_dir, kSavedModelVariablesDirectory);
if (!Env::Default()->FileExists(variables_dir).ok()) {
LOG(INFO)
<< "No checkpoint found, assuming this is a program-only SavedModel";
} else {
const std::string variables_prefix =
io::JoinPath(variables_dir, kSavedModelVariablesFilename);
bundle->variable_reader_ =
std::make_unique<BundleReader>(Env::Default(), variables_prefix);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
bundle->variable_reader_->status(),
"Unable to load SavedModel variables checkpoint from ",
variables_prefix);
TF_RETURN_IF_ERROR(ReadCheckpointObjectGraph(
bundle->variable_reader_.get(), &bundle->trackable_object_graph_));
}
auto fingerprint_proto =
saved_model::fingerprinting::ReadSavedModelFingerprint(export_dir);
if (fingerprint_proto.ok()) {
metrics::SavedModelReadFingerprint().Set(
metrics::MakeFingerprintJson(fingerprint_proto.value()));
TF_ASSIGN_OR_RETURN(
std::string path_and_singleprint,
metrics::MakeSavedModelPathAndSingleprint(
export_dir, saved_model::fingerprinting::Singleprint(
fingerprint_proto.value())));
metrics::SavedModelReadPathAndSingleprint().Set(path_and_singleprint);
}
return absl::OkStatus();
}
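// Walks the saved object graph in lockstep with the checkpoint's trackable
// object graph, invoking `callback` for every object that has state to
// restore.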
absl::Status SavedModelV2Bundle::VisitObjectsToRestore(
RestoreObjectsCallback callback) {
if (saved_object_graph().nodes_size() == 0 ||
trackable_object_graph().nodes_size() == 0) {
return absl::OkStatus();
}
const SavedObject* root_saved_object = &saved_object_graph().nodes(0);
const TrackableObjectGraph::TrackableObject* root_trackable_object =
&trackable_object_graph().nodes(0);
absl::flat_hash_set<int> trackable_node_ids;
return RecurseObjectsToRestore(root_saved_object, 0, root_trackable_object,
std::string(), &trackable_node_ids,
std::move(callback));
}
absl::Status SavedModelV2Bundle::RecurseObjectsToRestore(
const SavedObject* saved_object, int saved_object_node_id,
const TrackableObjectGraph::TrackableObject* trackable_object,
std::string object_name, absl::flat_hash_set<int>* seen_trackable_node_ids,
RestoreObjectsCallback callback) {
if (saved_object_node_id != 0 &&
(trackable_object->attributes_size() > 0 ||
trackable_object->slot_variables_size() > 0)) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
callback(saved_object_node_id, *trackable_object), "Unable to restore ",
object_name);
}
for (const auto& trackable_child_ref : trackable_object->children()) {
const auto& local_name = trackable_child_ref.local_name();
std::string child_name;
if (object_name.empty()) {
child_name = local_name;
} else {
child_name = strings::StrCat(object_name, ".", local_name);
}
int trackable_child_node_id = trackable_child_ref.node_id();
if (!seen_trackable_node_ids->insert(trackable_child_node_id).second) {
continue;
}
if (trackable_child_node_id < 0 ||
trackable_child_node_id >= trackable_object_graph().nodes_size()) {
return errors::FailedPrecondition(
strings::StrCat("Illegal trackable child node id for ", child_name));
}
const auto* trackable_child =
&trackable_object_graph().nodes(trackable_child_node_id);
int saved_child_node_id = -1;
const SavedObject* saved_child = nullptr;
for (const auto& saved_child_ref : saved_object->children()) {
if (saved_child_ref.local_name() == local_name) {
saved_child_node_id = saved_child_ref.node_id();
if (saved_child_node_id >= 0 &&
saved_child_node_id < saved_object_graph().nodes_size()) {
saved_child = &saved_object_graph().nodes(saved_child_node_id);
}
break;
}
}
if (!saved_child) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Could not find saved object to restore for ",
child_name));
}
TF_RETURN_IF_ERROR(RecurseObjectsToRestore(
saved_child, saved_child_node_id, trackable_child, child_name,
seen_trackable_node_ids, callback));
}
return absl::OkStatus();
}
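// Editor's sketch: driving VisitObjectsToRestore with a callback, mirroring
// the pattern used in the tests below (variable names are illustrative only):
//
//   TF_RETURN_IF_ERROR(bundle.VisitObjectsToRestore(
//       [&](int node_id,
//           const TrackableObjectGraph::TrackableObject& obj)
//           -> absl::Status {
//         for (const auto& attr : obj.attributes()) {
//           // attr.checkpoint_key() can be looked up via variable_reader().
//         }
//         return absl::OkStatus();
//       }));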
} | #include "tensorflow/cc/saved_model/bundle_v2.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "json/reader.h"
#include "json/value.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kTestData[] = "cc/saved_model/testdata";
class BundleV2Test : public ::testing::Test {
protected:
BundleV2Test() = default;
void RestoreVarsAndVerify(SavedModelV2Bundle* bundle,
std::vector<std::string> expected_names) {
using RestoredVarType = std::tuple<int, std::string, std::string>;
std::vector<RestoredVarType> restored_vars;
TF_ASSERT_OK(bundle->VisitObjectsToRestore(
[&](int saved_node_id,
const TrackableObjectGraph::TrackableObject& trackable_object)
-> absl::Status {
for (const auto& attr : trackable_object.attributes()) {
if (attr.name() == "VARIABLE_VALUE") {
restored_vars.emplace_back(saved_node_id, attr.full_name(),
attr.checkpoint_key());
}
}
return absl::OkStatus();
}));
for (const auto& expected_name : expected_names) {
EXPECT_EQ(1, std::count_if(restored_vars.begin(), restored_vars.end(),
[&](RestoredVarType t) {
return std::get<1>(t) == expected_name;
}));
}
for (const auto& restored_var : restored_vars) {
const auto& saved_node =
bundle->saved_object_graph().nodes(std::get<0>(restored_var));
EXPECT_EQ(std::get<1>(restored_var), saved_node.variable().name());
Tensor value;
TF_ASSERT_OK(
bundle->variable_reader()->Lookup(std::get<2>(restored_var), &value));
}
}
};
TEST_F(BundleV2Test, LoadsVarsAndArithmeticObjectGraph) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), kTestData, "VarsAndArithmeticObjectGraph");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_GT(bundle.trackable_object_graph().nodes_size(), 0);
RestoreVarsAndVerify(&bundle, {"variable_x", "variable_y", "child_variable"});
}
TEST_F(BundleV2Test, LoadsCyclicModule) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), kTestData, "CyclicModule");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_GT(bundle.trackable_object_graph().nodes_size(), 0);
RestoreVarsAndVerify(&bundle, {"MyVariable"});
}
TEST_F(BundleV2Test, UpdatesMetrics) {
const std::string kCCLoadBundleV2Label = "cc_load_bundle_v2";
const int read_count = metrics::SavedModelReadCount("2").value();
const int api_count =
metrics::SavedModelReadApi(kCCLoadBundleV2Label).value();
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), kTestData, "VarsAndArithmeticObjectGraph");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count + 1);
EXPECT_EQ(metrics::SavedModelReadApi(kCCLoadBundleV2Label).value(),
api_count + 1);
EXPECT_EQ(metrics::SavedModelReadPath().value(), export_dir);
Json::Value fingerprint = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(metrics::SavedModelReadFingerprint().value(), fingerprint);
EXPECT_EQ(fingerprint["saved_model_checksum"].asUInt64(),
15788619162413586750ULL);
EXPECT_EQ(fingerprint["graph_def_program_hash"].asUInt64(),
706963557435316516ULL);
EXPECT_EQ(fingerprint["signature_def_hash"].asUInt64(),
5693392539583495303ULL);
EXPECT_EQ(fingerprint["saved_object_graph_hash"].asUInt64(),
12074714563970609759ULL);
EXPECT_EQ(fingerprint["checkpoint_hash"].asUInt64(), 10788359570789890102ULL);
TF_ASSERT_OK_AND_ASSIGN(
auto path_and_singleprint,
metrics::ParseSavedModelPathAndSingleprint(
metrics::SavedModelReadPathAndSingleprint().value()));
auto [path, singleprint] = path_and_singleprint;
EXPECT_TRUE(absl::StrContains(
path, absl::StrCat(kTestData, "/VarsAndArithmeticObjectGraph")));
EXPECT_EQ(singleprint,
"706963557435316516/"
"5693392539583495303/"
"12074714563970609759/"
"10788359570789890102");
}
}
} |
1,769 | cpp | tensorflow/tensorflow | fingerprinting | tensorflow/cc/saved_model/fingerprinting.cc | tensorflow/cc/saved_model/fingerprinting_test.cc | #ifndef TENSORFLOW_CC_SAVED_MODEL_FINGERPRINTING_H_
#define TENSORFLOW_CC_SAVED_MODEL_FINGERPRINTING_H_
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
namespace tensorflow::saved_model::fingerprinting {
absl::StatusOr<FingerprintDef> CreateFingerprintDef(
absl::string_view export_dir);
absl::StatusOr<FingerprintDef> ReadSavedModelFingerprint(
absl::string_view export_dir);
std::string Singleprint(uint64_t graph_def_program_hash,
uint64_t signature_def_hash,
uint64_t saved_object_graph_hash,
uint64_t checkpoint_hash);
std::string Singleprint(const FingerprintDef& fingerprint);
absl::StatusOr<std::string> Singleprint(absl::string_view export_dir);
}
#endif
#include "tensorflow/cc/saved_model/fingerprinting.h"
#include <cstdint>
#include <string>
#include "absl/container/btree_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/regularization/simple_delete.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#if !defined(PLATFORM_WINDOWS) && !defined(__APPLE__)
#include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#endif
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
// Hashes the raw bytes of the checkpoint's .index file; returns 0 (after
// logging a warning) when the file cannot be read, e.g. for models that ship
// no variables checkpoint.
uint64_t HashCheckpointIndexFile(absl::string_view model_dir) {
std::string meta_filename = MetaFilename(io::JoinPath(
model_dir, kSavedModelVariablesDirectory, kSavedModelVariablesFilename));
std::string data;
absl::Status read_status =
ReadFileToString(Env::Default(), meta_filename, &data);
if (read_status.ok()) {
return tensorflow::Fingerprint64(data);
} else {
LOG(WARNING) << "Failed to read checkpoint file: " << read_status;
return 0;
}
}
uint64_t HashSavedModel(const SavedModel& saved_model) {
std::string saved_model_serialized;
{
StringOutputStream stream(&saved_model_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
saved_model.SerializeToCodedStream(&output);
}
return tensorflow::Fingerprint64(saved_model_serialized);
}
uint64_t RegularizeAndHashSignatureDefs(
const Map<std::string, SignatureDef>& signature_def_map) {
absl::btree_map<std::string, SignatureDef> sorted_signature_defs;
sorted_signature_defs.insert(signature_def_map.begin(),
signature_def_map.end());
uint64_t result_hash = 0;
for (const auto& item : sorted_signature_defs) {
result_hash =
FingerprintCat64(result_hash, tensorflow::Fingerprint64(item.first));
std::string signature_def_serialized;
{
StringOutputStream stream(&signature_def_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
item.second.SerializeToCodedStream(&output);
}
result_hash = FingerprintCat64(
result_hash, tensorflow::Fingerprint64(signature_def_serialized));
}
return result_hash;
}
absl::StatusOr<uint64_t> RegularizeAndHashSavedObjectGraph(
const SavedObjectGraph& object_graph_def) {
absl::btree_map<int64_t, std::string> uid_to_function_names;
for (const auto& [name, concrete_function] :
object_graph_def.concrete_functions()) {
TF_ASSIGN_OR_RETURN(int64_t uid, graph_regularization::GetSuffixUID(name));
uid_to_function_names.insert({uid, name});
}
uint64_t result_hash = 0;
for (const auto& [uid, function_name] : uid_to_function_names) {
result_hash = FingerprintCat64(result_hash,
tensorflow::Fingerprint64(absl::StripSuffix(
function_name, std::to_string(uid))));
std::string concrete_function_serialized;
{
StringOutputStream stream(&concrete_function_serialized);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
object_graph_def.concrete_functions()
.at(function_name)
.SerializeToCodedStream(&output);
}
result_hash = FingerprintCat64(
result_hash, tensorflow::Fingerprint64(concrete_function_serialized));
}
return result_hash;
}
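// Editor's note: both regularized hashes above use the same folding pattern,
// deterministic proto serialization combined into a running fingerprint. A
// standalone sketch of the pattern:
//
//   uint64_t h = 0;
//   h = tensorflow::FingerprintCat64(h, tensorflow::Fingerprint64(key));
//   h = tensorflow::FingerprintCat64(h, tensorflow::Fingerprint64(value));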
absl::StatusOr<FingerprintDef> CreateFingerprintDefPb(
absl::string_view export_dir, std::string pb_file) {
const int kFingerprintProducer = 1;
SavedModel saved_model;
TF_RETURN_IF_ERROR(ReadBinaryProto(Env::Default(), pb_file, &saved_model));
FingerprintDef fingerprint_def;
MetaGraphDef* metagraph = saved_model.mutable_meta_graphs(0);
fingerprint_def.set_saved_model_checksum(HashSavedModel(saved_model));
graph_regularization::SimpleDelete(*metagraph->mutable_graph_def());
fingerprint_def.set_graph_def_program_hash(
graph_regularization::ComputeHash(metagraph->graph_def()));
fingerprint_def.set_signature_def_hash(
RegularizeAndHashSignatureDefs(metagraph->signature_def()));
TF_ASSIGN_OR_RETURN(
uint64_t object_graph_hash,
RegularizeAndHashSavedObjectGraph(metagraph->object_graph_def()));
fingerprint_def.set_saved_object_graph_hash(object_graph_hash);
fingerprint_def.set_checkpoint_hash(HashCheckpointIndexFile(export_dir));
VersionDef* version = fingerprint_def.mutable_version();
version->set_producer(kFingerprintProducer);
return fingerprint_def;
}
}
absl::StatusOr<FingerprintDef> CreateFingerprintDef(
absl::string_view export_dir) {
std::string prefix = io::JoinPath(export_dir, kSavedModelFilenamePrefix);
#if !defined(PLATFORM_WINDOWS) && !defined(__APPLE__)
TF_ASSIGN_OR_RETURN(bool only_contains_pb,
tools::proto_splitter::OnlyContainsPb(prefix));
if (only_contains_pb) {
return CreateFingerprintDefPb(export_dir, absl::StrCat(prefix, ".pb"));
}
return CreateFingerprintDefCpb(export_dir, absl::StrCat(prefix, ".cpb"));
#else
return CreateFingerprintDefPb(export_dir, absl::StrCat(prefix, ".pb"));
#endif
}
absl::StatusOr<FingerprintDef> ReadSavedModelFingerprint(
absl::string_view export_dir) {
const std::string fingerprint_pb_path =
io::JoinPath(export_dir, kFingerprintFilenamePb);
TF_RETURN_IF_ERROR(Env::Default()->FileExists(fingerprint_pb_path));
FingerprintDef fingerprint_proto;
absl::Status result =
ReadBinaryProto(Env::Default(), fingerprint_pb_path, &fingerprint_proto);
if (!result.ok()) return result;
return fingerprint_proto;
}
std::string Singleprint(uint64_t graph_def_program_hash,
uint64_t signature_def_hash,
uint64_t saved_object_graph_hash,
uint64_t checkpoint_hash) {
return std::to_string(graph_def_program_hash) + "/" +
std::to_string(signature_def_hash) + "/" +
std::to_string(saved_object_graph_hash) + "/" +
std::to_string(checkpoint_hash);
}
std::string Singleprint(const FingerprintDef& fingerprint) {
return Singleprint(
fingerprint.graph_def_program_hash(), fingerprint.signature_def_hash(),
fingerprint.saved_object_graph_hash(), fingerprint.checkpoint_hash());
}
absl::StatusOr<std::string> Singleprint(absl::string_view export_dir) {
TF_ASSIGN_OR_RETURN(const FingerprintDef fingerprint_def,
ReadSavedModelFingerprint(export_dir));
return Singleprint(fingerprint_def);
}
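// Editor's sketch: the three Singleprint overloads compose; given a
// fingerprint file on disk, both calls below yield the same "h1/h2/h3/h4"
// string (as verified by the TestSingleprint test further down):
//
//   TF_ASSIGN_OR_RETURN(std::string sp1, Singleprint(export_dir));
//   TF_ASSIGN_OR_RETURN(FingerprintDef fp,
//                       ReadSavedModelFingerprint(export_dir));
//   std::string sp2 = Singleprint(fp);  // identical to sp1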
} | #include "tensorflow/cc/saved_model/fingerprinting.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
absl::StatusOr<SavedModel> ReadSavedModel(absl::string_view file_dir) {
std::string file_path = io::JoinPath(file_dir, "saved_model.pb");
std::string serialized_saved_model;
auto status =
ReadFileToString(Env::Default(), file_path, &serialized_saved_model);
if (!status.ok()) {
return status;
}
SavedModel saved_model_pb;
saved_model_pb.ParseFromString(serialized_saved_model);
return saved_model_pb;
}
TEST(FingerprintingTest, TestCreateFingerprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_GT(fingerprint_def.saved_model_checksum(), 0);
EXPECT_EQ(fingerprint_def.graph_def_program_hash(), 10127142238652115842U);
EXPECT_EQ(fingerprint_def.signature_def_hash(), 15570736222402453744U);
EXPECT_EQ(fingerprint_def.saved_object_graph_hash(), 3678101440349108924U);
EXPECT_GT(fingerprint_def.checkpoint_hash(), 0);
}
TEST(FingerprintingTest, TestCompareFingerprintForTwoModelSavedTwice) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
const std::string export_dir2 = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert2");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb2,
ReadSavedModel(export_dir2));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def2,
CreateFingerprintDef(export_dir2));
EXPECT_GT(fingerprint_def.saved_model_checksum(), 0);
EXPECT_GT(fingerprint_def2.saved_model_checksum(), 0);
EXPECT_EQ(fingerprint_def.graph_def_program_hash(),
fingerprint_def2.graph_def_program_hash());
EXPECT_EQ(fingerprint_def.signature_def_hash(),
fingerprint_def2.signature_def_hash());
EXPECT_EQ(fingerprint_def.saved_object_graph_hash(),
fingerprint_def2.saved_object_graph_hash());
}
TEST(FingerprintingTest, TestFingerprintComputationDoesNotMutateModel) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def2,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.saved_model_checksum(),
fingerprint_def2.saved_model_checksum());
}
TEST(FingerprintingTest, TestFingerprintHasVersion) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.version().producer(), 1);
}
TEST(FingerprintingTest, TestHashCheckpointForModelWithNoVariables) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "bert1");
TF_ASSERT_OK_AND_ASSIGN(SavedModel saved_model_pb,
ReadSavedModel(export_dir));
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_def,
CreateFingerprintDef(export_dir));
EXPECT_EQ(fingerprint_def.checkpoint_hash(), 0);
}
TEST(FingerprintingTest, TestReadValidFingerprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_pb,
ReadSavedModelFingerprint(export_dir));
EXPECT_EQ(fingerprint_pb.saved_model_checksum(), 15788619162413586750u);
}
TEST(FingerprintingTest, TestReadNonexistentFingerprint) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), "cc/saved_model/testdata", "AssetModule");
EXPECT_EQ(ReadSavedModelFingerprint(export_dir).status().code(),
absl::StatusCode::kNotFound);
}
TEST(FingerprintingTest, TestSingleprint) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
"VarsAndArithmeticObjectGraph");
const std::string const_singleprint =
"706963557435316516/5693392539583495303/12074714563970609759/"
"10788359570789890102";
TF_ASSERT_OK_AND_ASSIGN(std::string singleprint, Singleprint(export_dir));
EXPECT_EQ(singleprint, const_singleprint);
TF_ASSERT_OK_AND_ASSIGN(FingerprintDef fingerprint_pb,
ReadSavedModelFingerprint(export_dir));
EXPECT_EQ(Singleprint(fingerprint_pb), const_singleprint);
EXPECT_EQ(Singleprint(fingerprint_pb.graph_def_program_hash(),
fingerprint_pb.signature_def_hash(),
fingerprint_pb.saved_object_graph_hash(),
fingerprint_pb.checkpoint_hash()),
const_singleprint);
}
}
} |
1,770 | cpp | tensorflow/tensorflow | reader | tensorflow/cc/saved_model/reader.cc | tensorflow/cc/saved_model/reader_test.cc | #ifndef TENSORFLOW_CC_SAVED_MODEL_READER_H_
#define TENSORFLOW_CC_SAVED_MODEL_READER_H_
#include <memory>
#include <unordered_set>
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
namespace tensorflow {
Status ReadSavedModel(absl::string_view export_dir,
SavedModel* saved_model_proto);
absl::StatusOr<MetaGraphDef*> FindMetaGraphDef(
const std::unordered_set<string>& tags, SavedModel* saved_model_proto);
Status ReadMetaGraphDefFromSavedModel(absl::string_view export_dir,
const std::unordered_set<string>& tags,
MetaGraphDef* meta_graph_def);
Status ReadSavedModelDebugInfoIfPresent(
absl::string_view export_dir,
std::unique_ptr<GraphDebugInfo>* debug_info_proto);
}
#endif
#include "tensorflow/cc/saved_model/reader.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#define IS_OSS true
namespace tensorflow {
absl::StatusOr<MetaGraphDef*> FindMetaGraphDef(
const std::unordered_set<string>& tags, SavedModel* saved_model_proto) {
LOG(INFO) << "Reading meta graph with tags { " << absl::StrJoin(tags, " ")
<< " }";
for (MetaGraphDef& graph_def : *saved_model_proto->mutable_meta_graphs()) {
std::unordered_set<string> graph_tags;
for (const string& tag : graph_def.meta_info_def().tags()) {
graph_tags.insert(tag);
}
if (graph_tags == tags) {
MetaGraphDef* meta_graph_def = &graph_def;
if (!port::kLittleEndian) {
TF_RETURN_IF_ERROR(ByteSwapTensorContentInMetaGraphDef(meta_graph_def));
}
return meta_graph_def;
}
}
return Status(
absl::StatusCode::kNotFound,
strings::StrCat(
"Could not find meta graph def matching supplied tags: { ",
absl::StrJoin(tags, " "),
" }. To inspect available tag-sets in the SavedModel, please "
"use the SavedModel CLI: `saved_model_cli`"));
}
Status ReadSavedModel(absl::string_view export_dir,
SavedModel* saved_model_proto) {
LOG(INFO) << "Reading SavedModel from: " << export_dir;
if (IS_OSS) {
const std::string saved_model_pb_path =
io::JoinPath(export_dir, kSavedModelFilenamePb);
TF_ASSIGN_OR_RETURN(
bool saved_model_pb_exists,
internal::FileExists(Env::Default(), saved_model_pb_path));
if (saved_model_pb_exists) {
Status result = ReadBinaryProto(Env::Default(), saved_model_pb_path,
saved_model_proto);
if (result.ok()) {
metrics::SavedModelReadCount(
saved_model::GetWriteVersion(*saved_model_proto))
.IncrementBy(1);
}
return result;
}
}
const std::string saved_model_pbtxt_path =
io::JoinPath(export_dir, kSavedModelFilenamePbTxt);
auto saved_model_pbtxt_exists =
internal::FileExists(Env::Default(), saved_model_pbtxt_path);
if (saved_model_pbtxt_exists.value_or(false)) {
Status result = ReadTextProto(Env::Default(), saved_model_pbtxt_path,
saved_model_proto);
if (result.ok()) {
metrics::SavedModelReadCount(
saved_model::GetWriteVersion(*saved_model_proto))
.IncrementBy(1);
}
return result;
}
  // Placeholder for additional, non-OSS read paths; intentionally empty in
  // open-source builds.
  if (!IS_OSS) {
  }
return Status(
absl::StatusCode::kNotFound,
strings::StrCat("Could not find SavedModel .pb or .pbtxt at supplied "
"export directory path: ",
export_dir,
". Check that "
"the directory exists and that you have the right "
"permissions for accessing it."));
}
Status ReadMetaGraphDefFromSavedModel(absl::string_view export_dir,
const std::unordered_set<string>& tags,
MetaGraphDef* const meta_graph_def) {
SavedModel saved_model_proto;
TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
TF_ASSIGN_OR_RETURN(MetaGraphDef * m,
FindMetaGraphDef(tags, &saved_model_proto));
*meta_graph_def = std::move(*m);
return absl::OkStatus();
}
Status ReadSavedModelDebugInfoIfPresent(
absl::string_view export_dir,
std::unique_ptr<GraphDebugInfo>* debug_info_proto) {
LOG(INFO) << "Reading SavedModel debug info (if present) from: "
<< export_dir;
const string debug_info_pb_path =
io::JoinPath(export_dir, "debug", "saved_model_debug_info.pb");
TF_ASSIGN_OR_RETURN(bool debug_info_pb_exists,
internal::FileExists(Env::Default(), debug_info_pb_path));
if (debug_info_pb_exists) {
GraphDebugInfo debug_info;
TF_RETURN_IF_ERROR(
ReadBinaryProto(Env::Default(), debug_info_pb_path, &debug_info));
*debug_info_proto = std::make_unique<GraphDebugInfo>(std::move(debug_info));
}
return absl::OkStatus();
}
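// Editor's sketch: the typical call sequence into this reader, as exercised
// by the tests below ("serve" is the standard serving tag):
//
//   MetaGraphDef meta_graph_def;
//   TF_RETURN_IF_ERROR(ReadMetaGraphDefFromSavedModel(
//       export_dir, {"serve"}, &meta_graph_def));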
} | #include "tensorflow/cc/saved_model/reader.h"
#include <gmock/gmock.h>
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/tag_constants.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
namespace tensorflow {
namespace {
string TestDataPbTxt() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"half_plus_two_pbtxt", "00000123");
}
string TestDataSharded() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"half_plus_two", "00000123");
}
string ChunkedSavedModel() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"chunked_saved_model", "chunked_model");
}
string NonChunkedSavedModel() {
return io::JoinPath("tensorflow", "cc", "saved_model", "testdata",
"chunked_saved_model", "non_chunked_model");
}
class ReaderTest : public ::testing::Test {
protected:
ReaderTest() = default;
void CheckMetaGraphDef(const MetaGraphDef& meta_graph_def) {
const auto& tags = meta_graph_def.meta_info_def().tags();
EXPECT_TRUE(std::find(tags.begin(), tags.end(), kSavedModelTagServe) !=
tags.end());
EXPECT_NE(meta_graph_def.meta_info_def().tensorflow_version(), "");
EXPECT_EQ(
meta_graph_def.signature_def().at("serving_default").method_name(),
"tensorflow/serving/predict");
}
};
TEST_F(ReaderTest, TagMatch) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
TF_ASSERT_OK(ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
&meta_graph_def));
CheckMetaGraphDef(meta_graph_def);
}
TEST_F(ReaderTest, NoTagMatch) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st = ReadMetaGraphDefFromSavedModel(export_dir, {"missing-tag"},
&meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.message(),
"Could not find meta graph def matching supplied tags: { missing-tag }"))
<< st.message();
}
TEST_F(ReaderTest, NoTagMatchMultiple) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st = ReadMetaGraphDefFromSavedModel(
export_dir, {kSavedModelTagServe, "missing-tag"}, &meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_TRUE(absl::StrContains(
st.message(), "Could not find meta graph def matching supplied tags: "))
<< st.message();
}
TEST_F(ReaderTest, InvalidExportPath) {
MetaGraphDef meta_graph_def;
const string export_dir = GetDataDependencyFilepath("missing-path");
Status st = ReadMetaGraphDefFromSavedModel(export_dir, {kSavedModelTagServe},
&meta_graph_def);
EXPECT_FALSE(st.ok());
}
TEST_F(ReaderTest, ReadSavedModelDebugInfoIfPresent) {
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
std::unique_ptr<GraphDebugInfo> debug_info_proto;
TF_ASSERT_OK(ReadSavedModelDebugInfoIfPresent(export_dir, &debug_info_proto));
}
TEST_F(ReaderTest, MetricsNotUpdatedFailedRead) {
MetaGraphDef meta_graph_def;
const int read_count_v1 = metrics::SavedModelReadCount("1").value();
const int read_count_v2 = metrics::SavedModelReadCount("2").value();
const string export_dir = GetDataDependencyFilepath("missing-path");
Status st =
ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def);
EXPECT_FALSE(st.ok());
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1);
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count_v2);
}
TEST_F(ReaderTest, MetricsUpdatedSuccessfulRead) {
MetaGraphDef meta_graph_def;
const int read_count_v1 = metrics::SavedModelReadCount("1").value();
const string export_dir = GetDataDependencyFilepath(TestDataSharded());
Status st =
ReadMetaGraphDefFromSavedModel(export_dir, {"serve"}, &meta_graph_def);
EXPECT_EQ(metrics::SavedModelReadCount("1").value(), read_count_v1 + 1);
}
}
} |
1,771 | cpp | tensorflow/tensorflow | client_session | tensorflow/cc/client/client_session.cc | tensorflow/cc/client/client_session_test.cc | #ifndef XLA_PYTHON_IFRT_PROXY_CLIENT_CLIENT_SESSION_H_
#define XLA_PYTHON_IFRT_PROXY_CLIENT_CLIENT_SESSION_H_
#include <memory>
#include "absl/status/status.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
namespace xla {
namespace ifrt {
namespace proxy {
class ClientSession {
public:
using Response = std::shared_ptr<IfrtResponse>;
virtual ~ClientSession() = default;
virtual Future<Response> Enqueue(std::unique_ptr<IfrtRequest> request) = 0;
virtual void Finish(const absl::Status& s) {}
};
}
}
}
#endif
#include "tensorflow/cc/client/client_session.h"
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class ClientSession::Impl {
private:
friend class ClientSession;
Impl(Session* session, std::shared_ptr<Graph> graph)
: session_(session), graph_(std::move(graph)) {}
static SessionOptions MakeDefaultSessionOptions(const string& target);
Status MaybeExtendGraph() const;
std::unique_ptr<Session> session_;
std::shared_ptr<Graph> graph_;
mutable mutex mu_;
mutable int last_num_graph_nodes_ TF_GUARDED_BY(mu_) = 0;
};
ClientSession::ClientSession(const Scope& scope, const string& target)
: ClientSession(scope, Impl::MakeDefaultSessionOptions(target)) {}
ClientSession::ClientSession(const Scope& scope) : ClientSession(scope, "") {}
ClientSession::ClientSession(const Scope& scope,
const SessionOptions& session_options) {
Session* new_session;
Status status = NewSession(session_options, &new_session);
TF_CHECK_OK(status) << status;
impl_.reset(new Impl(new_session, scope.graph_as_shared_ptr()));
CHECK_NOTNULL(impl()->session_.get());
}
ClientSession::~ClientSession() {}
SessionOptions ClientSession::Impl::MakeDefaultSessionOptions(
const string& target) {
SessionOptions options;
options.env = Env::Default();
options.target = target;
return options;
}
Status ClientSession::Run(const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(FeedType{}, fetch_outputs, {}, outputs);
}
Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
std::vector<Tensor>* outputs) const {
return Run(inputs, fetch_outputs, {}, outputs);
}
Status ClientSession::Run(const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs) const {
return Run(RunOptions(), inputs, fetch_outputs, run_outputs, outputs,
nullptr);
}
// Extends the underlying session with any nodes added to the graph since the
// last extension, so ops built after construction become runnable.
Status ClientSession::Impl::MaybeExtendGraph() const {
mutex_lock l(mu_);
int num_nodes = graph_->num_node_ids();
if (num_nodes > last_num_graph_nodes_) {
GraphDef graph_def;
graph_->ToGraphDefSubRange(&graph_def, last_num_graph_nodes_);
last_num_graph_nodes_ = num_nodes;
return session_->Extend(graph_def);
}
return absl::OkStatus();
}
Status ClientSession::Run(const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs,
std::vector<Tensor>* outputs,
RunMetadata* run_metadata) const {
std::vector<std::pair<string, Tensor>> feeds;
feeds.reserve(inputs.size());
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(std::piecewise_construct,
std::forward_as_tuple(feed.first.name()),
std::forward_as_tuple(feed.second.tensor));
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata);
}
Status ClientSession::Run(
const RunOptions& run_options, const FeedType& inputs,
const std::vector<Output>& fetch_outputs,
const std::vector<Operation>& run_outputs, std::vector<Tensor>* outputs,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& threadpool_options) const {
std::vector<std::pair<string, Tensor>> feeds;
for (auto const& feed : inputs) {
TF_RETURN_IF_ERROR(feed.second.status);
feeds.emplace_back(feed.first.name(), feed.second.tensor);
}
std::vector<string> output_tensor_names;
output_tensor_names.reserve(fetch_outputs.size());
for (auto const& output : fetch_outputs) {
output_tensor_names.push_back(output.name());
}
std::vector<string> target_node_names;
target_node_names.reserve(run_outputs.size());
for (auto const& output : run_outputs) {
target_node_names.push_back(output.node()->name());
}
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->Run(run_options, feeds, output_tensor_names,
target_node_names, outputs, run_metadata,
threadpool_options);
}
Status ClientSession::MakeCallable(const CallableOptions& callable_options,
CallableHandle* out_handle) {
TF_RETURN_IF_ERROR(impl()->MaybeExtendGraph());
return impl()->session_->MakeCallable(callable_options, out_handle);
}
Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata);
}
Status ClientSession::RunCallable(CallableHandle handle,
const std::vector<Tensor>& feed_tensors,
std::vector<Tensor>* fetch_tensors,
RunMetadata* run_metadata,
const thread::ThreadPoolOptions& options) {
return impl()->session_->RunCallable(handle, feed_tensors, fetch_tensors,
run_metadata, options);
}
Status ClientSession::ReleaseCallable(CallableHandle handle) {
return impl()->session_->ReleaseCallable(handle);
}
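// Editor's sketch: the basic feed/fetch flow this class supports (mirrors the
// Basic and Feed tests below):
//
//   Scope root = Scope::NewRootScope();
//   auto a = ops::Placeholder(root, DT_INT32);
//   auto c = ops::Add(root, a, 1);
//   ClientSession session(root);
//   std::vector<Tensor> outputs;
//   TF_CHECK_OK(session.Run({{a, 41}}, {c}, &outputs));  // outputs[0] == 42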
} | #define EIGEN_USE_THREADS
#include "tensorflow/cc/client/client_session.h"
#include <utility>
#include <vector>
#include "absl/synchronization/barrier.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/core/threadpool_options.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
namespace {
using ops::Add;
using ops::BatchMatMul;
using ops::Const;
using ops::Mul;
using ops::Placeholder;
using ops::Sub;
tensorflow::SessionOptions GetSessionOptions() {
tensorflow::SessionOptions options;
options.config.mutable_experimental()->set_disable_optimize_for_static_graph(
true);
return options;
}
class CustomThreadPoolImpl : public thread::ThreadPoolInterface {
public:
explicit CustomThreadPoolImpl(int numThreads) {
underlying_threadpool_.reset(new thread::ThreadPool(
tensorflow::Env::Default(), "custom_threadpool", numThreads));
num_schedule_called_ = 0;
}
void Schedule(std::function<void()> fn) override {
num_schedule_called_ += 1;
underlying_threadpool_->Schedule(std::move(fn));
}
void ScheduleWithHint(std::function<void()> fn, int start, int end) override {
num_schedule_called_ += 1;
underlying_threadpool_->ScheduleWithHint(std::move(fn), start, end);
}
void Cancel() override {}
int NumThreads() const override {
return underlying_threadpool_->NumThreads();
}
int CurrentThreadId() const override {
return underlying_threadpool_->CurrentThreadId();
}
int GetNumScheduleCalled() { return num_schedule_called_; }
private:
int num_schedule_called_;
std::unique_ptr<tensorflow::thread::ThreadPool> underlying_threadpool_;
};
TEST(ClientSessionTest, Basic) {
Scope root = Scope::NewRootScope();
auto c = Const(root, {{1, 1}});
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({1, 1}, {1, 2}));
}
TEST(ClientSessionTest, Feed) {
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32);
auto b = Placeholder(root, DT_INT32);
auto c = Add(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({{a, 1}, {b, 41}}, {c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({42}, {}));
}
TEST(ClientSessionTest, Extend) {
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32, Placeholder::Shape({2}));
auto c = Add(root, a, {2, 2});
ClientSession session(root, GetSessionOptions());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({{a, {1, 1}}}, {c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({3, 3}, {2}));
auto d = Add(root, c, {39, 39});
outputs.clear();
TF_EXPECT_OK(session.Run({{a, {-10, 1}}}, {d}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({31, 42}, {2}));
}
TEST(ClientSessionTest, MultiThreadedWithDefaultThreadpool) {
Scope root = Scope::NewRootScope();
auto a = Add(root, {1, 2}, {3, 4});
auto b = Mul(root, {1, 2}, {3, 4});
ClientSession session(root, GetSessionOptions());
{
thread::ThreadPool thread_pool(Env::Default(), "pool", 2);
thread_pool.Schedule([&session, a]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({a}, &outputs));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({4, 6}, {2}));
});
thread_pool.Schedule([&session, b]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({b}, &outputs));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({3, 8}, {2}));
});
}
auto c = Sub(root, b, a);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run({c}, &outputs));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({-1, 2}, {2}));
}
TEST(ClientSessionTest, MultiThreadedWithCustomThreadpool) {
Scope root = Scope::NewRootScope();
int num_threads = 3;
auto a = Add(root, {1, 2}, {3, 4});
auto b = Mul(root, {1, 2}, {3, 4});
ClientSession session(root, GetSessionOptions());
auto inter_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(inter_op_threadpool->GetNumScheduleCalled(), 0);
auto intra_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(intra_op_threadpool->GetNumScheduleCalled(), 0);
tensorflow::thread::ThreadPoolOptions threadPoolOptions;
threadPoolOptions.inter_op_threadpool = inter_op_threadpool.get();
threadPoolOptions.intra_op_threadpool = intra_op_threadpool.get();
{
thread::ThreadPool thread_pool(Env::Default(), "pool", 2);
thread_pool.Schedule([&session, a]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(RunOptions(), ClientSession::FeedType{}, {a}, {},
&outputs, nullptr, thread::ThreadPoolOptions()));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({4, 6}, {2}));
});
thread_pool.Schedule([&session, b]() {
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(RunOptions(), ClientSession::FeedType{}, {b}, {},
&outputs, nullptr, thread::ThreadPoolOptions()));
test::ExpectTensorEqual<int>(outputs[0],
test::AsTensor<int>({3, 8}, {2}));
});
}
auto c = Sub(root, b, a);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(RunOptions(), ClientSession::FeedType{}, {c}, {},
&outputs, nullptr, thread::ThreadPoolOptions()));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({-1, 2}, {2}));
}
TEST(ClientSessionTest, CallableWithDefaultThreadPool) {
Scope root = Scope::NewRootScope();
auto a = Placeholder(root, DT_INT32);
auto b = Placeholder(root, DT_INT32);
auto c = Add(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
CallableOptions options;
options.add_feed(a.node()->name());
options.add_feed(b.node()->name());
options.add_fetch(c.node()->name());
ClientSession::CallableHandle callable;
TF_CHECK_OK(session.MakeCallable(options, &callable));
TF_EXPECT_OK(session.RunCallable(
callable, {test::AsTensor<int>({1}, {}), test::AsTensor<int>({41}, {})},
&outputs, nullptr));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({42}, {}));
TF_EXPECT_OK(session.ReleaseCallable(callable));
}
TEST(ClientSessionTest, CallableWithCustomThreadPool) {
Scope root = Scope::NewRootScope();
int num_threads = 3;
TensorShape data_shape({1, 1});
auto a = Placeholder(root, DT_INT32, Placeholder::Shape(data_shape));
auto b = Placeholder(root, DT_INT32, Placeholder::Shape(data_shape));
auto c = BatchMatMul(root, a, b);
ClientSession session(root);
std::vector<Tensor> outputs;
auto inter_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(inter_op_threadpool->GetNumScheduleCalled(), 0);
auto intra_op_threadpool =
absl::make_unique<CustomThreadPoolImpl>(num_threads);
ASSERT_EQ(intra_op_threadpool->GetNumScheduleCalled(), 0);
tensorflow::thread::ThreadPoolOptions threadPoolOptions;
threadPoolOptions.inter_op_threadpool = inter_op_threadpool.get();
threadPoolOptions.intra_op_threadpool = intra_op_threadpool.get();
CallableOptions options;
options.add_feed(a.node()->name());
options.add_feed(b.node()->name());
options.add_fetch(c.node()->name());
ClientSession::CallableHandle callable;
TF_CHECK_OK(session.MakeCallable(options, &callable));
absl::Barrier barrier(num_threads + 1);
for (int i = 0; i < num_threads; i++) {
intra_op_threadpool->Schedule([&barrier, num_threads]() {
tensorflow::SetPerThreadMaxParallelism(num_threads - 1);
barrier.Block();
});
}
barrier.Block();
TF_EXPECT_OK(session.RunCallable(
callable,
{test::AsTensor<int>({2}, {1, 1}), test::AsTensor<int>({10}, {1, 1})},
&outputs, nullptr, threadPoolOptions));
test::ExpectTensorEqual<int>(outputs[0], test::AsTensor<int>({20}, {1, 1}));
TF_EXPECT_OK(session.ReleaseCallable(callable));
ASSERT_GT(inter_op_threadpool->GetNumScheduleCalled(), 0);
ASSERT_GT(intra_op_threadpool->GetNumScheduleCalled(), 0);
intra_op_threadpool.reset();
}
}
} |
1,772 | cpp | tensorflow/tensorflow | while_loop | tensorflow/cc/ops/while_loop.cc | tensorflow/cc/ops/while_loop_test.cc | #ifndef TENSORFLOW_CC_OPS_WHILE_LOOP_H_
#define TENSORFLOW_CC_OPS_WHILE_LOOP_H_
#include <string>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
namespace tensorflow {
namespace ops {
typedef std::function<Status(const Scope&, const std::vector<Output>& inputs,
Output* output)>
CondGraphBuilderFn;
typedef std::function<Status(const Scope&, const std::vector<Output>& inputs,
std::vector<Output>* outputs)>
BodyGraphBuilderFn;
Status BuildWhileLoop(const Scope& scope, const std::vector<Output>& inputs,
const CondGraphBuilderFn& cond,
const BodyGraphBuilderFn& body, const string& frame_name,
OutputList* outputs, bool create_while_ctx = true,
Output* cond_output = nullptr);
}
}
#endif
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
namespace ops {
namespace {
OutputTensor ToOutputTensor(const Output& output) {
return OutputTensor(output.node(), output.index());
}
std::vector<OutputTensor> ToOutputTensors(const std::vector<Output>& outputs) {
std::vector<OutputTensor> result(outputs.size());
for (int i = 0; i < outputs.size(); ++i) {
result[i] = ToOutputTensor(outputs[i]);
}
return result;
}
std::vector<Node*> ToNodes(const std::vector<Output>& outputs) {
std::vector<Node*> result(outputs.size());
for (int i = 0; i < outputs.size(); ++i) {
result[i] = outputs[i].node();
}
return result;
}
string NextIterationName(const Scope& scope, int loop_var_idx) {
string result;
const string& prefix = scope.impl()->name();
if (!prefix.empty()) strings::StrAppend(&result, prefix, "/");
strings::StrAppend(&result, "NextIteration");
if (loop_var_idx > 0) strings::StrAppend(&result, "_", loop_var_idx);
return result;
}
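// Note on CreateMerge below: the Merge node must exist before the loop body
// (and therefore before its NextIteration input) has been built, so the
// backedge input is declared by name only, using NextIterationName above; the
// real edge is wired up later in BuildWhileLoop.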
Status CreateMerge(const Scope& scope, int loop_var_idx,
const Output& enter_output, Output* merge_output) {
NodeBuilder::NodeOut enter_input(enter_output.node(), enter_output.index());
const int next_output_index = 0;
DataType dtype = enter_output.node()->output_type(0);
NodeBuilder::NodeOut next_input(NextIterationName(scope, loop_var_idx),
next_output_index, dtype);
std::vector<NodeBuilder::NodeOut> input_list({enter_input, next_input});
const string unique_name = scope.GetUniqueNameForOp("Merge");
NodeBuilder builder = NodeBuilder(unique_name, "Merge").Input(input_list);
scope.UpdateBuilder(&builder);
Node* merge_node;
TF_RETURN_IF_ERROR(builder.Finalize(scope.graph(), &merge_node));
TF_RETURN_IF_ERROR(scope.DoShapeInference(merge_node));
*merge_output = Output(merge_node, 0);
return absl::OkStatus();
}
Status CreateCond(const Scope& scope, const CondGraphBuilderFn& cond,
const std::vector<Output>& inputs, Output* output) {
Scope cond_scope =
scope.NewSubScope("cond").WithControlDependencies(inputs[0]);
Output raw_cond_out;
TF_RETURN_IF_ERROR(cond(cond_scope, inputs, &raw_cond_out));
TF_RETURN_IF_ERROR(scope.graph()->IsValidOutputTensor(raw_cond_out.node(),
raw_cond_out.index()));
if (raw_cond_out.type() != DT_BOOL) {
return errors::InvalidArgument(
"BuildWhileLoop: 'cond' argument must return a boolean output, got ",
DataTypeString(raw_cond_out.type()));
}
*output = LoopCond(scope, raw_cond_out).output;
return absl::OkStatus();
}
Status CreateBody(const Scope& scope, const BodyGraphBuilderFn& body,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK(outputs != nullptr);
DCHECK(outputs->empty());
Scope body_scope =
scope.NewSubScope("body").WithControlDependencies(inputs[0]);
TF_RETURN_IF_ERROR(body(body_scope, inputs, outputs));
const size_t num_loop_vars = inputs.size();
if (outputs->size() != num_loop_vars) {
return errors::InvalidArgument(
"BuildWhileLoop: 'body' argument expected to return ", num_loop_vars,
" output(s), got ", outputs->size());
}
for (const Output& output : *outputs) {
TF_RETURN_IF_ERROR(
scope.graph()->IsValidOutputTensor(output.node(), output.index()));
}
return absl::OkStatus();
}
}
Status BuildWhileLoop(const Scope& scope, const std::vector<Output>& inputs,
const CondGraphBuilderFn& cond,
const BodyGraphBuilderFn& body, const string& frame_name,
OutputList* outputs, bool create_while_ctx,
Output* cond_output) {
DCHECK(!inputs.empty());
DCHECK(outputs != nullptr);
DCHECK(outputs->empty());
TF_RETURN_IF_ERROR(scope.status());
const size_t num_loop_vars = inputs.size();
  // Wrap each external input in an Enter node to move it into the loop frame.
  std::vector<Output> enter_outputs(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
enter_outputs[i] = internal::Enter(scope, inputs[i], frame_name);
}
TF_RETURN_IF_ERROR(scope.status());
std::vector<Output> merge_outputs(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
TF_RETURN_IF_ERROR(
CreateMerge(scope, i, enter_outputs[i], &merge_outputs[i]));
}
Output cond_out;
TF_RETURN_IF_ERROR(CreateCond(scope, cond, merge_outputs, &cond_out));
if (cond_output != nullptr) *cond_output = cond_out;
std::vector<Output> switch_trues(num_loop_vars);
std::vector<Output> switch_falses(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
auto switch_i = Switch(scope, merge_outputs[i], cond_out);
switch_trues[i] = switch_i.output_true;
switch_falses[i] = switch_i.output_false;
}
TF_RETURN_IF_ERROR(scope.status());
std::vector<Output> body_outputs;
TF_RETURN_IF_ERROR(CreateBody(scope, body, switch_trues, &body_outputs));
std::vector<Output> next_outputs(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
next_outputs[i] = NextIteration(scope, body_outputs[i]);
DCHECK_EQ(next_outputs[i].node()->name(), NextIterationName(scope, i));
}
TF_RETURN_IF_ERROR(scope.status());
  // Complete each loop variable's backedge: connect every NextIteration
  // output to the second (backedge) input of its corresponding Merge node.
  for (size_t i = 0; i < num_loop_vars; ++i) {
    const int merge_backedge_output_index = 1;
    scope.graph()->AddEdge(next_outputs[i].node(), next_outputs[i].index(),
                           merge_outputs[i].node(),
                           merge_backedge_output_index);
}
outputs->resize(num_loop_vars);
for (size_t i = 0; i < num_loop_vars; ++i) {
(*outputs)[i] = internal::Exit(scope, switch_falses[i]);
}
TF_RETURN_IF_ERROR(scope.status());
if (create_while_ctx) {
WhileContext* while_ctx;
TF_RETURN_IF_ERROR(scope.graph()->AddWhileContext(
frame_name, ToNodes(enter_outputs), ToNodes(*outputs),
ToOutputTensor(cond_out), ToOutputTensors(switch_trues),
ToOutputTensors(body_outputs), &while_ctx));
for (size_t i = 0; i < num_loop_vars; ++i) {
(*outputs)[i].node()->set_while_ctx(while_ctx);
}
}
return absl::OkStatus();
}
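// Editor's sketch: constructing a loop with this builder, mirroring the tests
// below. The graph computes: while (x < 10) x += 1;
//
//   std::vector<Output> outputs;
//   TF_RETURN_IF_ERROR(BuildWhileLoop(
//       scope, {x},
//       [](const Scope& s, const std::vector<Output>& in, Output* out) {
//         *out = ops::Less(s, in[0], 10);
//         return s.status();
//       },
//       [](const Scope& s, const std::vector<Output>& in,
//          std::vector<Output>* out) {
//         out->push_back(ops::Add(s, in[0], 1));
//         return s.status();
//       },
//       "my_loop", &outputs));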
}
} | #include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/while_context.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class WhileLoopTest : public ::testing::Test {
protected:
WhileLoopTest() : scope_(Scope::NewRootScope()) {}
void Init(int num_inputs, DataType dtype = DT_INT32) {
for (int i = 0; i < num_inputs; ++i) {
inputs_.push_back(ops::Placeholder(scope_, dtype));
}
}
void CreateLoop(const ops::CondGraphBuilderFn& cond,
const ops::BodyGraphBuilderFn& body,
error::Code error_code = error::OK,
const string& error_msg = "") {
Status s =
ops::BuildWhileLoop(scope_, inputs_, cond, body, kFrameName, &outputs_);
EXPECT_EQ(s.code(), error_code);
EXPECT_EQ(s.message(), error_msg);
}
template <typename T>
void Run(const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_output_values) {
ClientSession session(scope_);
DCHECK_EQ(input_values.size(), inputs_.size());
ClientSession::FeedType feeds;
for (int i = 0; i < inputs_.size(); ++i) {
feeds.emplace(inputs_[i], input_values[i]);
}
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, outputs_, &out_tensors));
ASSERT_EQ(out_tensors.size(), outputs_.size());
DCHECK_EQ(expected_output_values.size(), out_tensors.size());
for (int i = 0; i < out_tensors.size(); ++i) {
test::ExpectTensorEqual<T>(
out_tensors[i], test::AsTensor<T>({expected_output_values[i]}, {}));
}
}
Scope scope_;
std::vector<Output> inputs_;
std::vector<Output> outputs_;
static const char* const kFrameName;
};
const char* const WhileLoopTest::kFrameName = "test_loop";
Status LessThanTenCond(const Scope& s, const std::vector<Output>& inputs,
Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
}
Status AddOneBody(const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::Add(s, inputs[0], 1));
return s.status();
}
TEST_F(WhileLoopTest, Basic) {
Init(1);
CreateLoop(LessThanTenCond, AddOneBody);
WhileContext* while_ctx;
for (int i = 0; i < outputs_.size(); ++i) {
Node* node = outputs_[i].node();
ASSERT_TRUE(node->IsExit()) << "Output node " << i << ":\n"
<< node->DebugString();
ASSERT_TRUE(node->while_ctx() != nullptr) << i;
if (i == 0) {
while_ctx = node->while_ctx();
EXPECT_EQ(while_ctx->frame_name(), kFrameName);
} else {
EXPECT_EQ(node->while_ctx(), while_ctx) << i;
}
}
Run<int>({1}, {10});
Run<int>({11}, {11});
}
TEST_F(WhileLoopTest, WrongCondOutputType) {
Init(1);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Placeholder(s, DT_FLOAT);
return s.status();
},
AddOneBody, error::INVALID_ARGUMENT,
"BuildWhileLoop: 'cond' argument must return a boolean output, got "
"float");
}
TEST_F(WhileLoopTest, NullCondOutputNode) {
Init(1);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = {nullptr, 0};
return s.status();
},
AddOneBody, error::INVALID_ARGUMENT, "Node is null");
}
TEST_F(WhileLoopTest, InvalidCondOutputIndex) {
Init(1);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
auto less = ops::Less(s, inputs[0], 10);
*output = {less.node(), 100};
return s.status();
},
AddOneBody, error::OUT_OF_RANGE,
"Node 'cond/Less' (type: 'Less', num of outputs: 1) does not have output "
"100");
}
TEST_F(WhileLoopTest, UnsetCondOutput) {
Init(1);
CreateLoop([](const Scope& s, const std::vector<Output>& inputs,
Output* output) { return s.status(); },
AddOneBody, error::INVALID_ARGUMENT, "Node is null");
}
TEST_F(WhileLoopTest, NullBodyOutputNode) {
Init(1);
CreateLoop(LessThanTenCond,
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back({nullptr, 0});
return s.status();
},
error::INVALID_ARGUMENT, "Node is null");
}
TEST_F(WhileLoopTest, InvalidBodyOutputIndex) {
Init(1);
CreateLoop(LessThanTenCond,
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
auto add = ops::Add(s, inputs[0], 1);
outputs->emplace_back(add.node(), 100);
return s.status();
},
error::OUT_OF_RANGE,
"Node 'body/Add' (type: 'Add', num of outputs: 1) does not have "
"output 100");
}
TEST_F(WhileLoopTest, UnsetBodyOutputs) {
Init(1);
CreateLoop(
LessThanTenCond,
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) { return s.status(); },
error::INVALID_ARGUMENT,
"BuildWhileLoop: 'body' argument expected to return 1 output(s), got 0");
}
}
} |
1,773 | cpp | tensorflow/tensorflow | while_gradients | tensorflow/cc/framework/while_gradients.cc | tensorflow/cc/framework/while_gradients_test.cc | #ifndef TENSORFLOW_CC_FRAMEWORK_WHILE_GRADIENTS_H_
#define TENSORFLOW_CC_FRAMEWORK_WHILE_GRADIENTS_H_
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/graph/while_context.h"
namespace tensorflow {
Status AddWhileLoopGradient(WhileContext* while_ctx, const Scope& scope,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs);
}
#endif
#include "tensorflow/cc/framework/while_gradients.h"
#include <string>
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
namespace tensorflow {
namespace {
using ops::BodyGraphBuilderFn;
using ops::BuildWhileLoop;
using ops::CondGraphBuilderFn;
Output ToOutput(OutputTensor output_tensor) {
return Output(const_cast<Node*>(output_tensor.node), output_tensor.index);
}
std::vector<Output> ToOutputVector(
const std::vector<OutputTensor>& output_tensors) {
const int n = output_tensors.size();
std::vector<Output> result;
result.reserve(n);
for (int i = 0; i < n; ++i) result.push_back(ToOutput(output_tensors[i]));
return result;
}
string BackPropFrameName(const string& forward_frame_name) {
return strings::StrCat(forward_frame_name, "_backprop");
}
// Builds a counter loop that shares the forward loop's condition output,
// yielding the number of iterations the forward while loop executed.
Status AddForwardLoopCounter(WhileContext* while_ctx, const Scope& scope,
                             Output* count) {
Output zero = ops::Const(scope, 0, {});
CondGraphBuilderFn cond_fn = [while_ctx](const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
*output = ToOutput(while_ctx->cond_output());
return absl::OkStatus();
};
BodyGraphBuilderFn body_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK_EQ(inputs.size(), 1);
outputs->emplace_back(ops::Add(scope, inputs[0], 1));
return scope.status();
};
std::vector<Output> outputs;
TF_RETURN_IF_ERROR(BuildWhileLoop(scope, {zero}, cond_fn, body_fn,
while_ctx->frame_name(), &outputs,
false));
*count = outputs[0];
return absl::OkStatus();
}
// Builds a loop that counts the forward iteration count back down to zero;
// its condition output gates each step of the gradient loop.
Status AddBackPropLoopCounter(WhileContext* while_ctx, const Output& loop_count,
const Scope& scope,
Output* backprop_execution_pred) {
CondGraphBuilderFn cond_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
DCHECK_EQ(inputs.size(), 1);
*output = ops::Greater(scope, inputs[0], 0);
return scope.status();
};
BodyGraphBuilderFn body_fn = [](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
DCHECK_EQ(inputs.size(), 1);
outputs->emplace_back(ops::Subtract(scope, inputs[0], 1));
return scope.status();
};
string frame_name = BackPropFrameName(while_ctx->frame_name());
std::vector<Output> outputs;
TF_RETURN_IF_ERROR(BuildWhileLoop(
scope, {loop_count}, cond_fn, body_fn, frame_name, &outputs,
false, backprop_execution_pred));
return absl::OkStatus();
}
// Builds the gradient loop itself: each iteration applies the symbolic
// gradient of the forward body, gated by the backprop counter's predicate.
Status AddWhileGradientLoop(WhileContext* while_ctx,
const std::vector<Output>& grad_inputs,
const Output& backprop_execution_pred,
const Scope& parent_scope,
std::vector<Output>* grad_outputs) {
DCHECK_EQ(grad_inputs.size(), while_ctx->body_outputs().size());
DCHECK_EQ(while_ctx->body_inputs().size(), while_ctx->body_outputs().size());
Scope scope = parent_scope.NewSubScope("while");
CondGraphBuilderFn cond_fn = [backprop_execution_pred](
const Scope& scope,
const std::vector<Output>& inputs,
Output* output) {
*output = backprop_execution_pred;
return absl::OkStatus();
};
BodyGraphBuilderFn body_fn = [while_ctx](const Scope& scope,
const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
std::vector<Output> body_outputs =
ToOutputVector(while_ctx->body_outputs());
std::vector<Output> body_inputs = ToOutputVector(while_ctx->body_inputs());
return AddSymbolicGradients(scope, body_outputs, body_inputs, inputs,
outputs);
};
string frame_name = BackPropFrameName(while_ctx->frame_name());
TF_RETURN_IF_ERROR(BuildWhileLoop(scope, grad_inputs, cond_fn, body_fn,
frame_name, grad_outputs,
false));
return absl::OkStatus();
}
}
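// The gradient subgraph is built in three steps: count the forward
// iterations, build a countdown loop that gates backprop execution, then run
// the body gradients once per forward iteration.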
Status AddWhileLoopGradient(WhileContext* while_ctx, const Scope& scope,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
Output forward_loop_count;
TF_RETURN_IF_ERROR(AddForwardLoopCounter(
while_ctx, scope.NewSubScope("ForwardLoopCounter"), &forward_loop_count));
Output backprop_counter_cond;
TF_RETURN_IF_ERROR(AddBackPropLoopCounter(
while_ctx, forward_loop_count, scope.NewSubScope("BackPropLoopCounter"),
&backprop_counter_cond));
return AddWhileGradientLoop(while_ctx, grad_inputs, backprop_counter_cond,
scope, grad_outputs);
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/ops/while_loop.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class WhileGradientsTest : public ::testing::Test {
protected:
WhileGradientsTest() : scope_(Scope::NewRootScope()) {}
void Init(int num_inputs, DataType dtype = DT_INT32) {
for (int i = 0; i < num_inputs; ++i) {
inputs_.push_back(ops::Placeholder(scope_, dtype));
}
}
void CreateLoop(const ops::CondGraphBuilderFn& cond,
const ops::BodyGraphBuilderFn& body,
const std::vector<Output>* inputs = nullptr) {
if (inputs == nullptr) inputs = &inputs_;
TF_ASSERT_OK(ops::BuildWhileLoop(scope_, *inputs, cond, body, "test_loop",
&outputs_));
}
void CreateBackprop() {
TF_ASSERT_OK(
AddSymbolicGradients(scope_, outputs_, inputs_, &grad_outputs_));
ASSERT_EQ(grad_outputs_.size(), inputs_.size());
}
template <typename T>
void Run(const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_grad_values) {
Run<T>(ClientSession(scope_), input_values, expected_grad_values);
}
template <typename T>
void Run(const ClientSession& session,
const std::vector<Input::Initializer>& input_values,
const std::vector<T>& expected_grad_values,
const RunOptions& run_options = RunOptions(),
RunMetadata* run_metadata = nullptr) {
DCHECK_EQ(input_values.size(), inputs_.size());
ClientSession::FeedType feeds;
for (int i = 0; i < inputs_.size(); ++i) {
feeds.emplace(inputs_[i], input_values[i]);
}
std::vector<Operation> run_outputs;
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(run_options, feeds, grad_outputs_, run_outputs,
&out_tensors, run_metadata));
ASSERT_EQ(out_tensors.size(), grad_outputs_.size());
DCHECK_EQ(expected_grad_values.size(), out_tensors.size());
for (int i = 0; i < out_tensors.size(); ++i) {
test::ExpectTensorEqual<T>(
out_tensors[i], test::AsTensor<T>({expected_grad_values[i]}, {}));
}
}
Scope scope_;
std::vector<Output> inputs_;
std::vector<Output> outputs_;
std::vector<Output> grad_outputs_;
};
TEST_F(WhileGradientsTest, Basic) {
Init(1);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], 1}));
return s.status();
});
CreateBackprop();
Run<int>({1}, {1});
Run<int>({11}, {1});
}
TEST_F(WhileGradientsTest, MultipleLoopVars) {
Init(3);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], inputs[1]}));
outputs->push_back(ops::AddN(s, {inputs[1], 1}));
outputs->push_back(inputs[2]);
return s.status();
});
CreateBackprop();
Run<int>({0, 1, 2}, {1, 5, 1});
Run<int>({1, 1, 0}, {1, 5, 1});
Run<int>({0, 0, 0}, {1, 6, 1});
}
TEST_F(WhileGradientsTest, Chaining) {
Init(2, DT_DOUBLE);
std::vector<Output> loop_inputs = {ops::Multiply(scope_, inputs_[0], 2.0),
ops::Multiply(scope_, inputs_[1], 2.0)};
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::LogicalAnd(s, ops::Greater(s, inputs[0], 0.0),
ops::Greater(s, inputs[1], 0.0));
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
outputs->push_back(ops::AddN(s, {inputs[0], -1.0}));
outputs->push_back(inputs[1]);
return s.status();
},
&loop_inputs);
outputs_[0] = ops::Neg(scope_, outputs_[0]);
CreateBackprop();
Run<double>({1.0, 1.0}, {-2.0, 2.0});
Run<double>({0.0, 0.0}, {-2.0, 2.0});
}
TEST_F(WhileGradientsTest, MultipleDevices) {
scope_ = scope_.WithDevice("/cpu:0");
Init(2);
CreateLoop(
[](const Scope& s, const std::vector<Output>& inputs, Output* output) {
*output = ops::Less(s, inputs[0], 10);
return s.status();
},
[](const Scope& s, const std::vector<Output>& inputs,
std::vector<Output>* outputs) {
Scope cpu1_scope = s.WithDevice("/cpu:1");
outputs->push_back(ops::AddN(cpu1_scope, {inputs[0], inputs[1]}));
outputs->push_back(inputs[1]);
return cpu1_scope.status();
});
Scope cpu1_scope = scope_.WithDevice("/cpu:1");
TF_ASSERT_OK(
AddSymbolicGradients(cpu1_scope, outputs_, inputs_, &grad_outputs_));
ASSERT_EQ(grad_outputs_.size(), inputs_.size());
SessionOptions session_options;
(*session_options.config.mutable_device_count())["CPU"] = 2;
RunOptions run_options;
run_options.set_output_partition_graphs(true);
RunMetadata run_metadata;
Run<int>(ClientSession(scope_, session_options), {0, 1}, {1, 11}, run_options,
&run_metadata);
ASSERT_EQ(run_metadata.partition_graphs().size(), 2);
for (const GraphDef& partition_graph : run_metadata.partition_graphs()) {
EXPECT_GE(partition_graph.node().size(), 1);
}
}
}
} |
1,774 | cpp | tensorflow/tensorflow | cc_op_gen | tensorflow/cc/framework/cc_op_gen.cc | tensorflow/cc/framework/cc_op_gen_test.cc | #ifndef TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_
#define TENSORFLOW_CC_FRAMEWORK_CC_OP_GEN_H_
#include <string>
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace cc_op {
void WriteCCOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& dot_h_fname, const string& dot_cc_fname);
}
}
#endif
#include "tensorflow/cc/framework/cc_op_gen.h"
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/cc/framework/cc_op_gen_util.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace cc_op {
namespace {
const int kRightMargin = 79;
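// Returns the C++ declaration of the constructor for `op_info`, word-wrapped
// at kRightMargin. If `include_attr` is true and the op has optional
// attributes, an extra `Attrs` parameter is appended.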
string GetConstructorDecl(const OpInfo& op_info, StringPiece op_name_prefix,
bool include_attr) {
const string prefix = strings::StrCat(op_name_prefix, op_info.op_name, "(");
string c_decl;
for (int i = 0; i < op_info.arg_types.size(); ++i) {
if (i > 0) strings::StrAppend(&c_decl, ", ");
strings::StrAppend(&c_decl, op_info.arg_types[i], " ",
op_info.arg_names[i]);
}
if (include_attr && op_info.has_optional_attrs) {
strings::StrAppend(&c_decl, ", const ", op_info.op_name, "::Attrs& attrs");
}
strings::StrAppend(&c_decl, ")");
return WordWrap(prefix, c_decl, kRightMargin);
}
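// Writes the op's class declaration to the generated header: constructors,
// conversion operators and output accessors, static setters for optional
// attributes, and typedefs for any op aliases.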
void WriteClassDecl(const OpInfo& op_info, WritableFile* h) {
string class_decl = op_info.comment;
strings::StrAppend(&class_decl, "class ", op_info.op_name, " {\n");
strings::StrAppend(&class_decl, " public:\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, op_info.GetOpAttrStruct());
}
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", false),
";\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, " ",
GetConstructorDecl(op_info, "", true),
";\n");
}
if (op_info.output_types.empty()) {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Operation() const { "
"return operation; }\n");
} else if (op_info.output_types.size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(&class_decl,
" ::tensorflow::Output operator[](size_t index) "
"const { return ",
op_info.output_names[0], "[index]; }\n\n");
} else {
strings::StrAppend(&class_decl,
" operator ::tensorflow::Output() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" operator ::tensorflow::Input() const { return ",
op_info.output_names[0], "; }\n");
strings::StrAppend(&class_decl,
" ::tensorflow::Node* node() const { return ",
op_info.output_names[0], ".node(); }\n");
}
}
if (op_info.has_optional_attrs) {
strings::StrAppend(&class_decl, "\n");
for (int i = 0; i < op_info.graph_op_def.attr_size(); ++i) {
const auto& attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if ((op_info.inferred_input_attrs.find(attr.name()) !=
op_info.inferred_input_attrs.end()) ||
!api_def_attr.has_default_value()) {
continue;
}
const auto entry = AttrTypeName(attr.type());
const auto attr_type_name = entry.first;
const bool use_const = entry.second;
const string camel_case_name = ToCamelCase(api_def_attr.rename_to());
const string suffix =
(camel_case_name == op_info.op_name || camel_case_name == "Attrs")
? "_"
: "";
const string attr_func_def = strings::StrCat(
camel_case_name, suffix, "(", use_const ? "const " : "",
attr_type_name, use_const ? "&" : "");
strings::StrAppend(&class_decl, " static Attrs ", attr_func_def,
" x) {\n");
strings::StrAppend(&class_decl, " return Attrs().", camel_case_name,
suffix, "(x);\n");
strings::StrAppend(&class_decl, " }\n");
}
}
strings::StrAppend(&class_decl, "\n Operation operation;\n");
for (int i = 0; i < op_info.output_types.size(); ++i) {
strings::StrAppend(&class_decl, " ", op_info.output_types[i], " ",
op_info.output_names[i], ";\n");
}
strings::StrAppend(&class_decl, "};\n");
if (!op_info.aliases.empty()) {
for (const auto& alias : op_info.aliases) {
strings::StrAppend(&class_decl, "typedef ", op_info.op_name, " ", alias,
";\n");
}
}
strings::StrAppend(&class_decl, "\n");
TF_CHECK_OK(h->Append(class_decl));
}
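// Emits the constructor code that populates the op's output members from the
// finalized node: a single Output, a list of Outputs, or, for multi-output
// ops, a lookup through NameRangesForNode.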
void GetOutput(const OpInfo& op_info, string* out) {
const string scope_str = op_info.arg_names[0];
string return_on_error =
strings::StrCat("if (!", scope_str, ".ok()) return;");
strings::StrAppend(out, " this->operation = Operation(ret);\n");
if (op_info.graph_op_def.output_arg_size() == 0) {
strings::StrAppend(out, " return;\n");
return;
}
if (op_info.graph_op_def.output_arg_size() == 1) {
if (op_info.is_list_output[0]) {
strings::StrAppend(out,
" for (int32 i = 0; i < ret->num_outputs(); ++i)\n");
strings::StrAppend(out, " this->", op_info.output_names[0],
".push_back(Output(ret, i));\n");
} else {
strings::StrAppend(out, " this->", op_info.output_names[0],
" = Output(ret, 0);\n");
}
return;
}
strings::StrAppend(out, " ::tensorflow::NameRangeMap _outputs_range;\n");
strings::StrAppend(out,
" ::tensorflow::Status _status_ = "
"::tensorflow::NameRangesForNode(*ret, ret->op_def(), "
"nullptr, &_outputs_range);\n");
strings::StrAppend(out, " if (!_status_.ok()) {\n", " ", scope_str,
".UpdateStatus(_status_);\n", " return;\n");
strings::StrAppend(out, " }\n\n");
for (int i = 0; i < op_info.graph_op_def.output_arg_size(); ++i) {
const string arg_range = strings::StrCat(
"_outputs_range[\"", op_info.graph_op_def.output_arg(i).name(), "\"]");
if (op_info.is_list_output[i]) {
strings::StrAppend(out, " for (int32 i = ", arg_range, ".first; i < ",
arg_range, ".second; ++i)\n");
strings::StrAppend(out, " this->", op_info.output_names[i],
".push_back(Output(ret, i));\n");
} else {
strings::StrAppend(out, " this->", op_info.output_names[i],
" = Output(ret, ", arg_range, ".first);\n");
}
}
}
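// Emits the constructor body: convert the inputs to node outputs, assemble
// the node with NodeBuilder, finalize it into the scope's graph, run shape
// inference, and extract the outputs.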
string GetConstructorBody(const OpInfo& op_info) {
const string scope_str = op_info.arg_names[0];
string body;
string return_on_error =
strings::StrCat("if (!", scope_str, ".ok()) return;");
strings::StrAppend(&body, " ", return_on_error, "\n");
for (int i = 0; i < op_info.graph_op_def.input_arg_size(); ++i) {
const auto& arg(op_info.graph_op_def.input_arg(i));
const auto& api_def_arg(op_info.api_def.in_arg(i));
strings::StrAppend(
&body, " auto _", api_def_arg.rename_to(), " = ::tensorflow::ops::",
ArgIsList(arg) ? "AsNodeOutList" : "AsNodeOut", "(", scope_str, ", ",
AvoidCPPKeywords(api_def_arg.rename_to()), ");\n");
strings::StrAppend(&body, " ", return_on_error, "\n");
}
strings::StrAppend(&body, " ::tensorflow::Node* ret;\n");
strings::StrAppend(&body, " const auto unique_name = ", scope_str,
".GetUniqueNameForOp(\"", op_info.op_name, "\");\n");
strings::StrAppend(
&body, " auto builder = ::tensorflow::NodeBuilder(unique_name, \"",
op_info.graph_op_def.name(), "\")\n");
const string spaces = " ";
for (int i = 0; i < op_info.api_def.in_arg_size(); ++i) {
const auto& arg(op_info.api_def.in_arg(i));
strings::StrAppend(&body, spaces, ".Input(_", arg.rename_to(), ")\n");
}
for (int i = 0; i < op_info.api_def.attr_size(); ++i) {
const auto& graph_attr(op_info.graph_op_def.attr(i));
const auto& api_def_attr(op_info.api_def.attr(i));
if (op_info.inferred_input_attrs.find(api_def_attr.name()) !=
op_info.inferred_input_attrs.end()) {
continue;
}
const string attr_name =
api_def_attr.has_default_value()
? strings::StrCat("attrs.", api_def_attr.rename_to(), "_")
: AvoidCPPKeywords(api_def_attr.rename_to());
strings::StrAppend(&body, spaces, ".Attr(\"", graph_attr.name(), "\", ",
attr_name, ")\n");
}
strings::StrAppend(&body, " ;\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateBuilder(&builder);\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateStatus(builder.Finalize(",
scope_str, ".graph(), &ret));\n");
strings::StrAppend(&body, " ", return_on_error, "\n");
strings::StrAppend(&body, " ", scope_str, ".UpdateStatus(", scope_str,
".DoShapeInference(ret));\n");
GetOutput(op_info, &body);
return body;
}
void WriteClassDef(const OpInfo& op_info, WritableFile* cc) {
string class_def;
strings::StrAppend(
&class_def,
GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
true),
" {\n");
strings::StrAppend(&class_def, GetConstructorBody(op_info));
strings::StrAppend(&class_def, "}\n\n");
if (op_info.has_optional_attrs) {
strings::StrAppend(
&class_def,
GetConstructorDecl(op_info, strings::StrCat(op_info.op_name, "::"),
false));
strings::StrAppend(&class_def, "\n : ", op_info.op_name, "(");
int i = 0;
for (; i < op_info.arg_names.size(); ++i) {
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.arg_names[i]);
}
if (i > 0) strings::StrAppend(&class_def, ", ");
strings::StrAppend(&class_def, op_info.op_name, "::Attrs()");
strings::StrAppend(&class_def, ") {}\n\n");
}
TF_CHECK_OK(cc->Append(class_def));
}
void WriteCCOp(const OpDef& graph_op_def, const ApiDef& api_def,
const std::vector<string>& aliases, WritableFile* h,
WritableFile* cc) {
OpInfo op_info(graph_op_def, api_def, aliases);
WriteClassDecl(op_info, h);
WriteClassDef(op_info, cc);
}
void StartFiles(bool internal, const string& dot_h_fname, WritableFile* h,
WritableFile* cc, string* op_header_guard) {
const string header =
R"header(
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
)header";
const string namespace_begin = internal ? R"namespace(
namespace tensorflow {
namespace ops {
namespace internal {
)namespace"
: R"namespace(
namespace tensorflow {
namespace ops {
)namespace";
const string op_header = GetPath(dot_h_fname);
*op_header_guard = ToGuard(op_header);
const string cc_header = strings::StrCat(
R"include(
#include "tensorflow/cc/ops/const_op.h"
)include",
"#include \"", op_header, "\"\n", namespace_begin);
const string filename = GetFilename(dot_h_fname);
const string doxygen = strings::StrCat("/// @defgroup ", filename, " ",
                                       ToTitle(filename), "\n", "/// @{\n\n");
TF_CHECK_OK(h->Append(
    strings::StrCat("// This file is MACHINE GENERATED! Do not edit.\n\n"
"#ifndef ",
*op_header_guard,
"\n"
"#define ",
*op_header_guard, "\n\n")));
TF_CHECK_OK(h->Append(header));
TF_CHECK_OK(h->Append(namespace_begin));
TF_CHECK_OK(h->Append(doxygen));
TF_CHECK_OK(cc->Append(cc_header));
}
void FinishFiles(bool internal, WritableFile* h, WritableFile* cc,
const string& op_header_guard) {
const string footer = internal ? R"footer(}  // namespace internal
}  // namespace ops
}  // namespace tensorflow
)footer"
                               :
                                 R"footer(/// @}
}  // namespace ops
}  // namespace tensorflow
)footer";
TF_CHECK_OK(h->Append(footer));
TF_CHECK_OK(
h->Append(strings::StrCat("\n#endif ", "
TF_CHECK_OK(cc->Append(footer));
TF_CHECK_OK(cc->Close());
TF_CHECK_OK(h->Close());
}
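// Converts a filename to its "internal" counterpart by inserting "_internal"
// before the extension, e.g. "ops.h" becomes "ops_internal.h".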
string MakeInternal(const string& fname) {
auto dot_pos = fname.rfind('.');
if (dot_pos == string::npos) {
return strings::StrCat(fname, "_internal");
} else {
return strings::StrCat(fname.substr(0, dot_pos), "_internal",
fname.substr(dot_pos));
}
}
}
void WriteCCOps(const OpList& ops, const ApiDefMap& api_def_map,
const string& dot_h_fname, const string& dot_cc_fname) {
Env* env = Env::Default();
std::unique_ptr<WritableFile> h = nullptr;
std::unique_ptr<WritableFile> cc = nullptr;
TF_CHECK_OK(env->NewWritableFile(dot_h_fname, &h));
TF_CHECK_OK(env->NewWritableFile(dot_cc_fname, &cc));
string op_header_guard;
StartFiles(false, dot_h_fname, h.get(), cc.get(), &op_header_guard);
std::unique_ptr<WritableFile> internal_h = nullptr;
std::unique_ptr<WritableFile> internal_cc = nullptr;
const string internal_dot_h_fname = MakeInternal(dot_h_fname);
TF_CHECK_OK(env->NewWritableFile(internal_dot_h_fname, &internal_h));
TF_CHECK_OK(env->NewWritableFile(MakeInternal(dot_cc_fname), &internal_cc));
string internal_op_header_guard;
StartFiles(true , internal_dot_h_fname, internal_h.get(),
internal_cc.get(), &internal_op_header_guard);
for (const auto& graph_op_def : ops.op()) {
if (graph_op_def.has_deprecation() &&
graph_op_def.deprecation().version() <= TF_GRAPH_DEF_VERSION) {
continue;
}
if (graph_op_def.name() == "Const") continue;
const auto* api_def = api_def_map.GetApiDef(graph_op_def.name());
std::vector<string> aliases;
if (api_def->visibility() == ApiDef::SKIP) continue;
for (int endpoint_i = 1; endpoint_i < api_def->endpoint_size();
++endpoint_i) {
aliases.push_back(api_def->endpoint(endpoint_i).name());
}
if (api_def->visibility() == ApiDef::HIDDEN) {
WriteCCOp(graph_op_def, *api_def, aliases, internal_h.get(),
internal_cc.get());
continue;
}
WriteCCOp(graph_op_def, *api_def, aliases, h.get(), cc.get());
}
FinishFiles(false, h.get(), cc.get(), op_header_guard);
FinishFiles(true , internal_h.get(), internal_cc.get(),
internal_op_header_guard);
}
}
} | #include "tensorflow/cc/framework/cc_op_gen.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
constexpr char kBaseOpDef[] = R"(
op {
name: "Foo"
input_arg {
name: "images"
description: "Images to process."
}
input_arg {
name: "dim"
description: "Description for dim."
type: DT_FLOAT
}
output_arg {
name: "output"
description: "Description for output."
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
description: "Type for images"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
}
}
default_value {
i: 1
}
}
summary: "Summary for op Foo."
description: "Description for op Foo."
}
)";
void ExpectHasSubstr(StringPiece s, StringPiece expected) {
EXPECT_TRUE(absl::StrContains(s, expected))
<< "'" << s << "' does not contain '" << expected << "'";
}
void ExpectDoesNotHaveSubstr(StringPiece s, StringPiece expected) {
EXPECT_FALSE(absl::StrContains(s, expected))
<< "'" << s << "' contains '" << expected << "'";
}
void ExpectSubstrOrder(const string& s, const string& before,
const string& after) {
int before_pos = s.find(before);
int after_pos = s.find(after);
ASSERT_NE(std::string::npos, before_pos);
ASSERT_NE(std::string::npos, after_pos);
EXPECT_LT(before_pos, after_pos)
<< before << " is not before " << after << " in " << s;
}
void GenerateCcOpFiles(Env* env, const OpList& ops,
const ApiDefMap& api_def_map, string* h_file_text,
string* internal_h_file_text) {
const string& tmpdir = testing::TmpDir();
const auto h_file_path = io::JoinPath(tmpdir, "test.h");
const auto cc_file_path = io::JoinPath(tmpdir, "test.cc");
const auto internal_h_file_path = io::JoinPath(tmpdir, "test_internal.h");
const auto internal_cc_file_path = io::JoinPath(tmpdir, "test_internal.cc");
cc_op::WriteCCOps(ops, api_def_map, h_file_path, cc_file_path);
TF_ASSERT_OK(ReadFileToString(env, h_file_path, h_file_text));
TF_ASSERT_OK(
ReadFileToString(env, internal_h_file_path, internal_h_file_text));
}
TEST(CcOpGenTest, TestVisibilityChangedToHidden) {
const string api_def = R"(
op {
graph_op_name: "Foo"
visibility: HIDDEN
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string h_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo");
ExpectDoesNotHaveSubstr(internal_h_file_text, "class Foo");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(internal_h_file_text, "class Foo");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo");
}
TEST(CcOpGenTest, TestArgNameChanges) {
const string api_def = R"(
op {
graph_op_name: "Foo"
arg_order: "dim"
arg_order: "images"
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string cc_file_text, h_file_text;
string internal_cc_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectSubstrOrder(h_file_text, "Input images", "Input dim");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectSubstrOrder(h_file_text, "Input dim", "Input images");
}
TEST(CcOpGenTest, TestEndpoints) {
const string api_def = R"(
op {
graph_op_name: "Foo"
endpoint {
name: "Foo1"
}
endpoint {
name: "Foo2"
}
}
)";
Env* env = Env::Default();
OpList op_defs;
protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs);
ApiDefMap api_def_map(op_defs);
string cc_file_text, h_file_text;
string internal_cc_file_text, internal_h_file_text;
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo {");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo1");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo2");
TF_ASSERT_OK(api_def_map.LoadApiDef(api_def));
GenerateCcOpFiles(env, op_defs, api_def_map, &h_file_text,
&internal_h_file_text);
ExpectHasSubstr(h_file_text, "class Foo1");
ExpectHasSubstr(h_file_text, "typedef Foo1 Foo2");
ExpectDoesNotHaveSubstr(h_file_text, "class Foo {");
}
}
} |
1,775 | cpp | tensorflow/tensorflow | scope | tensorflow/cc/framework/scope.cc | tensorflow/cc/framework/scope_test.cc | #ifndef TENSORFLOW_CC_FRAMEWORK_SCOPE_H_
#define TENSORFLOW_CC_FRAMEWORK_SCOPE_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
namespace tensorflow {
class Graph;
class GraphDef;
class NodeBuilder;
struct CompositeOpScopes;
class Scope {
public:
Scope(const Scope& other);
~Scope();
Scope& operator=(const Scope& other);
static Scope NewRootScope();
Scope NewSubScope(const string& child_scope_name) const;
template <typename... Ty>
Scope WithOpName(Ty... fragments) const {
return WithOpNameImpl(absl::StrCat(fragments...));
}
Scope WithControlDependencies(absl::Span<const Operation> control_deps) const;
Scope WithControlDependencies(const Output& control_dep) const;
Scope WithNoControlDependencies() const;
Scope WithDevice(const string& device) const;
Scope WithAssignedDevice(const string& assigned_device) const;
Scope WithXlaCluster(const string& xla_cluster) const;
Scope ColocateWith(const Operation& op) const;
Scope ColocateWith(const Output& out) const { return ColocateWith(out.op()); }
Scope ClearColocation() const;
Scope ExitOnError() const;
Scope WithKernelLabel(const string& kernel_label) const;
string GetUniqueNameForOp(const string& default_name) const;
void UpdateStatus(const Status& s) const;
void UpdateBuilder(NodeBuilder* builder) const;
CompositeOpScopes GetCompositeOpScopes(const string& composite_op_name) const;
bool ok() const;
Graph* graph() const;
std::shared_ptr<Graph> graph_as_shared_ptr() const;
Status status() const;
Status ToGraphDef(GraphDef* gdef, bool include_debug_info = false) const;
Status ToGraph(
Graph* g, GraphConstructorOptions opts = GraphConstructorOptions{}) const;
Status DoShapeInference(Node* node) const;
static Scope DisabledShapeInferenceScope();
const std::vector<Operation>& control_deps() const;
class Impl;
Impl* impl() { return impl_.get(); }
const Impl* impl() const { return impl_.get(); }
private:
Scope WithOpNameImpl(const string& op_name) const;
friend class InternalScope;
std::unique_ptr<Impl> impl_;
explicit Scope(Impl*);
};
struct CompositeOpScopes {
Scope child;
Scope last;
};
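// Creates a node of the given operation with the given inputs in the scope's
// graph and returns its first output via `output`.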
Status CreateOutputWithScope(string op_name,
absl::Span<const ::tensorflow::Input> inputs,
const Scope& scope, Output* output);
}
#endif
#include <algorithm>
#include <vector>
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
Scope::Scope(Impl* impl) : impl_(impl) {}
Scope::Scope(const Scope& other) : impl_(new Impl(*other.impl())) {}
Scope::~Scope() {}
Scope& Scope::operator=(const Scope& other) {
impl_.reset(new Impl(*other.impl_));
return *this;
}
namespace {
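// Separator between nested scope names, and between a name and its
// uniquifying numeric suffix.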
const char kScopeSeparator[] = "/";
const char kSuffixSeparator[] = "_";
}
Scope::Impl::Impl(Graph* graph, Status* status, NameMap* name_map,
ShapeRefiner* refiner, bool disable_shape_inference)
: graph_(graph),
status_(status),
name_map_(name_map),
refiner_(refiner),
scope_used_(nullptr),
colocation_constraints_(),
disable_shape_inference_(disable_shape_inference) {}
Scope::Impl::Impl(const std::shared_ptr<Graph>& graph,
const std::shared_ptr<Status>& status,
const std::shared_ptr<NameMap>& name_map,
const std::shared_ptr<ShapeRefiner>& refiner)
: graph_(graph),
status_(status),
name_map_(name_map),
refiner_(refiner),
scope_used_(nullptr),
colocation_constraints_(),
disable_shape_inference_(refiner_ == nullptr) {}
Scope Scope::NewRootScope() {
Graph* graph = new Graph(OpRegistry::Global());
ShapeRefiner* refiner =
new ShapeRefiner(graph->versions(), graph->op_registry());
return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner,
false));
}
Scope Scope::DisabledShapeInferenceScope() {
Graph* graph = new Graph(OpRegistry::Global());
ShapeRefiner* refiner =
new ShapeRefiner(graph->versions(), graph->op_registry());
return Scope(new Impl(graph, new Status, new Impl::NameMap, refiner,
true));
}
Scope::Impl::Impl(const Scope& other, Tags::ScopeName, const string& name,
bool copy_names)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(copy_names ? other.impl()->name_map_
: std::shared_ptr<NameMap>(new NameMap)),
refiner_(other.impl()->refiner_),
scope_used_(nullptr),
control_deps_(other.impl()->control_deps_),
name_(name),
op_name_(""),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::OpName, const string& name,
const string& op_name)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(name),
op_name_(op_name),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::ControlDeps,
std::vector<Operation> control_deps, bool clear_control_deps)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(
clear_control_deps
? std::vector<Operation>()
: (control_deps.insert(control_deps.begin(),
other.impl()->control_deps_.begin(),
other.impl()->control_deps_.end()),
control_deps)),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::Device, const string& device)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(device),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::SingleUseScope,
const string& op_name)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(new bool(false)),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(op_name),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::ExitOnError)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(true),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::KernelLabel,
const string& kernel_label)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(kernel_label),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::Colocate,
const Operation& colocate_with_op, bool clear_colocations)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(
clear_colocations
? std::unordered_set<string>()
: other.impl()->GetColocationConstraints(colocate_with_op)),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::AssignedDevice,
const string& assigned_device)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(assigned_device),
xla_cluster_(other.impl()->xla_cluster_),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
Scope::Impl::Impl(const Scope& other, Tags::XlaCluster,
const string& xla_cluster)
: graph_(other.impl()->graph_),
status_(other.impl()->status_),
name_map_(other.impl()->name_map_),
refiner_(other.impl()->refiner_),
scope_used_(other.impl()->scope_used_),
control_deps_(other.impl()->control_deps_),
name_(other.impl()->name_),
op_name_(other.impl()->op_name_),
exit_on_error_(other.impl()->exit_on_error_),
kernel_label_(other.impl()->kernel_label_),
device_(other.impl()->device_),
assigned_device_(other.impl()->assigned_device_),
xla_cluster_(xla_cluster),
colocation_constraints_(other.impl()->colocation_constraints_),
disable_shape_inference_(other.impl()->disable_shape_inference_) {}
std::unordered_set<string> Scope::Impl::GetColocationConstraints(
const Operation& colocate_with_op) const {
std::unordered_set<string> current_constraints(colocation_constraints_);
const AttrSlice attrs = colocate_with_op.node()->attrs();
std::vector<string> node_constraints;
if (TryGetNodeAttr(attrs, kColocationAttrName, &node_constraints)) {
for (const string& entry : node_constraints) {
StringPiece s(entry);
if (absl::ConsumePrefix(&s, kColocationGroupPrefix)) {
current_constraints.emplace(s);
}
}
} else {
current_constraints.insert(colocate_with_op.node()->name());
}
return current_constraints;
}
bool Scope::ok() const { return impl()->status_->ok(); }
Graph* Scope::graph() const { return impl()->graph_.get(); }
std::shared_ptr<Graph> Scope::graph_as_shared_ptr() const {
return impl()->graph_;
}
Status Scope::status() const { return *impl()->status_; }
const std::vector<Operation>& Scope::control_deps() const {
return impl()->control_deps_;
}
void Scope::UpdateStatus(const Status& s) const {
impl()->status_->Update(s);
if (impl()->exit_on_error_ && !ok()) {
LOG(FATAL) << *impl()->status_;
}
}
Status Scope::ToGraphDef(GraphDef* gdef, bool include_debug_info) const {
if (!ok()) {
return *impl()->status_;
}
graph()->ToGraphDef(gdef, true, include_debug_info);
return absl::OkStatus();
}
Status Scope::ToGraph(Graph* g, GraphConstructorOptions opts) const {
if (ok()) {
GraphDef graph_def;
graph()->ToGraphDef(&graph_def);
UpdateStatus(ConvertGraphDefToGraph(opts, std::move(graph_def), g));
}
return *impl()->status_;
}
void Scope::UpdateBuilder(NodeBuilder* builder) const {
std::vector<Node*> control_inputs;
for (const auto& op : impl()->control_deps_) {
control_inputs.push_back(op.node());
}
builder->ControlInputs(control_inputs);
if (!impl()->kernel_label_.empty()) {
builder->Attr("_kernel", impl()->kernel_label_);
}
if (!impl()->colocation_constraints_.empty()) {
std::vector<string> constraints(impl()->colocation_constraints_.begin(),
impl()->colocation_constraints_.end());
std::sort(constraints.begin(), constraints.end());
std::transform(constraints.begin(), constraints.end(), constraints.begin(),
[](const string& s) {
return strings::StrCat(kColocationGroupPrefix, s);
});
builder->Attr(kColocationAttrName, constraints);
}
if (!impl()->device_.empty()) {
builder->Device(impl()->device_);
}
if (!impl()->assigned_device_.empty()) {
builder->AssignedDevice(impl()->assigned_device_);
}
if (!impl()->xla_cluster_.empty()) {
builder->XlaCluster(impl()->xla_cluster_);
}
}
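// Returns `prefix` if it has not been used in this scope yet; otherwise
// appends "_<n>" with the smallest counter value that yields an unused name.
// Single-use scopes may hand out their name only once.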
string Scope::Impl::GetUniqueName(const string& prefix,
bool check_single_use) const {
if (check_single_use && single_use_scope()) {
if (*scope_used_) {
*status_ =
errors::AlreadyExists(prefix, " already exists in the current scope");
return "";
}
*scope_used_ = true;
return prefix;
}
auto entry = name_map_->find(prefix);
if (entry == name_map_->end()) {
name_map_->insert({prefix, 0});
return prefix;
}
string unique_name;
do {
unique_name = strings::StrCat(prefix, kSuffixSeparator, ++entry->second);
} while (name_map_->find(unique_name) != name_map_->end());
name_map_->insert({unique_name, 0});
return unique_name;
}
string Scope::Impl::GetNameForOp(const string& default_name) const {
const string unique_name =
GetUniqueName(default_name, true );
const string sep =
name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return strings::StrCat(name_, sep, unique_name);
}
string Scope::GetUniqueNameForOp(const string& default_name) const {
if (impl()->single_use_scope()) {
if (impl()->op_name_.empty() || *impl()->scope_used_) {
*impl()->status_ =
errors::InvalidArgument("Cannot get a unique name in this scope");
return "";
}
*impl()->scope_used_ = true;
return impl()->op_name_;
}
return impl()->op_name_.empty() ? impl()->GetNameForOp(default_name)
: impl()->GetNameForOp(impl()->op_name_);
}
Scope Scope::NewSubScope(const string& child_scope_name) const {
if (child_scope_name.empty()) {
return Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->name_,
true ));
}
const string unique_name =
impl()->GetUniqueName(child_scope_name, false );
const string sep =
impl()->name_.empty() || unique_name.empty() ? "" : kScopeSeparator;
return Scope(new Impl(*this, Impl::Tags::ScopeName(),
strings::StrCat(impl()->name_, sep, unique_name),
false ));
}
Scope Scope::WithOpNameImpl(const string& op_name) const {
if (impl()->single_use_scope()) {
UpdateStatus(errors::InvalidArgument("Cannot set op name ", op_name,
" on this scope"));
return *this;
}
return Scope(new Impl(*this, Impl::Tags::OpName(), impl()->name_, op_name));
}
Scope Scope::WithControlDependencies(
const absl::Span<const Operation> control_deps) const {
return Scope(
new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(control_deps.begin(), control_deps.end()),
false));
}
Scope Scope::WithControlDependencies(const Output& control_dep) const {
return Scope(new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(1, control_dep.op()),
false));
}
Scope Scope::WithNoControlDependencies() const {
return Scope(new Impl(*this, Impl::Tags::ControlDeps(),
std::vector<Operation>(),
true));
}
Scope Scope::WithDevice(const string& device) const {
return Scope(new Impl(*this, Impl::Tags::Device(), device));
}
Scope Scope::WithAssignedDevice(const string& assigned_device) const {
return Scope(new Impl(*this, Impl::Tags::AssignedDevice(), assigned_device));
}
Scope Scope::WithXlaCluster(const string& xla_cluster) const {
return Scope(new Impl(*this, Impl::Tags::XlaCluster(), xla_cluster));
}
Scope Scope::ColocateWith(const Operation& op) const {
return Scope(new Impl(*this, Impl::Tags::Colocate(), op,
false));
}
Scope Scope::ClearColocation() const {
return Scope(new Impl(*this, Impl::Tags::Colocate(), Operation(),
true));
}
Scope Scope::ExitOnError() const {
return Scope(new Impl(*this, Impl::Tags::ExitOnError()));
}
Scope Scope::WithKernelLabel(const string& kernel_label) const {
return Scope(new Impl(*this, Impl::Tags::KernelLabel(), kernel_label));
}
CompositeOpScopes Scope::GetCompositeOpScopes(
const string& composite_op_name) const {
if (impl()->op_name_.empty() && composite_op_name.empty()) {
UpdateStatus(errors::InvalidArgument(
"Cannot create composite op scopes with empty name"));
return {*this, *this};
}
if (!impl()->single_use_scope()) {
Scope child = NewSubScope(impl()->op_name_.empty() ? composite_op_name
: impl()->op_name_);
const string child_op_sep = impl()->name_.empty() ? "" : kSuffixSeparator;
const string child_name =
strings::StrCat(impl()->name_, child_op_sep, child.impl()->name_);
return {child,
Scope(new Impl(child, Impl::Tags::SingleUseScope(), child_name))};
} else {
return {Scope(new Impl(*this, Impl::Tags::ScopeName(), impl()->op_name_,
true )),
*this};
}
}
Status Scope::DoShapeInference(Node* node) const {
if (impl_->disable_shape_inference_) return absl::OkStatus();
return impl_->refiner_->AddNode(node);
}
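// Creates a scope over an existing graph (e.g. one loaded from a GraphDef).
// The name map is pre-seeded with every node name, and every "/"-delimited
// prefix of it, already present in the graph so new ops get non-colliding
// names.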
class InternalScope {
public:
static Scope NewScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
Scope::Impl::NameMap* name_map = new Scope::Impl::NameMap;
for (const Node* node : graph->nodes()) {
const string& name = node->name();
(*name_map)[name] = 0;
size_t idx = -1;
while ((idx = name.find(kScopeSeparator, idx + 1)) != string::npos) {
(*name_map)[name.substr(0, idx)] = 0;
}
}
return Scope(new Scope::Impl(
std::shared_ptr<Graph>(graph, [](Graph*) {}),
std::shared_ptr<Status>(status, [](Status*) {}),
std::shared_ptr<Scope::Impl::NameMap>(name_map),
std::shared_ptr<ShapeRefiner>(refiner, [](ShapeRefiner*) {})));
}
};
Scope NewInternalScope(Graph* graph, Status* status, ShapeRefiner* refiner) {
return InternalScope::NewScope(graph, status, refiner);
}
Status CreateOutputWithScope(string op_name,
absl::Span<const ::tensorflow::Input> inputs,
const Scope& scope, Output* output) {
TF_RETURN_IF_ERROR(scope.status());
const auto unique_name = scope.GetUniqueNameForOp(op_name);
auto builder = ::tensorflow::NodeBuilder(unique_name, op_name);
for (const auto& input : inputs) {
TF_RETURN_IF_ERROR(scope.status());
builder = builder.Input(input.node());
}
::tensorflow::Node* ret;
scope.UpdateBuilder(&builder);
TF_RETURN_IF_ERROR(scope.status());
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
TF_RETURN_IF_ERROR(scope.status());
*output = Output(ret, 0);
return absl::OkStatus();
}
} | #include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ScopeTest, BasicNames) {
Scope root = Scope::NewRootScope();
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add");
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_1");
EXPECT_EQ(root.GetUniqueNameForOp("add"), "add_2");
EXPECT_EQ(root.GetUniqueNameForOp("mul"), "mul");
}
TEST(ScopeTest, OpAndScopeNameCollision) {
Scope root = Scope::NewRootScope();
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo");
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_1");
EXPECT_EQ(root.GetUniqueNameForOp("foo_1"), "foo_1_1");
EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2");
EXPECT_EQ(root.GetUniqueNameForOp("foo"), "foo_3");
EXPECT_EQ(root.GetUniqueNameForOp("foo_2"), "foo_2_1");
}
TEST(ScopeTest, HierarchicalNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add_1");
EXPECT_EQ(child.GetUniqueNameForOp("mul"), "child/mul");
Scope child_1 = root.NewSubScope("child");
EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add");
EXPECT_EQ(child_1.GetUniqueNameForOp("add"), "child_1/add_1");
EXPECT_EQ(child_1.GetUniqueNameForOp("mul"), "child_1/mul");
Scope c_c = root.NewSubScope("c").NewSubScope("c");
EXPECT_EQ(c_c.GetUniqueNameForOp("add"), "c/c/add");
Scope c_1 = root.NewSubScope("c");
Scope c_1_c = c_1.NewSubScope("c");
EXPECT_EQ(c_1_c.GetUniqueNameForOp("add"), "c_1/c/add");
Scope c_1_c_1 = c_1.NewSubScope("c");
EXPECT_EQ(c_1_c_1.GetUniqueNameForOp("add"), "c_1/c_1/add");
EXPECT_EQ(root.NewSubScope("").NewSubScope("").GetUniqueNameForOp("d"), "d");
EXPECT_EQ(root.NewSubScope("").GetUniqueNameForOp("d"), "d_1");
EXPECT_EQ(root.GetUniqueNameForOp("d"), "d_2");
}
TEST(ScopeTest, ScopeAndOpNames) {
Scope root = Scope::NewRootScope();
Scope child = root.NewSubScope("child");
EXPECT_EQ(child.GetUniqueNameForOp("add"), "child/add");
EXPECT_EQ(root.GetUniqueNameForOp("child"), "child_1");
EXPECT_EQ(root.NewSubScope("child").GetUniqueNameForOp("p"), "child_2/p");
}
namespace {
string LastOp(const Scope& scope) { return scope.GetUniqueNameForOp("Last"); }
std::vector<string> AnotherCompositeOp(const Scope& scope) {
auto cop_scopes = scope.GetCompositeOpScopes("another_cop");
const string c1 = cop_scopes.child.GetUniqueNameForOp("c1");
const string c2 = cop_scopes.child.GetUniqueNameForOp("mul");
return {c1, c2, LastOp(cop_scopes.last)};
}
std::vector<string> LinearOp(const Scope& scope) {
auto cop_scopes = scope.GetCompositeOpScopes("linear");
Scope linear = cop_scopes.child;
const string mul_op_name = linear.GetUniqueNameForOp("mul");
const string bias_add_op_name = linear.GetUniqueNameForOp("bias_add");
auto cop_names = AnotherCompositeOp(cop_scopes.last);
return {mul_op_name, bias_add_op_name, cop_names[0], cop_names[1],
cop_names[2]};
}
}
TEST(ScopeTest, CompositeOp) {
Scope root = Scope::NewRootScope();
const auto names1 = LinearOp(root);
EXPECT_EQ(names1[0], "linear/mul");
EXPECT_EQ(names1[1], "linear/bias_add");
EXPECT_EQ(names1[2], "linear/c1");
EXPECT_EQ(names1[3], "linear/mul_1");
EXPECT_EQ(names1[4], "linear");
EXPECT_EQ(root.GetUniqueNameForOp("linear"), "linear_1");
const auto names2 = LinearOp(root);
EXPECT_EQ(names2[0], "linear_2/mul");
EXPECT_EQ(names2[1], "linear_2/bias_add");
EXPECT_EQ(names2[2], "linear_2/c1");
EXPECT_EQ(names2[3], "linear_2/mul_1");
EXPECT_EQ(names2[4], "linear_2");
const auto names3 = LinearOp(root.WithOpName("c"));
EXPECT_EQ(names3[0], "c/mul");
EXPECT_EQ(names3[1], "c/bias_add");
EXPECT_EQ(names3[2], "c/c1");
EXPECT_EQ(names3[3], "c/mul_1");
EXPECT_EQ(names3[4], "c");
}
TEST(ScopeTest, SingleUseScope) {
Scope root = Scope::NewRootScope();
auto cop_scopes = root.GetCompositeOpScopes("cop");
EXPECT_EQ(cop_scopes.last.GetUniqueNameForOp("foo"), "cop");
cop_scopes.last.GetUniqueNameForOp("foo");
EXPECT_FALSE(cop_scopes.last.ok());
}
TEST(ScopeTest, ControlDeps) {
Scope root = Scope::NewRootScope();
auto c1 = Operation();
auto c2 = Operation();
Scope c = root.WithControlDependencies({c1, c2});
EXPECT_EQ(c.control_deps().size(), 2);
Scope c_c = c.WithControlDependencies({Operation()});
EXPECT_EQ(c_c.control_deps().size(), 3);
}
TEST(ScopeTest, CreateOutput) {
Scope root = Scope::NewRootScope();
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output add;
ASSERT_TRUE(
CreateOutputWithScope("Add", {a, a}, root.WithOpName("add"), &add).ok());
EXPECT_EQ(add.node()->name(), "add");
EXPECT_EQ(add.node()->type_string(), "Add");
}
} |
1,776 | cpp | tensorflow/tensorflow | queue_runner | tensorflow/cc/training/queue_runner.cc | tensorflow/cc/training/queue_runner_test.cc | #ifndef TENSORFLOW_CC_TRAINING_QUEUE_RUNNER_H_
#define TENSORFLOW_CC_TRAINING_QUEUE_RUNNER_H_
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
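// QueueRunner repeatedly runs a queue's enqueue ops on background threads,
// optionally under the control of a Coordinator, and closes or cancels the
// queue when it stops.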
class QueueRunner : public RunnerInterface {
public:
static Status New(const QueueRunnerDef& queue_runner_def,
std::unique_ptr<QueueRunner>* result);
static Status New(const QueueRunnerDef& queue_runner_def, Coordinator* coord,
std::unique_ptr<QueueRunner>* result);
void AddErrorCallback(const std::function<void(Status)>& cb);
void ClearErrorCallbacks();
~QueueRunner();
Status Start(Session* sess);
Status StartAndCollectCostGraph(Session* sess,
const RunOptions& run_options = RunOptions());
Status Start(Session* sess, int wait_for_ms);
Status StartAndCollectCostGraph(Session* session, int wait_for_ms,
const RunOptions& run_options = RunOptions());
void Stop(Session* sess);
Status Join() final;
Status GetStatus();
Status ExportCostGraph(CostGraphDef* cost_graph) const override;
private:
QueueRunner() : coord_(nullptr), stopped_(false), cg_mu_(nullptr) {}
Status Init(const QueueRunnerDef& queue_runner_def);
void Run(Session* sess, const string& enqueue_op);
void UpdateStatus(const Status& status);
bool IsQueueClosed(Status status) const {
return queue_closed_exception_types_.count(
static_cast<int>(status.code())) > 0;
}
bool IsRunning() const override { return !stopped_; }
void SetRunArgumentsAndCostGraph(const RunOptions& run_options);
Status RealRun(Session* sess, const string& op, bool update_costs);
string queue_name_;
std::vector<string> enqueue_op_names_;
string close_op_name_;
string cancel_op_name_;
std::unordered_set<int> queue_closed_exception_types_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
mutex mu_;
int runs_ = 0;
Status status_ TF_GUARDED_BY(mu_);
Status enqueue_status_ TF_GUARDED_BY(mu_);
std::unique_ptr<BlockingCounter> counter_;
Coordinator* coord_;
std::atomic<bool> stopped_;
mutex cb_mu_;
std::vector<std::function<void(Status)>> callbacks_;
mutable std::unique_ptr<mutex> cg_mu_;
std::unique_ptr<CostGraphDef> cost_graph_ TF_GUARDED_BY(cg_mu_);
RunOptions run_options_;
};
}
#endif
#include "tensorflow/cc/training/queue_runner.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/env.h"
namespace tensorflow {
Status QueueRunner::New(const QueueRunnerDef& queue_runner_def,
std::unique_ptr<QueueRunner>* result) {
result->reset(new QueueRunner());
return (*result)->Init(queue_runner_def);
}
Status QueueRunner::New(const QueueRunnerDef& queue_runner_def,
Coordinator* coord,
std::unique_ptr<QueueRunner>* result) {
result->reset(new QueueRunner());
(*result)->coord_ = coord;
return (*result)->Init(queue_runner_def);
}
void QueueRunner::AddErrorCallback(const std::function<void(Status)>& cb) {
mutex_lock l(cb_mu_);
callbacks_.push_back(cb);
}
void QueueRunner::ClearErrorCallbacks() {
mutex_lock l(cb_mu_);
callbacks_.clear();
}
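// Validates the QueueRunnerDef and sizes the thread pool: one thread per
// enqueue op, plus one extra thread to run Stop() when a Coordinator is
// attached.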
Status QueueRunner::Init(const QueueRunnerDef& queue_runner_def) {
queue_name_ = queue_runner_def.queue_name();
enqueue_op_names_.clear();
enqueue_op_names_.insert(enqueue_op_names_.end(),
queue_runner_def.enqueue_op_name().begin(),
queue_runner_def.enqueue_op_name().end());
size_t op_names_size = enqueue_op_names_.size();
if (op_names_size > kint32max) {
return Status(absl::StatusCode::kInvalidArgument,
"Enqueue ops to run cannot exceed kint32max");
}
runs_ = static_cast<int>(op_names_size);
if (runs_ == 0) {
return Status(absl::StatusCode::kInvalidArgument,
"Empty enqueue ops to run.");
}
close_op_name_ = queue_runner_def.close_op_name();
cancel_op_name_ = queue_runner_def.cancel_op_name();
if (queue_runner_def.queue_closed_exception_types_size() == 0) {
queue_closed_exception_types_.insert(error::OUT_OF_RANGE);
} else {
for (const auto& code : queue_runner_def.queue_closed_exception_types()) {
queue_closed_exception_types_.insert(static_cast<int>(code));
}
}
int nthreads = runs_;
if (coord_) {
nthreads++;
}
thread_pool_.reset(new thread::ThreadPool(
Env::Default(), SanitizeThreadSuffix(queue_name_), nthreads));
return absl::OkStatus();
}
QueueRunner::~QueueRunner() {
Join().IgnoreError();
}
Status QueueRunner::Start(Session* sess) { return Start(sess, 0); }
Status QueueRunner::StartAndCollectCostGraph(Session* sess,
const RunOptions& run_options) {
SetRunArgumentsAndCostGraph(run_options);
return Start(sess, 0);
}
Status QueueRunner::Start(Session* sess, int wait_for) {
counter_.reset(new BlockingCounter(runs_));
for (const string& enqueue_op : enqueue_op_names_) {
thread_pool_->Schedule(
std::bind(&QueueRunner::Run, this, sess, enqueue_op));
}
if (coord_) {
thread_pool_->Schedule(std::bind(&QueueRunner::Stop, this, sess));
}
if (wait_for > 0) {
if (!counter_->WaitFor(std::chrono::milliseconds(wait_for))) {
return Status(absl::StatusCode::kDeadlineExceeded,
"Queues not fed before the timeout");
}
mutex_lock l(mu_);
if (!enqueue_status_.ok()) {
return enqueue_status_;
} else {
return status_;
}
}
return absl::OkStatus();
}
Status QueueRunner::StartAndCollectCostGraph(Session* session, int wait_for_ms,
const RunOptions& run_options) {
SetRunArgumentsAndCostGraph(run_options);
return Start(session, wait_for_ms);
}
void QueueRunner::Stop(Session* sess) {
if (coord_ != nullptr) {
coord_->WaitForStop();
}
if (!cancel_op_name_.empty()) {
UpdateStatus(RealRun(sess, cancel_op_name_, false));
}
stopped_ = true;
}
Status QueueRunner::Join() {
thread_pool_.reset();
mutex_lock l(mu_);
return status_;
}
void QueueRunner::UpdateStatus(const Status& status) {
{
mutex_lock l(mu_);
if (!status_.ok() || status.ok() || IsQueueClosed(status)) {
return;
}
status_ = status;
}
if (coord_) {
coord_->ReportStatus(status);
}
mutex_lock l(cb_mu_);
for (auto& cb : callbacks_) {
cb(status);
}
}
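// Body of each enqueue thread. Runs the enqueue op until it fails or the
// coordinator requests a stop. The first iteration's status is recorded and
// the startup counter decremented so Start() can report early failures; the
// last thread to finish closes the queue when the failure was a normal
// queue-closed condition.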
void QueueRunner::Run(Session* sess, const string& enqueue_op) {
bool first_iteration = true;
Status status;
while (status.ok()) {
if (coord_ && coord_->ShouldStop()) {
break;
}
status = RealRun(sess, enqueue_op, true);
if (first_iteration) {
if (!status.ok()) {
mutex_lock l(mu_);
enqueue_status_ = status;
}
counter_->DecrementCount();
first_iteration = false;
}
}
bool last_run = false;
{
mutex_lock l(mu_);
runs_--;
last_run = (runs_ == 0);
}
if (IsQueueClosed(status) && (!coord_ || !coord_->ShouldStop())) {
if (last_run && !close_op_name_.empty()) {
UpdateStatus(RealRun(sess, close_op_name_, false));
}
} else if (!status.ok()) {
LOG(ERROR) << "Queue runner thread got a failure status: "
<< status.ToString();
UpdateStatus(status);
if (coord_) {
coord_->RequestStop().IgnoreError();
}
}
}
Status QueueRunner::GetStatus() {
mutex_lock l(mu_);
return status_;
}
Status QueueRunner::ExportCostGraph(CostGraphDef* cost_graph) const {
if (!cg_mu_) {
return Status(absl::StatusCode::kFailedPrecondition,
"This QueueRunner doesn't collect a cost graph.");
}
mutex_lock l(*cg_mu_);
cost_graph->MergeFrom(*cost_graph_);
return absl::OkStatus();
}
void QueueRunner::SetRunArgumentsAndCostGraph(const RunOptions& run_options) {
cg_mu_.reset(new mutex());
{
mutex_lock l(*cg_mu_);
cost_graph_.reset(new CostGraphDef());
}
run_options_ = run_options;
}
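// Runs a single op. When `update_costs` is set and cost-graph collection is
// enabled, runs with `run_options_` and swaps the collected cost graph into
// `cost_graph_` under `cg_mu_`.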
Status QueueRunner::RealRun(Session* sess, const string& op,
bool update_costs) {
Status s;
if (update_costs && cg_mu_) {
RunMetadata metadata;
s = sess->Run(run_options_, {}, {}, {op}, nullptr, &metadata);
mutex_lock l(*cg_mu_);
cost_graph_->Swap(metadata.mutable_cost_graph());
} else {
s = sess->Run({}, {}, {op}, nullptr);
}
return s;
}
} | #include "tensorflow/cc/training/queue_runner.h"
#include <string>
#include <vector>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace {
using error::Code;
using ops::Assign;
using ops::Const;
using ops::CountUpTo;
using ops::FIFOQueue;
using ops::QueueClose;
using ops::QueueDequeue;
using ops::QueueEnqueue;
using ops::RandomNormal;
using ops::Square;
using ops::Variable;
constexpr char kAssignOpName[] = "assign";
constexpr char kCancelOp0[] = "cancel0";
constexpr char kCancelOp1[] = "cancel1";
constexpr char kCloseOp0[] = "close0";
constexpr char kCloseOp1[] = "close1";
constexpr char kCountUpToOpName[] = "count";
constexpr char kDequeueOp0[] = "dequeue0";
constexpr char kDequeueOp1[] = "dequeue1";
constexpr char kEnqueueOp0[] = "enqueue0";
constexpr char kEnqueueOp1[] = "enqueue1";
constexpr char kIllegalOpName1[] = "would fail";
constexpr char kIllegalOpName2[] = "fail again";
constexpr char kQueueName[] = "unit_test";
constexpr char kQueueName0[] = "q0";
constexpr char kQueueName1[] = "q1";
constexpr char kSquareOpName[] = "square";
constexpr char kVarOpName[] = "var";
GraphDef BuildSimpleGraph() {
Scope root = Scope::NewRootScope();
auto init_value = Const(root, 0);
auto var = Variable(root.WithOpName(kVarOpName), TensorShape({}),
DataType::DT_INT32);
auto assign = Assign(root.WithOpName(kAssignOpName), var, init_value);
auto count = CountUpTo(root.WithOpName(kCountUpToOpName), var, 10);
Square(root.WithOpName(kSquareOpName), var);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
return graph_def;
}
QueueRunnerDef BuildQueueRunnerDef(
const std::string& queue_name, const std::vector<std::string>& enqueue_ops,
const std::string& close_op, const std::string& cancel_op,
const std::vector<Code>& queue_closed_error_codes) {
QueueRunnerDef queue_runner_def;
*queue_runner_def.mutable_queue_name() = queue_name;
for (const std::string& enqueue_op : enqueue_ops) {
*queue_runner_def.mutable_enqueue_op_name()->Add() = enqueue_op;
}
*queue_runner_def.mutable_close_op_name() = close_op;
*queue_runner_def.mutable_cancel_op_name() = cancel_op;
for (const auto& error_code : queue_closed_error_codes) {
*queue_runner_def.mutable_queue_closed_exception_types()->Add() =
error_code;
}
return queue_runner_def;
}
std::unique_ptr<Session> BuildSessionAndInitVariable(
const GraphDef& graph_def) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
TF_CHECK_OK(session->Run({}, {}, {kAssignOpName}, nullptr));
return session;
}
TEST(QueueRunnerTest, BasicTest) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName}, kSquareOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session->Run({}, {kSquareOpName}, {}, &outputs));
int square_value = *outputs[0].scalar<int>().data();
EXPECT_EQ(square_value, 100);
}
TEST(QueueRunnerTest, QueueClosedCode) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName, kCountUpToOpName}, kSquareOpName, "",
{Code::OUT_OF_RANGE, Code::CANCELLED});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session->Run({}, {kSquareOpName}, {}, &outputs));
int square_value = *outputs[0].scalar<int>().data();
EXPECT_EQ(square_value, 100);
}
TEST(QueueRunnerTest, QueueCloseFails) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kCountUpToOpName}, kIllegalOpName1, "",
{Code::OUT_OF_RANGE});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
auto status = qr->Join();
EXPECT_EQ(status.code(), Code::NOT_FOUND) << status;
}
TEST(QueueRunnerTest, CatchErrorInJoin) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kIllegalOpName1, kIllegalOpName2}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
EXPECT_EQ(qr->Join().code(), Code::NOT_FOUND);
}
GraphDef BuildDoubleQueueGraph() {
Scope root = Scope::NewRootScope();
auto q0 = FIFOQueue(root.WithOpName(kQueueName0), {DataType::DT_INT32});
auto ten = Const(root, 10);
auto enqueue0 = QueueEnqueue(root.WithOpName(kEnqueueOp0), q0, {ten});
auto close0 = QueueClose(root.WithOpName(kCloseOp0), q0);
auto cancel0 = QueueClose(root.WithOpName(kCancelOp0), q0,
QueueClose::CancelPendingEnqueues(true));
auto q1 = FIFOQueue(root.WithOpName(kQueueName1), {DataType::DT_INT32},
FIFOQueue::Capacity(3));
auto dequeue0 =
QueueDequeue(root.WithOpName(kDequeueOp0), q0, {DataType::DT_INT32});
auto enqueue1 = QueueEnqueue(root.WithOpName(kEnqueueOp1), q1, {dequeue0[0]});
auto dequeue1 =
QueueDequeue(root.WithOpName(kDequeueOp1), q1, {DataType::DT_INT32});
auto close1 = QueueClose(root.WithOpName(kCloseOp1), q1);
auto cancel1 = QueueClose(root.WithOpName(kCancelOp1), q1,
QueueClose::CancelPendingEnqueues(true));
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
return graph_def;
}
TEST(QueueRunnerTest, RealEnqueueDequeue) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kEnqueueOp1}, kCloseOp1, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
TF_EXPECT_OK(session->Run({}, {}, {kCloseOp0}, nullptr));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> dq1;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq1));
EXPECT_EQ(*dq1[0].scalar<int>().data(), 10);
std::vector<Tensor> dq2;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq2));
EXPECT_EQ(*dq2[0].scalar<int>().data(), 10);
EXPECT_EQ(session->Run({}, {kDequeueOp1}, {}, nullptr).code(),
Code::OUT_OF_RANGE);
}
void JoinThread(QueueRunner* queue_runner, bool* join_succeeded,
Notification* join_done) {
EXPECT_EQ(queue_runner->Join().code(), Code::CANCELLED);
*join_succeeded = true;
join_done->Notify();
}
TEST(QueueRunnerTest, SessionCloseCancelPendingEnqueue) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
std::vector<Tensor> dq1;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq1));
EXPECT_EQ(*dq1[0].scalar<int>().data(), 10);
bool join_succeeded = false;
Notification join_done;
Env::Default()->SchedClosure(
std::bind(&JoinThread, qr.get(), &join_succeeded, &join_done));
Env::Default()->SleepForMicroseconds(10000000);
EXPECT_EQ(join_succeeded, false);
TF_EXPECT_OK(session->Close());
join_done.WaitForNotification();
EXPECT_EQ(join_succeeded, true);
}
TEST(QueueRunnerTest, EmptyEnqueueOps) {
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
EXPECT_EQ(QueueRunner::New(queue_runner_def, &qr).code(),
Code::INVALID_ARGUMENT);
}
TEST(QueueRunnerTest, StartTimeout) {
GraphDef graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
EXPECT_EQ(qr->Start(session.get(), 1).code(), Code::DEADLINE_EXCEEDED);
TF_EXPECT_OK(session->Close());
}
TEST(QueueRunnerTest, TestCoordinatorStop) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner0 =
BuildQueueRunnerDef(kQueueName0, {kEnqueueOp0}, kCloseOp0, kCancelOp0,
{Code::OUT_OF_RANGE, Code::CANCELLED});
QueueRunnerDef queue_runner1 =
BuildQueueRunnerDef(kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1,
{Code::OUT_OF_RANGE, Code::CANCELLED});
Coordinator coord;
std::unique_ptr<QueueRunner> qr0;
TF_EXPECT_OK(QueueRunner::New(queue_runner0, &coord, &qr0));
TF_CHECK_OK(qr0->Start(session.get()));
std::unique_ptr<QueueRunner> qr1;
TF_EXPECT_OK(QueueRunner::New(queue_runner1, &coord, &qr1));
TF_CHECK_OK(qr1->Start(session.get()));
TF_EXPECT_OK(coord.RegisterRunner(std::move(qr0)));
TF_EXPECT_OK(coord.RegisterRunner(std::move(qr1)));
std::vector<Tensor> dq;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq));
EXPECT_EQ(*dq[0].scalar<int>().data(), 10);
TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join());
}
TEST(QueueRunnerTest, CallbackCalledOnError) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kIllegalOpName1, kIllegalOpName2}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
bool error_caught = false;
qr->AddErrorCallback([&error_caught](const Status&) { error_caught = true; });
TF_EXPECT_OK(qr->Start(session.get()));
EXPECT_FALSE(qr->Join().ok());
EXPECT_TRUE(error_caught);
}
TEST(QueueRunnerTest, RunMetaDataTest) {
Scope root = Scope::NewRootScope();
auto q0 = FIFOQueue(root.WithOpName(kQueueName), {DataType::DT_FLOAT});
Output rnd = RandomNormal(root.WithOpName("rnd"), {1, 1}, DataType::DT_FLOAT);
Output square = Square(root.WithOpName(kSquareOpName), rnd);
auto enqueue0 = QueueEnqueue(root.WithOpName(kEnqueueOp0), q0, {square});
auto close0 = QueueClose(root.WithOpName(kCloseOp0), q0);
auto cancel0 = QueueClose(root.WithOpName(kCancelOp0), q0,
QueueClose::CancelPendingEnqueues(true));
auto dequeue0 =
QueueDequeue(root.WithOpName(kDequeueOp0), q0, {DataType::DT_FLOAT});
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
for (auto& node : *graph_def.mutable_node()) {
node.set_device("/cpu:0");
}
SessionOptions sess_options;
sess_options.config.mutable_graph_options()->set_build_cost_model(1);
std::unique_ptr<Session> session(NewSession(sess_options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kEnqueueOp0}, kCloseOp0, kCancelOp0, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
RunOptions run_options;
TF_CHECK_OK(qr->StartAndCollectCostGraph(session.get(), run_options));
std::vector<Tensor> dq0;
TF_EXPECT_OK(session->Run({}, {kDequeueOp0}, {}, &dq0));
TF_EXPECT_OK(session->Run({}, {kDequeueOp0}, {}, &dq0));
CostGraphDef cost_graph;
TF_CHECK_OK(qr->ExportCostGraph(&cost_graph));
EXPECT_TRUE(cost_graph.node_size() > 0);
qr->Stop(session.get());
}
TEST(QueueRunnerTest, NoRunMetaDataTest) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName}, kSquareOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
CostGraphDef cost_graph;
EXPECT_EQ(qr->ExportCostGraph(&cost_graph).code(),
error::FAILED_PRECONDITION);
}
}
} |
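// A minimal sketch of driving the QueueRunner API exercised above, assuming
// a session whose graph defines a queue with ops named "enqueue", "close"
// and "cancel" (hypothetical names, not taken from the tests above).
#include <memory>
#include "tensorflow/cc/training/queue_runner.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
tensorflow::Status RunQueueOnce(tensorflow::Session* session) {
  tensorflow::QueueRunnerDef def;
  def.set_queue_name("q");
  def.add_enqueue_op_name("enqueue");
  def.set_close_op_name("close");
  def.set_cancel_op_name("cancel");
  std::unique_ptr<tensorflow::QueueRunner> runner;
  TF_RETURN_IF_ERROR(tensorflow::QueueRunner::New(def, &runner));
  // Spawns one thread per enqueue op; waits up to 1s for the first enqueue.
  TF_RETURN_IF_ERROR(runner->Start(session, /*wait_for=*/1000));
  // ... dequeue via session->Run(...) here ...
  return runner->Join();  // Blocks until every enqueue thread has finished.
}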
1,777 | cpp | tensorflow/tensorflow | coordinator | tensorflow/cc/training/coordinator.cc | tensorflow/cc/training/coordinator_test.cc | #ifndef TENSORFLOW_CC_TRAINING_COORDINATOR_H_
#define TENSORFLOW_CC_TRAINING_COORDINATOR_H_
#include <atomic>
#include <memory>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
class RunnerInterface {
public:
virtual ~RunnerInterface() {}
virtual Status Join() = 0;
virtual Status ExportCostGraph(CostGraphDef* cost_graph) const {
return Status(absl::StatusCode::kInvalidArgument,
"No cost model to export.");
}
virtual bool IsRunning() const = 0;
};
class Coordinator {
public:
Coordinator();
Coordinator(const std::vector<error::Code>& clean_stop_errors);
~Coordinator();
Status RegisterRunner(std::unique_ptr<RunnerInterface> runner);
bool AllRunnersStopped();
Status RequestStop();
bool ShouldStop();
Status Join();
void ReportStatus(const Status& status);
Status GetStatus();
void WaitForStop();
Status ExportCostGraph(CostGraphDef* cost_graph) const;
private:
std::unordered_set<int> clean_stop_errors_;
condition_variable wait_for_stop_;
mutex mu_;
bool should_stop_ TF_GUARDED_BY(mu_);
mutex status_lock_;
Status status_ TF_GUARDED_BY(status_lock_);
mutable mutex runners_lock_;
std::vector<std::unique_ptr<RunnerInterface>> runners_
TF_GUARDED_BY(runners_lock_);
Coordinator(const Coordinator&) = delete;
void operator=(const Coordinator&) = delete;
};
}
#endif
#include "tensorflow/cc/training/coordinator.h"
namespace tensorflow {
Coordinator::Coordinator() : Coordinator(std::vector<error::Code>()) {}
Coordinator::Coordinator(const std::vector<error::Code>& clean_stop_errors)
: should_stop_(false) {
if (clean_stop_errors.empty()) {
clean_stop_errors_.insert(error::OUT_OF_RANGE);
} else {
for (const auto& code : clean_stop_errors) {
clean_stop_errors_.insert(static_cast<int>(code));
}
}
}
Coordinator::~Coordinator() {
RequestStop().IgnoreError();
Join().IgnoreError();
}
Status Coordinator::RegisterRunner(std::unique_ptr<RunnerInterface> runner) {
{
mutex_lock l(mu_);
if (should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"The coordinator has been stopped.");
}
}
mutex_lock l(runners_lock_);
runners_.push_back(std::move(runner));
return absl::OkStatus();
}
bool Coordinator::AllRunnersStopped() {
mutex_lock l(runners_lock_);
for (const auto& runner : runners_) {
if (runner->IsRunning()) {
return false;
}
}
return true;
}
Status Coordinator::RequestStop() {
mutex_lock l(mu_);
if (should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"The Coordinator is not running.");
}
should_stop_ = true;
wait_for_stop_.notify_all();
return absl::OkStatus();
}
bool Coordinator::ShouldStop() {
mutex_lock l(mu_);
return should_stop_;
}
Status Coordinator::Join() {
{
mutex_lock l(mu_);
if (!should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"Joining coordinator without requesting to stop.");
}
}
{
mutex_lock l(runners_lock_);
for (const auto& t : runners_) {
ReportStatus(t->Join());
}
runners_.clear();
}
return GetStatus();
}
void Coordinator::ReportStatus(const Status& status) {
mutex_lock l(status_lock_);
  // Record only the first non-clean error; clean-stop codes indicate a
  // normal shutdown and are ignored.
  if (status.ok() || !status_.ok() ||
clean_stop_errors_.count(static_cast<int>(status.code())) > 0) {
return;
}
status_ = status;
}
Status Coordinator::GetStatus() {
mutex_lock l(status_lock_);
return status_;
}
void Coordinator::WaitForStop() {
mutex_lock l(mu_);
while (!should_stop_) {
wait_for_stop_.wait(l);
}
}
Status Coordinator::ExportCostGraph(CostGraphDef* cost_graph) const {
mutex_lock l(runners_lock_);
for (auto& t : runners_) {
Status s = t->ExportCostGraph(cost_graph);
if (!s.ok()) {
return s;
}
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/cc/training/queue_runner.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session.h"
namespace tensorflow {
namespace {
using error::Code;
void WaitForStopThread(Coordinator* coord, Notification* about_to_wait,
Notification* done) {
about_to_wait->Notify();
coord->WaitForStop();
done->Notify();
}
TEST(CoordinatorTest, TestStopAndWaitOnStop) {
Coordinator coord;
EXPECT_EQ(coord.ShouldStop(), false);
Notification about_to_wait;
Notification done;
Env::Default()->SchedClosure(
std::bind(&WaitForStopThread, &coord, &about_to_wait, &done));
about_to_wait.WaitForNotification();
Env::Default()->SleepForMicroseconds(1000 * 1000);
EXPECT_FALSE(done.HasBeenNotified());
TF_EXPECT_OK(coord.RequestStop());
done.WaitForNotification();
EXPECT_TRUE(coord.ShouldStop());
}
class MockQueueRunner : public RunnerInterface {
public:
explicit MockQueueRunner(Coordinator* coord) {
coord_ = coord;
join_counter_ = nullptr;
thread_pool_.reset(new thread::ThreadPool(Env::Default(), "test-pool", 10));
stopped_ = false;
}
MockQueueRunner(Coordinator* coord, int* join_counter)
: MockQueueRunner(coord) {
join_counter_ = join_counter;
}
void StartCounting(std::atomic<int>* counter, int until,
Notification* start = nullptr) {
thread_pool_->Schedule(
std::bind(&MockQueueRunner::CountThread, this, counter, until, start));
}
void StartSettingStatus(const Status& status, BlockingCounter* counter,
Notification* start) {
thread_pool_->Schedule(std::bind(&MockQueueRunner::SetStatusThread, this,
status, counter, start));
}
Status Join() override {
if (join_counter_ != nullptr) {
(*join_counter_)++;
}
thread_pool_.reset();
return status_;
}
Status GetStatus() { return status_; }
void SetStatus(const Status& status) { status_ = status; }
  bool IsRunning() const override { return !stopped_; }
void Stop() { stopped_ = true; }
private:
void CountThread(std::atomic<int>* counter, int until, Notification* start) {
if (start != nullptr) start->WaitForNotification();
while (!coord_->ShouldStop() && counter->load() < until) {
(*counter)++;
Env::Default()->SleepForMicroseconds(10 * 1000);
}
coord_->RequestStop().IgnoreError();
}
void SetStatusThread(const Status& status, BlockingCounter* counter,
Notification* start) {
start->WaitForNotification();
SetStatus(status);
counter->DecrementCount();
}
std::unique_ptr<thread::ThreadPool> thread_pool_;
Status status_;
Coordinator* coord_;
int* join_counter_;
bool stopped_;
};
TEST(CoordinatorTest, TestRealStop) {
std::atomic<int> counter(0);
Coordinator coord;
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartCounting(&counter, 100);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartCounting(&counter, 100);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
while (counter.load() == 0)
;
TF_EXPECT_OK(coord.RequestStop());
int temp_counter = counter.load();
Env::Default()->SleepForMicroseconds(1000 * 1000);
EXPECT_EQ(temp_counter, counter.load());
TF_EXPECT_OK(coord.Join());
}
TEST(CoordinatorTest, TestRequestStop) {
Coordinator coord;
std::atomic<int> counter(0);
Notification start;
std::unique_ptr<MockQueueRunner> qr;
for (int i = 0; i < 10; i++) {
qr.reset(new MockQueueRunner(&coord));
qr->StartCounting(&counter, 10, &start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
}
start.Notify();
coord.WaitForStop();
EXPECT_EQ(coord.ShouldStop(), true);
EXPECT_EQ(counter.load(), 10);
TF_EXPECT_OK(coord.Join());
}
TEST(CoordinatorTest, TestJoin) {
Coordinator coord;
int join_counter = 0;
std::unique_ptr<MockQueueRunner> qr1(
new MockQueueRunner(&coord, &join_counter));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(
new MockQueueRunner(&coord, &join_counter));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join());
EXPECT_EQ(join_counter, 2);
}
TEST(CoordinatorTest, StatusReporting) {
Coordinator coord({Code::CANCELLED, Code::OUT_OF_RANGE});
Notification start;
BlockingCounter counter(3);
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartSettingStatus(Status(absl::StatusCode::kCancelled, ""), &counter,
&start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartSettingStatus(Status(absl::StatusCode::kInvalidArgument, ""),
&counter, &start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
std::unique_ptr<MockQueueRunner> qr3(new MockQueueRunner(&coord));
qr3->StartSettingStatus(Status(absl::StatusCode::kOutOfRange, ""), &counter,
&start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr3)));
start.Notify();
counter.Wait();
TF_EXPECT_OK(coord.RequestStop());
EXPECT_EQ(coord.Join().code(), absl::StatusCode::kInvalidArgument);
}
TEST(CoordinatorTest, JoinWithoutStop) {
Coordinator coord;
std::unique_ptr<MockQueueRunner> qr(new MockQueueRunner(&coord));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
EXPECT_EQ(coord.Join().code(), Code::FAILED_PRECONDITION);
}
TEST(CoordinatorTest, AllRunnersStopped) {
Coordinator coord;
MockQueueRunner* qr = new MockQueueRunner(&coord);
TF_ASSERT_OK(coord.RegisterRunner(std::unique_ptr<RunnerInterface>(qr)));
EXPECT_FALSE(coord.AllRunnersStopped());
qr->Stop();
EXPECT_TRUE(coord.AllRunnersStopped());
}
}
} |
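// A minimal sketch of the Coordinator lifecycle exercised above: register
// runners, wait for one of them (or the owner) to request a stop, then join
// them all.
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/core/platform/errors.h"
tensorflow::Status DriveRunners(
    std::vector<std::unique_ptr<tensorflow::RunnerInterface>> runners) {
  tensorflow::Coordinator coord;
  for (auto& runner : runners) {
    // The coordinator takes ownership and will Join() each runner itself.
    TF_RETURN_IF_ERROR(coord.RegisterRunner(std::move(runner)));
  }
  coord.WaitForStop();  // Blocks until some runner calls RequestStop().
  return coord.Join();  // Surfaces the first non-clean-stop error, if any.
}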
1,778 | cpp | tensorflow/tensorflow | index_util | third_party/xla/xla/index_util.cc | third_party/xla/xla/index_util_test.cc | #ifndef XLA_INDEX_UTIL_H_
#define XLA_INDEX_UTIL_H_
#include <vector>
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
class IndexUtil {
public:
static inline int64_t MultidimensionalIndexToLinearIndex(
const Shape& shape, absl::Span<const int64_t> multi_index) {
return MultidimensionalIndexToLinearIndex(
shape, LayoutUtil::MinorToMajor(shape), multi_index);
}
static inline int64_t MultidimensionalIndexToLinearIndex(
const Shape& shape, absl::Span<const int64_t> minor_to_major,
absl::Span<const int64_t> multi_index) {
for (size_t i = 0; i < multi_index.size(); ++i) {
DCHECK_GE(multi_index[i], 0);
DCHECK_LT(multi_index[i], shape.dimensions(i))
<< "indexing beyond extent in dimension " << i << ":"
<< "\n\tindex: " << absl::StrJoin(multi_index, ",")
<< "\n\tshape: " << ShapeUtil::HumanString(shape);
}
if (minor_to_major.empty()) {
return 0;
}
    // Accumulate minor-to-major with a running stride (Horner's scheme).
    int64_t linear_index = multi_index[minor_to_major[0]];
int64_t scale = 1;
for (int i = 1; i < minor_to_major.size(); ++i) {
scale *= shape.dimensions(minor_to_major[i - 1]);
linear_index += scale * multi_index[minor_to_major[i]];
}
return linear_index;
}
static DimensionVector LinearIndexToMultidimensionalIndex(
const Shape& shape, int64_t linear_index);
static bool BumpIndices(const Shape& shape, absl::Span<int64_t> indices);
static int64_t GetDimensionStride(const Shape& shape, int64_t dimension);
static bool IndexInBounds(const Shape& shape,
absl::Span<const int64_t> index);
static int CompareIndices(absl::Span<const int64_t> lhs,
absl::Span<const int64_t> rhs);
private:
IndexUtil(const IndexUtil&) = delete;
IndexUtil& operator=(const IndexUtil&) = delete;
};
}
#endif
#include "xla/index_util.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/logging.h"
namespace xla {
DimensionVector IndexUtil::LinearIndexToMultidimensionalIndex(
const Shape& shape, int64_t linear_index) {
DCHECK_GE(linear_index, 0);
DCHECK_LT(linear_index, ShapeUtil::ElementsIn(shape));
DimensionVector multi_index(shape.dimensions_size());
int64_t divisor = 1;
for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
multi_index[dimension] =
(linear_index / divisor) % shape.dimensions(dimension);
divisor *= shape.dimensions(dimension);
}
return multi_index;
}
bool IndexUtil::BumpIndices(const Shape& shape,
absl::Span<int64_t> indices) {
for (int64_t dimno = indices.size() - 1; dimno >= 0; --dimno) {
int64_t limit = shape.dimensions(dimno);
if (indices[dimno] + 1 < limit) {
indices[dimno]++;
      // Every dimension minor to the bumped one has maxed out, so reset
      // them all to zero.
      std::fill(indices.begin() + dimno + 1, indices.end(), 0);
return true;
}
}
return false;
}
int64_t IndexUtil::GetDimensionStride(const Shape& shape,
int64_t dimension) {
int64_t stride = 1;
for (auto dim : LayoutUtil::MinorToMajor(shape)) {
if (dim == dimension) {
break;
}
stride *= shape.dimensions()[dim];
}
return stride;
}
bool IndexUtil::IndexInBounds(const Shape& shape,
absl::Span<const int64_t> index) {
int64_t rank = shape.rank();
const int64_t index_size = index.size();
if (rank != index_size) {
return false;
}
for (int64_t d = 0; d < rank; ++d) {
if (index[d] >= shape.dimensions(d)) {
return false;
}
}
return true;
}
int IndexUtil::CompareIndices(absl::Span<const int64_t> lhs,
absl::Span<const int64_t> rhs) {
int64_t rank = lhs.size();
const int64_t rhs_rank = rhs.size();
CHECK_EQ(rhs_rank, rank);
for (int64_t dim = 0; dim < rank; ++dim) {
if (lhs[dim] < rhs[dim]) {
return -1;
} else if (lhs[dim] > rhs[dim]) {
return 1;
}
}
return 0;
}
} | #include "xla/index_util.h"
#include <initializer_list>
#include <vector>
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
void SetMinorToMajorLayout(Shape* shape, std::vector<int64_t> dimensions) {
shape->mutable_layout()->clear_minor_to_major();
for (auto dimension : dimensions) {
shape->mutable_layout()->add_minor_to_major(dimension);
}
}
TEST(IndexUtilTest, VectorIndexing) {
Shape vector_shape = ShapeUtil::MakeShape(F32, {100});
EXPECT_EQ(42,
IndexUtil::MultidimensionalIndexToLinearIndex(vector_shape, {42}));
auto multi_index =
IndexUtil::LinearIndexToMultidimensionalIndex(vector_shape, 42);
EXPECT_EQ(1, multi_index.size());
EXPECT_EQ(42, multi_index[0]);
}
TEST(IndexUtilTest, MatrixIndexingRowMajor) {
Shape matrix_shape_01 = ShapeUtil::MakeShape(F32, {10, 20});
SetMinorToMajorLayout(&matrix_shape_01, {0, 1});
EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{0, 0}));
EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{9, 19}));
EXPECT_EQ(53, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_01,
{3, 5}));
EXPECT_THAT(
IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_01, 53),
testing::ElementsAre(3, 5));
}
TEST(IndexUtilTest, MatrixIndexingColumnMajor) {
Shape matrix_shape_10 = ShapeUtil::MakeShape(F32, {10, 20});
SetMinorToMajorLayout(&matrix_shape_10, {1, 0});
EXPECT_EQ(0, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{0, 0}));
EXPECT_EQ(199, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{9, 19}));
EXPECT_EQ(65, IndexUtil::MultidimensionalIndexToLinearIndex(matrix_shape_10,
{3, 5}));
EXPECT_THAT(
IndexUtil::LinearIndexToMultidimensionalIndex(matrix_shape_10, 65),
testing::ElementsAre(3, 5));
}
TEST(IndexUtilTest, ThreeDArrayIndexing210) {
Shape shape_210 = ShapeUtil::MakeShape(F32, {10, 20, 30});
SetMinorToMajorLayout(&shape_210, {2, 1, 0});
EXPECT_EQ(1957, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210,
{3, 5, 7}));
EXPECT_EQ(5277, IndexUtil::MultidimensionalIndexToLinearIndex(shape_210,
{8, 15, 27}));
}
TEST(IndexUtilTest, ThreeDArrayIndexing120) {
Shape shape_120 = ShapeUtil::MakeShape(F32, {10, 20, 30});
SetMinorToMajorLayout(&shape_120, {1, 2, 0});
EXPECT_EQ(1945, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120,
{3, 5, 7}));
EXPECT_EQ(5355, IndexUtil::MultidimensionalIndexToLinearIndex(shape_120,
{8, 15, 27}));
}
TEST(IndexUtilTest, FourDArrayIndexing3210) {
Shape shape_3210 = ShapeUtil::MakeShape(F32, {10, 20, 30, 40});
SetMinorToMajorLayout(&shape_3210, {3, 2, 1, 0});
EXPECT_EQ(78289, IndexUtil::MultidimensionalIndexToLinearIndex(shape_3210,
{3, 5, 7, 9}));
EXPECT_EQ(211113, IndexUtil::MultidimensionalIndexToLinearIndex(
shape_3210, {8, 15, 27, 33}));
}
TEST(IndexUtilTest, LinearToMultiToLinear) {
std::vector<int64_t> linear_indexes = {0, 1439999999, 1145567336,
43883404, 617295214, 1117613654};
std::vector<std::vector<int64_t>> minor_to_major_orders;
minor_to_major_orders.push_back({6, 5, 4, 3, 2, 1, 0});
minor_to_major_orders.push_back({0, 1, 2, 3, 4, 5, 6});
minor_to_major_orders.push_back({4, 5, 1, 2, 6, 0, 3});
for (auto minor_to_major_order : minor_to_major_orders) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30, 40, 30, 20, 10});
SetMinorToMajorLayout(&shape, minor_to_major_order);
for (auto linear_index : linear_indexes) {
auto multi_index =
IndexUtil::LinearIndexToMultidimensionalIndex(shape, linear_index);
EXPECT_EQ(linear_index, IndexUtil::MultidimensionalIndexToLinearIndex(
shape, multi_index));
}
}
}
TEST(IndexUtilTest, BumpIndices2x2) {
auto shape = ShapeUtil::MakeShape(S32, {2, 2});
std::vector<int64_t> indices = {0, 0};
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(0, 1));
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(1, 0));
EXPECT_TRUE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
EXPECT_THAT(indices, ::testing::ElementsAre(1, 1));
EXPECT_FALSE(IndexUtil::BumpIndices(shape, absl::MakeSpan(indices)));
}
}
} |
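// A worked example of the linear-index formula above. With minor_to_major
// {0, 1}, dimension 0 varies fastest, so index {3, 5} in shape f32[10,20]
// maps to multi[0] + dims[0] * multi[1] = 3 + 10 * 5 = 53, matching the
// MatrixIndexingRowMajor test. A standalone round-trip check (assuming the
// ShapeUtil::MakeShapeWithDenseLayout helper is available):
#include <cstdint>
#include "xla/index_util.h"
#include "xla/shape_util.h"
bool IndexRoundTripsAt53() {
  xla::Shape shape = xla::ShapeUtil::MakeShapeWithDenseLayout(
      xla::F32, /*dimensions=*/{10, 20}, /*minor_to_major=*/{0, 1});
  int64_t linear =
      xla::IndexUtil::MultidimensionalIndexToLinearIndex(shape, {3, 5});
  auto multi =
      xla::IndexUtil::LinearIndexToMultidimensionalIndex(shape, linear);
  return linear == 53 && multi[0] == 3 && multi[1] == 5;
}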
1,779 | cpp | tensorflow/tensorflow | comparison_util | third_party/xla/xla/comparison_util.cc | third_party/xla/xla/comparison_util_test.cc | #ifndef XLA_COMPARISON_UTIL_H_
#define XLA_COMPARISON_UTIL_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/primitive_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
class Comparison {
public:
enum class Order : uint8_t {
kTotal,
kPartial,
};
friend absl::string_view ComparisonOrderToString(Comparison::Order order);
template <typename Sink>
friend void AbslStringify(Sink& sink, const Order& p) {
absl::Format(&sink, "%s", ComparisonOrderToString(p));
}
enum class Direction : uint8_t {
kEq,
kNe,
kGe,
kGt,
kLe,
kLt,
};
  enum class [[deprecated("Use PrimitiveType and Order")]] Type : uint8_t {
kFloat,
kFloatTotalOrder,
kSigned,
kUnsigned,
};
Comparison() = delete;
explicit Comparison(Direction dir, PrimitiveType type);
explicit Comparison(Direction dir, PrimitiveType type, Order order);
[[deprecated(
"Use Comparison(Comparison::Direction, "
"PrimitiveType)")]] explicit Comparison(Direction dir, Type type);
inline Direction GetDirection() const { return dir_; }
inline PrimitiveType GetPrimitiveType() const { return primitive_type_; }
inline Order GetOrder() const { return order_; }
[[deprecated("Use GetPrimitiveType() and GetOrder()")]] inline Type GetType()
const {
return type_;
}
inline bool IsEq() const { return dir_ == Direction::kEq; }
inline bool IsNe() const { return dir_ == Direction::kNe; }
inline bool IsGe() const { return dir_ == Direction::kGe; }
inline bool IsGt() const { return dir_ == Direction::kGt; }
inline bool IsLt() const { return dir_ == Direction::kLt; }
inline bool IsTotalOrder() const { return order_ == Order::kTotal; }
inline bool IsPartialOrder() const { return order_ == Order::kPartial; }
inline bool IsF32TotalOrder() const {
return primitive_type_ == PrimitiveType::F32 && IsTotalOrder();
}
inline bool IsBf16TotalOrder() const {
return primitive_type_ == PrimitiveType::BF16 && IsTotalOrder();
}
inline bool IsStandardF32() const {
return primitive_type_ == PrimitiveType::F32 && IsPartialOrder();
}
inline bool IsStandardBf16() const {
return primitive_type_ == PrimitiveType::BF16 && IsPartialOrder();
}
inline bool IsStandardS32() const {
return primitive_type_ == PrimitiveType::S32 && IsTotalOrder();
}
inline bool IsStandardU32() const {
return primitive_type_ == PrimitiveType::U32 && IsTotalOrder();
}
inline bool IsIntegralPrimitiveType() const {
return primitive_util::IsIntegralType(primitive_type_);
}
inline bool IsFloatingPointPrimitiveType() const {
return primitive_util::IsFloatingPointType(primitive_type_);
}
bool IsReflexive() const;
bool IsAntireflexive() const;
Comparison Converse() const;
std::optional<Comparison> Inverse() const;
std::string ToString(std::string prefix1 = ".", std::string prefix2 = ".",
std::string prefix3 = ".") const;
template <typename T>
inline std::function<bool(T, T)> GetComparator() const {
switch (GetDirection()) {
case Direction::kEq:
return std::equal_to<T>();
case Direction::kNe:
return std::not_equal_to<T>();
case Direction::kGe:
return std::greater_equal<T>();
case Direction::kGt:
return std::greater<T>();
case Direction::kLe:
return std::less_equal<T>();
case Direction::kLt:
return std::less<T>();
}
}
template <typename T>
inline bool Compare(const T a, const T b) const {
DCHECK(primitive_util::IsCanonicalRepresentation<T>(primitive_type_));
if constexpr (is_specialized_floating_point_v<T>) {
if (IsTotalOrder()) {
        // Mapping to sign-magnitude integers realizes the IEEE-754
        // totalOrder predicate: -NaN < -Inf < -0 < +0 < +Inf < +NaN.
        using R = SignedIntegerTypeForSizeType<sizeof(T)>;
return GetComparator<R>()(ToSignMagnitude(a), ToSignMagnitude(b));
}
}
return GetComparator<T>()(a, b);
}
[[deprecated("Use PrimitiveType and Order")]] static Comparison::Type
DefaultComparisonType(PrimitiveType type);
private:
const Direction dir_;
const PrimitiveType primitive_type_;
const Order order_;
[[deprecated]] const Type type_;
};
using ComparisonDirection = Comparison::Direction;
using ComparisonOrder = Comparison::Order;
inline std::ostream& operator<<(std::ostream& os, const Comparison& cmp) {
return os << cmp.ToString();
}
std::string ComparisonDirectionToString(Comparison::Direction direction);
std::string ComparisonTypeToString(Comparison::Type type);
absl::string_view ComparisonPrimitiveTypeToString(PrimitiveType type);
absl::StatusOr<Comparison::Direction> StringToComparisonDirection(
absl::string_view direction);
absl::StatusOr<Comparison::Type> StringToComparisonType(
absl::string_view comparison);
absl::StatusOr<Comparison::Order> StringToComparisonOrder(
absl::string_view order);
template <typename KeyFn>
auto LessThanByKey(KeyFn&& key_fn) {
return [=](const auto& a, const auto& b) { return key_fn(a) < key_fn(b); };
}
inline bool operator==(const Comparison& a, const Comparison& b) {
return a.GetDirection() == b.GetDirection() &&
a.GetPrimitiveType() == b.GetPrimitiveType() &&
a.GetOrder() == b.GetOrder();
}
inline bool operator!=(const Comparison& a, const Comparison& b) {
return !(a == b);
}
}
#endif
#include "xla/comparison_util.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/primitive_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
bool IsValidComparison(xla::PrimitiveType type, Comparison::Order order) {
if (primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type)) {
return true;
}
if (primitive_util::IsIntegralType(type) || type == PRED) {
return order == Comparison::Order::kTotal;
}
LOG(FATAL) << "Unsupported type: " << PrimitiveType_Name(type);
}
PrimitiveType DefaultPrimitiveType(Comparison::Type type) {
switch (type) {
case Comparison::Type::kFloat:
case Comparison::Type::kFloatTotalOrder:
return PrimitiveType::F32;
case Comparison::Type::kSigned:
return PrimitiveType::S32;
case Comparison::Type::kUnsigned:
return PrimitiveType::U32;
}
}
Comparison::Order DefaultOrdering(Comparison::Type type) {
switch (type) {
case Comparison::Type::kFloat:
return Comparison::Order::kPartial;
case Comparison::Type::kFloatTotalOrder:
case Comparison::Type::kSigned:
case Comparison::Type::kUnsigned:
return Comparison::Order::kTotal;
}
}
Comparison::Order DefaultOrdering(PrimitiveType type) {
if (primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type)) {
return Comparison::Order::kPartial;
}
if (primitive_util::IsIntegralType(type) || type == PRED) {
return Comparison::Order::kTotal;
}
LOG(FATAL) << "Unsupported type: " << PrimitiveType_Name(type);
}
Comparison::Direction Converse(Comparison::Direction direction) {
switch (direction) {
case Comparison::Direction::kEq:
return Comparison::Direction::kEq;
case Comparison::Direction::kNe:
return Comparison::Direction::kNe;
case Comparison::Direction::kGe:
return Comparison::Direction::kLe;
case Comparison::Direction::kGt:
return Comparison::Direction::kLt;
case Comparison::Direction::kLe:
return Comparison::Direction::kGe;
case Comparison::Direction::kLt:
return Comparison::Direction::kGt;
}
}
Comparison::Direction Inverse(Comparison::Direction direction) {
switch (direction) {
case Comparison::Direction::kEq:
return Comparison::Direction::kNe;
case Comparison::Direction::kNe:
return Comparison::Direction::kEq;
case Comparison::Direction::kGe:
return Comparison::Direction::kLt;
case Comparison::Direction::kGt:
return Comparison::Direction::kLe;
case Comparison::Direction::kLe:
return Comparison::Direction::kGt;
case Comparison::Direction::kLt:
return Comparison::Direction::kGe;
}
}
}
std::string ComparisonDirectionToString(Comparison::Direction direction) {
switch (direction) {
case Comparison::Direction::kEq:
return "EQ";
case Comparison::Direction::kNe:
return "NE";
case Comparison::Direction::kGe:
return "GE";
case Comparison::Direction::kGt:
return "GT";
case Comparison::Direction::kLe:
return "LE";
case Comparison::Direction::kLt:
return "LT";
default:
LOG(FATAL) << "Attempted to print uninitialized comparison direction";
}
}
std::string ComparisonTypeToString(Comparison::Type type) {
switch (type) {
case Comparison::Type::kFloat:
return "FLOAT";
case Comparison::Type::kFloatTotalOrder:
return "TOTALORDER";
case Comparison::Type::kSigned:
return "SIGNED";
case Comparison::Type::kUnsigned:
return "UNSIGNED";
}
}
absl::string_view ComparisonPrimitiveTypeToString(PrimitiveType type) {
return PrimitiveType_Name(type);
}
absl::string_view ComparisonOrderToString(Comparison::Order order) {
switch (order) {
case Comparison::Order::kPartial:
return "PARTIALORDER";
case Comparison::Order::kTotal:
return "TOTALORDER";
}
}
absl::StatusOr<Comparison::Direction> StringToComparisonDirection(
absl::string_view direction) {
static auto* map =
new absl::flat_hash_map<std::string, Comparison::Direction>({
{"EQ", Comparison::Direction::kEq},
{"NE", Comparison::Direction::kNe},
{"GE", Comparison::Direction::kGe},
{"GT", Comparison::Direction::kGt},
{"LE", Comparison::Direction::kLe},
{"LT", Comparison::Direction::kLt},
});
auto it = map->find(direction);
if (it == map->end()) {
return InvalidArgument("Unknown comparison direction: %s", direction);
}
return it->second;
}
absl::StatusOr<Comparison::Order> StringToComparisonOrder(
absl::string_view order) {
static auto* map = new absl::flat_hash_map<std::string, Comparison::Order>({
{"TOTALORDER", Comparison::Order::kTotal},
{"PARTIALORDER", Comparison::Order::kPartial},
});
auto it = map->find(order);
if (it == map->end()) {
return InvalidArgument("Unknown comparison type: %s", order);
}
return it->second;
}
absl::StatusOr<Comparison::Type> StringToComparisonType(
absl::string_view comparison) {
static auto* map = new absl::flat_hash_map<std::string, Comparison::Type>({
{"FLOAT", Comparison::Type::kFloat},
{"TOTALORDER", Comparison::Type::kFloatTotalOrder},
{"SIGNED", Comparison::Type::kSigned},
{"UNSIGNED", Comparison::Type::kUnsigned},
});
auto it = map->find(comparison);
if (it == map->end()) {
return InvalidArgument("Unknown comparison type: %s", comparison);
}
return it->second;
}
Comparison::Type Comparison::DefaultComparisonType(PrimitiveType type) {
if (primitive_util::IsFloatingPointType(type) ||
primitive_util::IsComplexType(type)) {
return Type::kFloat;
}
if (primitive_util::IsSignedIntegralType(type)) {
return Type::kSigned;
}
if (primitive_util::IsUnsignedIntegralType(type) || type == PRED) {
return Type::kUnsigned;
}
LOG(FATAL) << "Unexpected: " << PrimitiveType_Name(type);
}
Comparison::Comparison(Direction dir, PrimitiveType type, Order order)
: dir_(dir),
primitive_type_(type),
order_(order),
type_(DefaultComparisonType(type)) {
CHECK(IsValidComparison(primitive_type_, order_));
}
Comparison::Comparison(Direction dir, PrimitiveType type)
: dir_(dir),
primitive_type_(type),
order_(DefaultOrdering(type)),
type_(DefaultComparisonType(type)) {
CHECK(IsValidComparison(primitive_type_, order_));
}
Comparison::Comparison(Direction dir, Type type)
: dir_(dir),
primitive_type_(DefaultPrimitiveType(type)),
order_(DefaultOrdering(type)),
type_(type) {
CHECK(IsValidComparison(primitive_type_, order_));
}
Comparison Comparison::Converse() const {
return Comparison(xla::Converse(dir_), primitive_type_, order_);
}
std::optional<Comparison> Comparison::Inverse() const {
if (IsPartialOrder()) {
return std::nullopt;
}
if (primitive_util::IsArrayType(primitive_type_)) {
return Comparison(xla::Inverse(dir_), primitive_type_, order_);
}
return std::nullopt;
}
bool Comparison::IsReflexive() const {
switch (dir_) {
case Direction::kEq:
case Direction::kGe:
case Direction::kLe:
return IsTotalOrder();
case Direction::kNe:
case Direction::kGt:
case Direction::kLt:
return false;
}
}
bool Comparison::IsAntireflexive() const {
switch (dir_) {
case Direction::kNe:
return IsTotalOrder();
case Direction::kGt:
case Direction::kLt:
return true;
case Direction::kEq:
case Direction::kGe:
case Direction::kLe:
return false;
}
}
std::string Comparison::ToString(std::string prefix1, std::string prefix2,
std::string prefix3) const {
return absl::StrCat(prefix1, ComparisonDirectionToString(dir_), prefix2,
ComparisonPrimitiveTypeToString(primitive_type_), prefix3,
ComparisonOrderToString(order_));
}
} | #include "xla/comparison_util.h"
#include <cstdint>
#include <limits>
#include "xla/test.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using ::testing::Eq;
TEST(Comparison, FloatsDefaultToPartialOrder) {
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::BF16).GetOrder(),
Comparison::Order::kPartial);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::F32).GetOrder(),
Comparison::Order::kPartial);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::C64).GetOrder(),
Comparison::Order::kPartial);
}
TEST(Comparison, IntegersDefaultToTotalOrder) {
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::S32).GetOrder(),
Comparison::Order::kTotal);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::U8).GetOrder(),
Comparison::Order::kTotal);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, PrimitiveType::PRED).GetOrder(),
Comparison::Order::kTotal);
}
TEST(Comparison, LegacyConstructorDefaultsToX32) {
EXPECT_EQ(Comparison(Comparison::Direction::kGe, Comparison::Type::kFloat)
.GetPrimitiveType(),
xla::PrimitiveType::F32);
EXPECT_EQ(
Comparison(Comparison::Direction::kGe, Comparison::Type::kFloatTotalOrder)
.GetPrimitiveType(),
xla::PrimitiveType::F32);
EXPECT_EQ(Comparison(Comparison::Direction::kGe, Comparison::Type::kSigned)
.GetPrimitiveType(),
xla::PrimitiveType::S32);
EXPECT_EQ(Comparison(Comparison::Direction::kGe, Comparison::Type::kUnsigned)
.GetPrimitiveType(),
xla::PrimitiveType::U32);
}
TEST(Comparison, PartialOrderReflexivity) {
EXPECT_FALSE(
Comparison(Comparison::Direction::kEq, PrimitiveType::F32).IsReflexive());
EXPECT_FALSE(
Comparison(Comparison::Direction::kLe, PrimitiveType::F32).IsReflexive());
EXPECT_FALSE(
Comparison(Comparison::Direction::kLt, PrimitiveType::S32).IsReflexive());
}
TEST(Comparison, TotalOrderReflexivity) {
EXPECT_TRUE(Comparison(Comparison::Direction::kLe, PrimitiveType::BF16,
Comparison::Order::kTotal)
.IsReflexive());
EXPECT_TRUE(Comparison(Comparison::Direction::kGe, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsReflexive());
EXPECT_TRUE(
Comparison(Comparison::Direction::kEq, PrimitiveType::S32).IsReflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kNe, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsReflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kLt, PrimitiveType::F64,
Comparison::Order::kTotal)
.IsReflexive());
}
TEST(Comparison, PartialOrderAntiReflexivity) {
EXPECT_TRUE(Comparison(Comparison::Direction::kGt, PrimitiveType::F32)
.IsAntireflexive());
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kEq, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsAntireflexive());
}
TEST(Comparison, TotalOrderAntiReflexivity) {
EXPECT_TRUE(Comparison(Comparison::Direction::kNe, PrimitiveType::BF16,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_TRUE(Comparison(Comparison::Direction::kNe, PrimitiveType::S32)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kEq, PrimitiveType::F32,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kLe, PrimitiveType::F64,
Comparison::Order::kTotal)
.IsAntireflexive());
EXPECT_FALSE(Comparison(Comparison::Direction::kLe, PrimitiveType::S8)
.IsAntireflexive());
}
TEST(Comparison, Converse) {
EXPECT_THAT(
Comparison(Comparison::Direction::kLe, PrimitiveType::S8).Converse(),
Eq(Comparison(Comparison::Direction::kGe, PrimitiveType::S8)));
EXPECT_THAT(
Comparison(Comparison::Direction::kEq, PrimitiveType::U16).Converse(),
Eq(Comparison(Comparison::Direction::kEq, PrimitiveType::U16)));
EXPECT_THAT(
Comparison(Comparison::Direction::kGt, PrimitiveType::F32).Converse(),
Eq(Comparison(Comparison::Direction::kLt, PrimitiveType::F32)));
}
TEST(Comparison, PartialOrderFloatsShouldNotHaveInverse) {
EXPECT_FALSE(Comparison(Comparison::Direction::kGt, PrimitiveType::F32)
.Inverse()
.has_value());
}
TEST(Comparison, Inverse) {
EXPECT_THAT(
*Comparison(Comparison::Direction::kLe, PrimitiveType::S64).Inverse(),
Eq(Comparison(Comparison::Direction::kGt, PrimitiveType::S64)));
EXPECT_THAT(
*Comparison(Comparison::Direction::kEq, PrimitiveType::U16).Inverse(),
Eq(Comparison(Comparison::Direction::kNe, PrimitiveType::U16)));
EXPECT_THAT(*Comparison(Comparison::Direction::kGt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Inverse(),
Eq(Comparison(Comparison::Direction::kLe, PrimitiveType::F32,
Comparison::Order::kTotal)));
}
TEST(Comparison, ToString) {
EXPECT_EQ(
Comparison(Comparison::Direction::kLt, PrimitiveType::F32).ToString(),
".LT.F32.PARTIALORDER");
EXPECT_EQ(
Comparison(Comparison::Direction::kEq, PrimitiveType::S8).ToString(),
".EQ.S8.TOTALORDER");
EXPECT_EQ(Comparison(Comparison::Direction::kGe, PrimitiveType::C128)
.ToString("_1_", "_2_", "_3_"),
"_1_GE_2_C128_3_PARTIALORDER");
}
TEST(Comparison, TotalOrderFloatComparison) {
EXPECT_TRUE(Comparison(Comparison::Direction::kEq, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN()));
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(1.0f, 2.0f));
EXPECT_FALSE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(std::numeric_limits<float>::infinity(),
-std::numeric_limits<float>::infinity()));
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(-0.0f, +0.0f));
EXPECT_TRUE(Comparison(Comparison::Direction::kNe, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(+0.0f, -0.0f));
EXPECT_FALSE(Comparison(Comparison::Direction::kGt, PrimitiveType::F32,
Comparison::Order::kTotal)
.Compare<float>(-0.1f, 0.1f));
}
TEST(Comparison, TotalOrderBfloat16Comparison) {
EXPECT_TRUE(Comparison(Comparison::Direction::kEq, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(
std::numeric_limits<xla::bfloat16>::quiet_NaN(),
std::numeric_limits<xla::bfloat16>::quiet_NaN()));
EXPECT_TRUE(
Comparison(Comparison::Direction::kLt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(1.0f), xla::bfloat16(2.0f)));
EXPECT_FALSE(Comparison(Comparison::Direction::kLt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(
std::numeric_limits<xla::bfloat16>::infinity(),
-std::numeric_limits<xla::bfloat16>::infinity()));
EXPECT_TRUE(
Comparison(Comparison::Direction::kLt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(-0.0f), xla::bfloat16(+0.0f)));
EXPECT_TRUE(
Comparison(Comparison::Direction::kGt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(+0.0f), xla::bfloat16(-0.0f)));
EXPECT_FALSE(
Comparison(Comparison::Direction::kGt, PrimitiveType::BF16,
Comparison::Order::kTotal)
.Compare<xla::bfloat16>(xla::bfloat16(-0.1f), xla::bfloat16(0.1f)));
}
TEST(Comparison, Compare) {
EXPECT_TRUE(Comparison(Comparison::Direction::kLt, PrimitiveType::F32)
.Compare<float>(1.0f, 2.0f));
EXPECT_TRUE(
Comparison(Comparison::Direction::kGe, PrimitiveType::BF16)
.Compare<xla::bfloat16>(xla::bfloat16(2.0f), xla::bfloat16(1.0f)));
EXPECT_FALSE(Comparison(Comparison::Direction::kNe, PrimitiveType::S64)
.Compare<int64_t>(1'000'000, 1'000'000));
EXPECT_TRUE(Comparison(Comparison::Direction::kEq, PrimitiveType::U8)
.Compare<uint8_t>(63, 63));
}
}
} |
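// A sketch of the total-order semantics implemented by Comparison::Compare
// above: floats are mapped to sign-magnitude integers, realizing the
// IEEE-754 totalOrder ordering -NaN < -Inf < -0 < +0 < +Inf < +NaN. (The
// -NaN case assumes unary minus flips the sign bit, as on mainstream
// hardware.)
#include <limits>
#include "xla/comparison_util.h"
bool TotalOrderExamplesHold() {
  const xla::Comparison lt(xla::Comparison::Direction::kLt,
                           xla::PrimitiveType::F32,
                           xla::Comparison::Order::kTotal);
  const float nan = std::numeric_limits<float>::quiet_NaN();
  const float inf = std::numeric_limits<float>::infinity();
  return lt.Compare<float>(-nan, -inf) &&   // -NaN sorts below -Inf.
         lt.Compare<float>(-0.0f, 0.0f) &&  // -0 sorts strictly below +0.
         lt.Compare<float>(inf, nan);       // +NaN sorts above +Inf.
}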
1,780 | cpp | tensorflow/tensorflow | ef57 | third_party/xla/xla/ef57.cc | third_party/xla/xla/ef57_test.cc | #ifndef XLA_EF57_H_
#define XLA_EF57_H_
#include <cmath>
#include <utility>
#include "absl/types/span.h"
namespace xla {
inline std::pair<float, float> SplitF64ToF32(double x) {
const float x_f32 = static_cast<float>(x);
const bool result_is_finite = std::isfinite(x_f32);
  // The high part is x rounded to the nearest float.
  const float hi = x_f32;
  // The residual x - hi is exact in double; rounding it gives the low part.
  // It is forced to zero below when the high part overflowed to infinity.
  const float lo = static_cast<float>(x - static_cast<double>(hi));
return std::make_pair(hi, result_is_finite ? lo : 0.0f);
}
void ConvertF64ToEf57(absl::Span<const double> input, absl::Span<float> output);
}
#endif
#include "xla/ef57.h"
#include <limits>
#include <tuple>
#include "absl/types/span.h"
#include "xla/compiler_macros.h"
#include "tsl/platform/logging.h"
#ifdef XLA_HAS_SSE2
#include <immintrin.h>
#endif
#if defined(XLA_HAS_ARM_NEON) && defined(XLA_HAS_ARM64)
#include <arm_neon.h>
#endif
namespace xla {
void ConvertF64ToEf57(absl::Span<const double> input,
absl::Span<float> output) {
DCHECK_EQ(input.size() * 2, output.size());
#ifdef __AVX__
constexpr int kDoublesPerAvxIteration = sizeof(__m256d) / sizeof(double);
constexpr int kFloatsPerSseRegister = sizeof(__m128) / sizeof(float);
while (input.size() >= kDoublesPerAvxIteration) {
__m256d x = _mm256_loadu_pd(input.data());
__m128 x_hi_f32 = _mm256_cvtpd_ps(x);
__m256d x_hi_f64 = _mm256_cvtps_pd(x_hi_f32);
__m256d x_lo_f64 = _mm256_sub_pd(x, x_hi_f64);
__m128 x_lo_f32 = _mm256_cvtpd_ps(x_lo_f64);
    // Mask the low halves to zero wherever the high half is non-finite,
    // matching SplitF64ToF32's scalar behavior.
    const __m128 inf = _mm_set1_ps(std::numeric_limits<float>::infinity());
__m128 x_hi_exponent = _mm_and_ps(x_hi_f32, inf);
__m128 x_is_finite = _mm_cmplt_ps(x_hi_exponent, inf);
x_lo_f32 = _mm_and_ps(x_lo_f32, x_is_finite);
_mm_storeu_ps(output.data(), _mm_unpacklo_ps(x_hi_f32, x_lo_f32));
output.remove_prefix(kFloatsPerSseRegister);
_mm_storeu_ps(output.data(), _mm_unpackhi_ps(x_hi_f32, x_lo_f32));
output.remove_prefix(kFloatsPerSseRegister);
input.remove_prefix(kDoublesPerAvxIteration);
}
#endif
#ifdef XLA_HAS_SSE2
constexpr int kDoublesPerSseIteration = sizeof(__m128d) / sizeof(double);
constexpr int kFloatsPerSseIteration = sizeof(__m128) / sizeof(float);
while (input.size() >= kDoublesPerSseIteration) {
__m128d x = _mm_loadu_pd(input.data());
__m128 x_hi_f32 = _mm_cvtpd_ps(x);
__m128d x_hi_f64 = _mm_cvtps_pd(x_hi_f32);
__m128d x_lo_f64 = _mm_sub_pd(x, x_hi_f64);
__m128 x_lo_f32 = _mm_cvtpd_ps(x_lo_f64);
const __m128 inf = _mm_set1_ps(std::numeric_limits<float>::infinity());
__m128 x_hi_exponent = _mm_and_ps(x_hi_f32, inf);
__m128 x_is_finite = _mm_cmplt_ps(x_hi_exponent, inf);
x_lo_f32 = _mm_and_ps(x_lo_f32, x_is_finite);
__m128 to_store = _mm_unpacklo_ps(x_hi_f32, x_lo_f32);
_mm_storeu_ps(output.data(), to_store);
input.remove_prefix(kDoublesPerSseIteration);
output.remove_prefix(kFloatsPerSseIteration);
}
#endif
#if defined(XLA_HAS_ARM_NEON) && defined(XLA_HAS_ARM64)
constexpr int kDoublesPerNeonIteration = sizeof(float64x2_t) / sizeof(double);
constexpr int kFloatsPerNeonIteration = sizeof(float32x2x2_t) / sizeof(float);
while (input.size() >= kDoublesPerNeonIteration) {
float64x2_t x = vld1q_f64(input.data());
float32x2_t x_hi_f32 = vcvt_f32_f64(x);
float64x2_t x_hi_f64 = vcvt_f64_f32(x_hi_f32);
float64x2_t x_lo_f64 = vsubq_f64(x, x_hi_f64);
float32x2_t x_lo_f32 = vcvt_f32_f64(x_lo_f64);
uint32x2_t x_is_finite =
vcalt_f32(x_hi_f32, vdup_n_f32(std::numeric_limits<float>::infinity()));
x_lo_f32 = vreinterpret_f32_u32(
vand_u32(vreinterpret_u32_f32(x_lo_f32), x_is_finite));
float32x2x2_t to_store;
to_store.val[0] = x_hi_f32;
to_store.val[1] = x_lo_f32;
vst2_f32(output.data(), to_store);
input.remove_prefix(kDoublesPerNeonIteration);
output.remove_prefix(kFloatsPerNeonIteration);
}
#endif
  // Scalar epilogue: convert whatever elements the vector paths left over.
  while (!input.empty()) {
std::tie(output[0], output[1]) = SplitF64ToF32(input.front());
input.remove_prefix(1);
output.remove_prefix(2);
}
}
} | #include "xla/ef57.h"
#include <cmath>
#include <limits>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log_streamer.h"
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "xla/test.h"
namespace xla {
namespace {
TEST(Ef57Test, DoubleMax) {
auto [high, low] = SplitF64ToF32(std::numeric_limits<double>::max());
EXPECT_EQ(high, std::numeric_limits<float>::infinity());
EXPECT_EQ(low, 0.0f);
}
TEST(Ef57Test, Overflow) {
auto [high, low] = SplitF64ToF32(0x1.ffffffp+127);
EXPECT_EQ(high, std::numeric_limits<float>::infinity());
EXPECT_EQ(low, 0.0f);
}
TEST(Ef57Test, CheckPrecision) {
auto [high, low] = SplitF64ToF32(2.0 - 0x1p-52);
EXPECT_EQ(high, 2.0f);
EXPECT_EQ(low, -0x1p-52f);
}
TEST(Ef57Test, SimpleArray) {
std::vector<double> inputs(127);
absl::BitGen gen;
for (double& input : inputs) {
input = absl::Uniform<float>(gen, 0.0f, 1.0f);
}
std::vector<float> outputs(inputs.size() * 2);
ConvertF64ToEf57(inputs, absl::MakeSpan(outputs));
for (int i = 0; i < inputs.size(); ++i) {
EXPECT_EQ(outputs[i * 2], inputs[i]);
EXPECT_EQ(outputs[i * 2 + 1], 0.0f);
}
}
TEST(Ef57Test, RelativeSplit) {
const float distance = std::scalbnf(1.0f, std::numeric_limits<float>::digits);
std::vector<double> inputs(127);
absl::BitGen gen;
for (double& input : inputs) {
input = absl::Uniform<double>(gen, 0.0, 1.0);
}
std::vector<float> outputs(inputs.size() * 2);
ConvertF64ToEf57(inputs, absl::MakeSpan(outputs));
for (int i = 0; i < outputs.size(); i += 2) {
auto most_significant = outputs[i];
auto least_significant = outputs[i + 1];
auto most_significant_mag = std::fabs(most_significant);
auto least_significant_mag = std::fabs(least_significant);
EXPECT_FALSE(std::isnan(most_significant_mag));
if (most_significant_mag == 0.0f) {
EXPECT_EQ(least_significant_mag, 0.0f);
} else {
EXPECT_GT(most_significant_mag, least_significant_mag * distance);
}
}
}
}
} |
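// A sketch of the invariant behind the ef57 split above: the two floats of
// each pair carry non-overlapping significands, so for finite results the
// high part dominates the low part by at least 2^24 (the property the
// RelativeSplit test probes with random inputs).
#include <cmath>
#include "xla/ef57.h"
bool SplitPartsDoNotOverlap(double x) {
  auto [hi, lo] = xla::SplitF64ToF32(x);
  if (hi == 0.0f || !std::isfinite(hi)) return lo == 0.0f;
  return std::fabs(static_cast<double>(hi)) >=
         std::ldexp(std::fabs(static_cast<double>(lo)), 24);
}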
1,781 | cpp | tensorflow/tensorflow | text_literal_writer | third_party/xla/xla/text_literal_writer.cc | third_party/xla/xla/text_literal_writer_test.cc | #ifndef XLA_TEXT_LITERAL_WRITER_H_
#define XLA_TEXT_LITERAL_WRITER_H_
#include "absl/strings/string_view.h"
#include "xla/literal.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace xla {
class TextLiteralWriter {
public:
static absl::Status WriteToPath(const Literal& literal,
absl::string_view path);
private:
TextLiteralWriter(const TextLiteralWriter&) = delete;
TextLiteralWriter& operator=(const TextLiteralWriter&) = delete;
};
}
#endif
#include "xla/text_literal_writer.h"
#include <memory>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "tsl/platform/env.h"
namespace xla {
absl::Status TextLiteralWriter::WriteToPath(
const Literal& literal, absl::string_view path) {
std::unique_ptr<tsl::WritableFile> f;
auto s = tsl::Env::Default()->NewWritableFile(std::string(path), &f);
if (!s.ok()) {
return s;
}
s = f->Append(ShapeUtil::HumanString(literal.shape()) + "\n");
if (!s.ok()) {
return s;
}
absl::Status status;
tsl::WritableFile* f_ptr = f.get();
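// Emit one "(i, j, ...): value" line per element; the first write error is
// latched in `status`, turning the remaining cell callbacks into no-ops.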
literal.EachCellAsString([f_ptr, &status](absl::Span<const int64_t> indices,
const std::string& value) {
if (!status.ok()) {
return;
}
std::string coordinates =
absl::StrCat("(", absl::StrJoin(indices, ", "), ")");
status = f_ptr->Append(absl::StrCat(coordinates, ": ", value, "\n"));
});
auto ignored = f->Close();
return status;
}
} | #include "xla/text_literal_writer.h"
#include <memory>
#include <string>
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
namespace xla {
namespace {
TEST(TextLiteralWriterTest, WritesFloatLiteral) {
auto literal = LiteralUtil::CreateR2<float>({
{3.14, 2.17},
{1.23, 4.56},
});
std::string path;
ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&path));
ASSERT_IS_OK(TextLiteralWriter::WriteToPath(literal, path));
std::string contents;
TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), path, &contents));
const std::string expected = R"(f32[2,2]
(0, 0): 3.14
(0, 1): 2.17
(1, 0): 1.23
(1, 1): 4.56
)";
EXPECT_EQ(expected, contents);
}
}
} |
1,782 | cpp | tensorflow/tensorflow | permutation_util | third_party/xla/xla/permutation_util.cc | third_party/xla/xla/permutation_util_test.cc | #ifndef XLA_PERMUTATION_UTIL_H_
#define XLA_PERMUTATION_UTIL_H_
#include <vector>
#include "absl/types/span.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
bool IsPermutation(absl::Span<const int64_t> permutation);
template <typename Container>
std::vector<typename Container::value_type> Permute(
const Container& input, absl::Span<const int64_t> permutation) {
using T = typename Container::value_type;
absl::Span<const T> data(input);
CHECK_EQ(permutation.size(), data.size());
CHECK(IsPermutation(permutation));
std::vector<T> output(data.size());
for (size_t i = 0; i < permutation.size(); ++i) {
output[i] = data[permutation[i]];
}
return output;
}
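// Example: input = {a, b, c} and permutation = {2, 0, 1} give
// Permute -> {c, a, b} (output[i] = input[permutation[i]]) and
// PermuteInverse -> {b, c, a} (output[permutation[i]] = input[i]).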
template <typename Container>
std::vector<typename Container::value_type> PermuteInverse(
const Container& input, absl::Span<const int64_t> permutation) {
using T = typename Container::value_type;
absl::Span<const T> data(input);
CHECK_EQ(permutation.size(), data.size());
CHECK(IsPermutation(permutation));
std::vector<T> output(data.size());
for (size_t i = 0; i < permutation.size(); ++i) {
output[permutation[i]] = data[i];
}
return output;
}
std::vector<int64_t> InversePermutation(
absl::Span<const int64_t> input_permutation);
std::vector<int64_t> ComposePermutations(absl::Span<const int64_t> p1,
absl::Span<const int64_t> p2);
bool IsIdentityPermutation(absl::Span<const int64_t> permutation);
}
#endif
#include "xla/permutation_util.h"
#include <vector>
#include "absl/container/inlined_vector.h"
namespace xla {
bool IsPermutation(absl::Span<const int64_t> permutation) {
absl::InlinedVector<bool, 8> seen(permutation.size(), false);
for (int64_t p : permutation) {
if (p < 0 || p >= permutation.size() || seen[p]) {
return false;
}
seen[p] = true;
}
return true;
}
std::vector<int64_t> InversePermutation(
absl::Span<const int64_t> input_permutation) {
DCHECK(IsPermutation(input_permutation));
std::vector<int64_t> output_permutation(input_permutation.size(), -1);
for (size_t i = 0; i < input_permutation.size(); ++i) {
output_permutation[input_permutation[i]] = i;
}
return output_permutation;
}
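// Composition applies p2 first, then p1: output[i] = p1[p2[i]].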
std::vector<int64_t> ComposePermutations(absl::Span<const int64_t> p1,
absl::Span<const int64_t> p2) {
CHECK_EQ(p1.size(), p2.size());
std::vector<int64_t> output;
output.reserve(p1.size());
for (size_t i = 0; i < p1.size(); ++i) {
output.push_back(p1.at(p2.at(i)));
}
return output;
}
bool IsIdentityPermutation(absl::Span<const int64_t> permutation) {
for (int64_t i = 0; i < permutation.size(); ++i) {
if (permutation[i] != i) {
return false;
}
}
return true;
}
} | #include "xla/permutation_util.h"
#include "xla/test.h"
namespace xla {
namespace {
TEST(PermutationUtilTest, IsPermutation) {
EXPECT_TRUE(IsPermutation({}));
EXPECT_TRUE(IsPermutation({0}));
EXPECT_FALSE(IsPermutation({-3}));
EXPECT_TRUE(IsPermutation({0, 1}));
EXPECT_FALSE(IsPermutation({1, 1}));
EXPECT_TRUE(IsPermutation({1, 0}));
EXPECT_TRUE(IsPermutation({3, 1, 0, 2}));
EXPECT_FALSE(IsPermutation({3, 0, 2}));
}
}
} |
1,783 | cpp | tensorflow/tensorflow | primitive_util | third_party/xla/xla/primitive_util.cc | third_party/xla/xla/primitive_util_test.cc | #ifndef XLA_PRIMITIVE_UTIL_H_
#define XLA_PRIMITIVE_UTIL_H_
#include <array>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace primitive_util {
int SignificandWidth(PrimitiveType type);
int ExponentWidth(PrimitiveType type);
int UnderflowExponent(PrimitiveType type);
int OverflowExponent(PrimitiveType type);
int ExponentBias(PrimitiveType type);
bool HasInfinity(PrimitiveType type);
bool HasNegativeZero(PrimitiveType type);
template <typename NativeT>
constexpr PrimitiveType NativeToPrimitiveType() {
static_assert(!std::is_same<NativeT, NativeT>::value,
"Cannot map native type to primitive type.");
return PRIMITIVE_TYPE_INVALID;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<bool>() {
return PRED;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<u2>() {
return U2;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<u4>() {
return U4;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<uint8_t>() {
return U8;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<uint16_t>() {
return U16;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<uint32_t>() {
return U32;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<uint64_t>() {
return U64;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<s2>() {
return S2;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<s4>() {
return S4;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<int8_t>() {
return S8;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<int16_t>() {
return S16;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<int32_t>() {
return S32;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<int64_t>() {
return S64;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<float>() {
return F32;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<double>() {
return F64;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<half>() {
return F16;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<bfloat16>() {
return BF16;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<tsl::float8_e5m2>() {
return F8E5M2;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<tsl::float8_e4m3fn>() {
return F8E4M3FN;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<tsl::float8_e4m3b11fnuz>() {
return F8E4M3B11FNUZ;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<tsl::float8_e5m2fnuz>() {
return F8E5M2FNUZ;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<tsl::float8_e4m3fnuz>() {
return F8E4M3FNUZ;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<complex64>() {
return C64;
}
template <>
constexpr PrimitiveType NativeToPrimitiveType<complex128>() {
return C128;
}
template <PrimitiveType>
struct PrimitiveTypeToNative;
template <>
struct PrimitiveTypeToNative<PRED> {
using type = bool;
};
template <>
struct PrimitiveTypeToNative<U2> {
using type = u2;
};
template <>
struct PrimitiveTypeToNative<U4> {
using type = u4;
};
template <>
struct PrimitiveTypeToNative<U8> {
using type = uint8_t;
};
template <>
struct PrimitiveTypeToNative<U16> {
using type = uint16_t;
};
template <>
struct PrimitiveTypeToNative<U32> {
using type = uint32_t;
};
template <>
struct PrimitiveTypeToNative<U64> {
using type = uint64_t;
};
template <>
struct PrimitiveTypeToNative<S2> {
using type = s2;
};
template <>
struct PrimitiveTypeToNative<S4> {
using type = s4;
};
template <>
struct PrimitiveTypeToNative<S8> {
using type = int8_t;
};
template <>
struct PrimitiveTypeToNative<S16> {
using type = int16_t;
};
template <>
struct PrimitiveTypeToNative<S32> {
using type = int32_t;
};
template <>
struct PrimitiveTypeToNative<S64> {
using type = int64_t;
};
template <>
struct PrimitiveTypeToNative<F32> {
using type = float;
};
template <>
struct PrimitiveTypeToNative<F64> {
using type = double;
};
template <>
struct PrimitiveTypeToNative<F16> {
using type = half;
};
template <>
struct PrimitiveTypeToNative<BF16> {
using type = bfloat16;
};
template <>
struct PrimitiveTypeToNative<F8E5M2> {
using type = tsl::float8_e5m2;
};
template <>
struct PrimitiveTypeToNative<F8E4M3FN> {
using type = tsl::float8_e4m3fn;
};
template <>
struct PrimitiveTypeToNative<F8E4M3B11FNUZ> {
using type = tsl::float8_e4m3b11fnuz;
};
template <>
struct PrimitiveTypeToNative<F8E5M2FNUZ> {
using type = tsl::float8_e5m2fnuz;
};
template <>
struct PrimitiveTypeToNative<F8E4M3FNUZ> {
using type = tsl::float8_e4m3fnuz;
};
template <>
struct PrimitiveTypeToNative<C64> {
using type = complex64;
};
template <>
struct PrimitiveTypeToNative<C128> {
using type = complex128;
};
template <>
struct PrimitiveTypeToNative<TOKEN> {
using type = void;
};
template <PrimitiveType kType>
using NativeTypeOf =
typename primitive_util::PrimitiveTypeToNative<kType>::type;
template <PrimitiveType kPrimitiveType>
using PrimitiveTypeConstant =
std::integral_constant<PrimitiveType, kPrimitiveType>;
inline constexpr bool IsArrayType(PrimitiveType primitive_type) {
return primitive_type != TUPLE && primitive_type != OPAQUE_TYPE &&
primitive_type != TOKEN && primitive_type > PRIMITIVE_TYPE_INVALID &&
primitive_type < PrimitiveType_ARRAYSIZE;
}
constexpr bool IsF8Type(PrimitiveType type) {
return type == F8E5M2 || type == F8E4M3FN || type == F8E4M3B11FNUZ ||
type == F8E5M2FNUZ || type == F8E4M3FNUZ;
}
constexpr bool IsFloatingPointType(PrimitiveType type) {
return type == F16 || type == F32 || type == F64 || type == BF16 ||
IsF8Type(type);
}
constexpr bool IsComplexType(PrimitiveType type) {
return type == C64 || type == C128;
}
constexpr bool IsSignedIntegralType(PrimitiveType type) {
return type == S2 || type == S4 || type == S8 || type == S16 || type == S32 ||
type == S64;
}
constexpr bool IsUnsignedIntegralType(PrimitiveType type) {
return type == U2 || type == U4 || type == U8 || type == U16 || type == U32 ||
type == U64;
}
constexpr bool IsIntegralType(PrimitiveType type) {
return IsUnsignedIntegralType(type) || IsSignedIntegralType(type);
}
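// Dispatches `f` with a compile-time PrimitiveTypeConstant so the callee can
// recover the native C++ type. Sketch, mirroring the switch usage elsewhere
// in this file:
//   int digits = primitive_util::IntegralTypeSwitch<int>(
//       [&](auto kType) -> int {
//         using NativeT = primitive_util::NativeTypeOf<kType>;
//         return std::numeric_limits<NativeT>::digits;
//       },
//       type);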
template <typename R, typename F>
constexpr R IntegralTypeSwitch(F&& f, PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsIntegralType(type))) {
switch (type) {
case S2:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::S2>());
case S4:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::S4>());
case S8:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::S8>());
case S16:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::S16>());
case S32:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::S32>());
case S64:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::S64>());
case U2:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::U2>());
case U4:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::U4>());
case U8:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::U8>());
case U16:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::U16>());
case U32:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::U32>());
case U64:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::U64>());
default:
ABSL_UNREACHABLE();
}
}
LOG(FATAL) << "Not an integral data type " << type;
}
template <typename R, typename F>
constexpr R FloatingPointTypeSwitch(F&& f, PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsFloatingPointType(type))) {
switch (type) {
case F8E4M3FN:
return std::forward<F>(f)(
PrimitiveTypeConstant<PrimitiveType::F8E4M3FN>());
case F8E4M3B11FNUZ:
return std::forward<F>(f)(
PrimitiveTypeConstant<PrimitiveType::F8E4M3B11FNUZ>());
case F8E4M3FNUZ:
return std::forward<F>(f)(
PrimitiveTypeConstant<PrimitiveType::F8E4M3FNUZ>());
case F8E5M2:
return std::forward<F>(f)(
PrimitiveTypeConstant<PrimitiveType::F8E5M2>());
case F8E5M2FNUZ:
return std::forward<F>(f)(
PrimitiveTypeConstant<PrimitiveType::F8E5M2FNUZ>());
case F16:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::F16>());
case BF16:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::BF16>());
case F32:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::F32>());
case F64:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::F64>());
default:
ABSL_UNREACHABLE();
}
}
LOG(FATAL) << "Not a floating point data type " << type;
}
template <typename R, typename F>
constexpr R ComplexTypeSwitch(F&& f, PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsComplexType(type))) {
switch (type) {
case C64:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::C64>());
case C128:
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::C128>());
default:
ABSL_UNREACHABLE();
}
}
LOG(FATAL) << "Not a complex data type " << type;
}
template <typename R, typename F>
constexpr R ArrayTypeSwitch(F&& f, PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsArrayType(type))) {
if (IsFloatingPointType(type)) {
return FloatingPointTypeSwitch<R>(std::forward<F>(f), type);
}
if (IsIntegralType(type)) {
return IntegralTypeSwitch<R>(std::forward<F>(f), type);
}
if (IsComplexType(type)) {
return ComplexTypeSwitch<R>(std::forward<F>(f), type);
}
if (type == PRED) {
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::PRED>());
}
}
LOG(FATAL) << "Not an array data type " << type;
}
template <typename R, typename F>
constexpr R PrimitiveTypeSwitch(F&& f, PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsArrayType(type))) {
return ArrayTypeSwitch<R>(std::forward<F>(f), type);
}
if (type == TUPLE) {
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::TUPLE>());
}
if (type == TOKEN) {
return std::forward<F>(f)(PrimitiveTypeConstant<PrimitiveType::TOKEN>());
}
if (type == OPAQUE_TYPE) {
return std::forward<F>(f)(
PrimitiveTypeConstant<PrimitiveType::OPAQUE_TYPE>());
}
LOG(FATAL) << "unhandled type " << type;
}
namespace internal {
template <PrimitiveType primitive_type>
inline constexpr int PrimitiveTypeBitWidth() {
if constexpr (IsArrayType(primitive_type)) {
using NativeT = primitive_util::NativeTypeOf<primitive_type>;
if constexpr (IsIntegralType(primitive_type)) {
static_assert(is_specialized_integral_v<NativeT>);
static_assert(std::numeric_limits<NativeT>::is_signed ==
IsSignedIntegralType(primitive_type));
static_assert(std::numeric_limits<NativeT>::radix == 2);
return std::numeric_limits<NativeT>::digits +
(IsSignedIntegralType(primitive_type) ? 1 : 0);
}
if constexpr (primitive_type == PRED) {
return std::numeric_limits<NativeT>::digits;
}
if constexpr (IsFloatingPointType(primitive_type)) {
return sizeof(NativeT) * std::numeric_limits<uint8_t>::digits;
}
if constexpr (IsComplexType(primitive_type)) {
static_assert(is_complex_v<NativeT>);
return sizeof(NativeT) * std::numeric_limits<uint8_t>::digits;
}
}
return 0;
}
template <int... Types>
inline constexpr auto BitWidthArrayHelper(
std::integer_sequence<int, Types...>) {
return std::array{PrimitiveTypeBitWidth<PrimitiveType{Types}>()...};
}
inline constexpr auto kBitWidths = BitWidthArrayHelper(
std::make_integer_sequence<int, PrimitiveType_ARRAYSIZE>{});
template <int... Types>
inline constexpr auto ByteWidthArrayHelper(
std::integer_sequence<int, Types...>) {
return std::array{
CeilOfRatio(PrimitiveTypeBitWidth<PrimitiveType{Types}>(), 8)...};
}
inline constexpr auto kByteWidths = ByteWidthArrayHelper(
std::make_integer_sequence<int, PrimitiveType_ARRAYSIZE>{});
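// kBitWidths and kByteWidths are compile-time lookup tables indexed by the
// PrimitiveType enum value, so BitWidth()/ByteWidth() reduce to an array read.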
template <const std::array<int, PrimitiveType_ARRAYSIZE>& kWidths>
inline constexpr int WidthForType(PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsArrayType(type))) {
return kWidths[type];
}
LOG(FATAL) << "Unhandled primitive type " << type;
}
}
inline constexpr int BitWidth(PrimitiveType type) {
return internal::WidthForType<internal::kBitWidths>(type);
}
inline constexpr int ByteWidth(PrimitiveType type) {
return internal::WidthForType<internal::kByteWidths>(type);
}
constexpr PrimitiveType UnsignedIntegralTypeForBitWidth(int64_t src_bitwidth) {
switch (src_bitwidth) {
case 2:
return xla::U2;
case 4:
return xla::U4;
case 8:
return xla::U8;
case 16:
return xla::U16;
case 32:
return xla::U32;
case 64:
return xla::U64;
default:
return xla::PRIMITIVE_TYPE_INVALID;
}
}
PrimitiveType SignedIntegralTypeForBitWidth(int64_t src_bitwidth);
constexpr PrimitiveType ComplexComponentType(PrimitiveType complex_type) {
switch (complex_type) {
case C64:
return F32;
case C128:
return F64;
default:
LOG(FATAL) << "Primitive type is not complex: "
<< PrimitiveType_Name(complex_type);
}
}
constexpr PrimitiveType ComplexType(PrimitiveType base_type) {
if (base_type == F32) {
return C64;
}
if (base_type == F64) {
return C128;
}
return PRIMITIVE_TYPE_INVALID;
}
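// Ranks the two types by (is-complex, overflow exponent, significand width,
// bit width, signedness) and returns the higher-ranked one; equal ranks are
// only allowed when a == b.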
inline PrimitiveType HigherPrecisionType(PrimitiveType a, PrimitiveType b) {
auto type_properties = [](PrimitiveType type) {
auto component_type =
IsComplexType(type) ? ComplexComponentType(type) : type;
return std::make_tuple(
IsComplexType(type),
IsFloatingPointType(component_type) ? OverflowExponent(component_type)
: -1,
IsFloatingPointType(component_type) ? SignificandWidth(component_type)
: -1,
BitWidth(component_type),
IsSignedIntegralType(component_type));
};
auto a_properties = type_properties(a);
auto b_properties = type_properties(b);
if (a_properties > b_properties) {
return a;
}
if (b_properties > a_properties) {
return b;
}
CHECK_EQ(a, b);
return a;
}
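// True iff every value of from_type is exactly representable in to_type.
// Float-to-float casts must lose no infinities, significand bits, exponent
// range, or subnormal range; integer-to-float casts require the target
// significand to hold every from_type magnitude.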
inline bool CastPreservesValues(PrimitiveType from_type,
PrimitiveType to_type) {
if (from_type == to_type) {
return true;
}
if (from_type == PRED) {
return true;
}
if (to_type == PRED) {
return false;
}
if (primitive_util::IsComplexType(to_type)) {
auto from_component_type =
primitive_util::IsComplexType(from_type)
? primitive_util::ComplexComponentType(from_type)
: from_type;
auto to_component_type = primitive_util::ComplexComponentType(to_type);
return CastPreservesValues(from_component_type, to_component_type);
}
if (primitive_util::IsComplexType(from_type)) {
return false;
}
if (primitive_util::IsFloatingPointType(from_type) &&
primitive_util::IsFloatingPointType(to_type)) {
return (!primitive_util::HasInfinity(from_type) ||
primitive_util::HasInfinity(to_type)) &&
primitive_util::SignificandWidth(from_type) <=
primitive_util::SignificandWidth(to_type) &&
primitive_util::ExponentWidth(from_type) <=
primitive_util::ExponentWidth(to_type) &&
(primitive_util::UnderflowExponent(from_type) -
primitive_util::SignificandWidth(from_type)) >=
(primitive_util::UnderflowExponent(to_type) -
primitive_util::SignificandWidth(to_type)) &&
primitive_util::OverflowExponent(from_type) <=
primitive_util::OverflowExponent(to_type);
}
if (!primitive_util::IsIntegralType(from_type)) {
return false;
}
const int from_bits = primitive_util::IsSignedIntegralType(from_type)
? primitive_util::BitWidth(from_type) - 1
: primitive_util::BitWidth(from_type);
const int to_bits = primitive_util::IsSignedIntegralType(to_type)
? primitive_util::BitWidth(to_type) - 1
: primitive_util::BitWidth(to_type);
if (primitive_util::IsFloatingPointType(to_type)) {
return from_bits <= primitive_util::SignificandWidth(to_type) &&
primitive_util::BitWidth(from_type) - 1 <
primitive_util::OverflowExponent(to_type);
}
if (primitive_util::IsSignedIntegralType(from_type) &&
primitive_util::IsUnsignedIntegralType(to_type)) {
return false;
}
CHECK(primitive_util::IsIntegralType(to_type));
return from_bits <= to_bits;
}
const std::string& LowercasePrimitiveTypeName(PrimitiveType s);
absl::StatusOr<PrimitiveType> StringToPrimitiveType(absl::string_view name);
bool IsPrimitiveTypeName(absl::string_view name);
template <typename T>
bool IsCanonicalRepresentation(PrimitiveType type) {
return PrimitiveTypeSwitch<bool>(
[](auto primitive_type) -> bool {
if constexpr (primitive_util::IsFloatingPointType(primitive_type) ||
primitive_util::IsComplexType(primitive_type)) {
return NativeToPrimitiveType<T>() == primitive_type;
}
if constexpr (primitive_util::IsSignedIntegralType(primitive_type)) {
return std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed &&
BitWidth(primitive_type) <=
(std::numeric_limits<T>::digits + 1);
}
if constexpr (primitive_util::IsUnsignedIntegralType(primitive_type) ||
primitive_type == PRED) {
return std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed &&
BitWidth(primitive_type) <= std::numeric_limits<T>::digits;
}
return false;
},
type);
}
inline bool FitsInIntegralType(int64_t x, PrimitiveType ty) {
return primitive_util::IntegralTypeSwitch<bool>(
[&](auto primitive_type) -> bool {
using NativeT = primitive_util::NativeTypeOf<primitive_type>;
return std::numeric_limits<NativeT>::min() <= x &&
std::numeric_limits<NativeT>::max() >= x;
},
ty);
}
constexpr bool IsSubByteNonPredType(PrimitiveType type) {
return IsArrayType(type) && type != PRED &&
primitive_util::BitWidth(type) < 8;
}
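// Convenience overloads that forward to xla::PackIntN/UnpackIntN with the
// bit width implied by the element type.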
inline void PackIntN(PrimitiveType input_type, absl::Span<const char> input,
absl::Span<char> output) {
xla::PackIntN(primitive_util::BitWidth(input_type), input, output);
}
inline void UnpackIntN(PrimitiveType input_type, absl::Span<const char> input,
absl::Span<char> output) {
xla::UnpackIntN(primitive_util::BitWidth(input_type), input, output);
}
}
}
#endif
#include "xla/primitive_util.h"
#include <cstdint>
#include <limits>
#include <string>
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace primitive_util {
int SignificandWidth(PrimitiveType type) {
return FloatingPointTypeSwitch<int>(
[&](auto constant_type) -> int {
return std::numeric_limits<NativeTypeOf<constant_type>>::digits;
},
type);
}
int ExponentWidth(PrimitiveType type) {
int total_bit_width = BitWidth(type);
int trailing_significand_field_width = SignificandWidth(type) - 1;
int kSignBitWidth = 1;
return total_bit_width - (trailing_significand_field_width + kSignBitWidth);
}
int UnderflowExponent(PrimitiveType type) {
return FloatingPointTypeSwitch<int>(
[&](auto constant_type) -> int {
return std::numeric_limits<NativeTypeOf<constant_type>>::min_exponent;
},
type);
}
int OverflowExponent(PrimitiveType type) {
return FloatingPointTypeSwitch<int>(
[&](auto constant_type) -> int {
return std::numeric_limits<NativeTypeOf<constant_type>>::max_exponent;
},
type);
}
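// std::numeric_limits<T>::min_exponent is one greater than the IEEE e_min,
// so the usual bias formula 1 - e_min becomes (1 - min_exponent) + 1 below.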
int ExponentBias(PrimitiveType type) {
return (1 - UnderflowExponent(type)) + 1;
}
bool HasInfinity(PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsFloatingPointType(type))) {
return FloatingPointTypeSwitch<bool>(
[&](auto constant_type) -> bool {
return std::numeric_limits<NativeTypeOf<constant_type>>::has_infinity;
},
type);
}
return false;
}
bool HasNegativeZero(PrimitiveType type) {
if (ABSL_PREDICT_TRUE(IsFloatingPointType(type))) {
return FloatingPointTypeSwitch<bool>(
[&](auto constant_type) -> bool {
return has_negative_zero_v<NativeTypeOf<constant_type>>;
},
type);
}
return false;
}
xla::PrimitiveType SignedIntegralTypeForBitWidth(int64_t src_bitwidth) {
switch (src_bitwidth) {
case 2:
return xla::S2;
case 4:
return xla::S4;
case 8:
return xla::S8;
case 16:
return xla::S16;
case 32:
return xla::S32;
case 64:
return xla::S64;
default:
return xla::PRIMITIVE_TYPE_INVALID;
}
}
class PrimitiveTypeNameGenerator {
public:
PrimitiveTypeNameGenerator() {
for (int i = 0; i < PrimitiveType_ARRAYSIZE; i++) {
if (i == static_cast<int>(OPAQUE_TYPE)) {
lowercase_name_[i] = "opaque";
} else if (PrimitiveType_IsValid(i)) {
lowercase_name_[i] = absl::AsciiStrToLower(
PrimitiveType_Name(static_cast<PrimitiveType>(i)));
}
}
}
const std::string& LowercaseName(PrimitiveType t) {
CHECK_LT(t, PrimitiveType_ARRAYSIZE);
return lowercase_name_[static_cast<int>(t)];
}
private:
std::string lowercase_name_[PrimitiveType_ARRAYSIZE];
};
const std::string& LowercasePrimitiveTypeName(PrimitiveType s) {
static auto* gen = new PrimitiveTypeNameGenerator();
return gen->LowercaseName(s);
}
namespace {
const absl::flat_hash_map<std::string, PrimitiveType>&
GetPrimitiveTypeStringMap() {
static absl::flat_hash_map<std::string, PrimitiveType>* name_to_type = [] {
static auto* map = new absl::flat_hash_map<std::string, PrimitiveType>;
for (int i = 0; i < PrimitiveType_ARRAYSIZE; i++) {
if (PrimitiveType_IsValid(i) && i != PRIMITIVE_TYPE_INVALID) {
auto value = static_cast<PrimitiveType>(i);
(*map)[LowercasePrimitiveTypeName(value)] = value;
}
}
(*map)["opaque"] = OPAQUE_TYPE;
return map;
}();
return *name_to_type;
}
}
absl::StatusOr<PrimitiveType> StringToPrimitiveType(absl::string_view name) {
const auto& map = GetPrimitiveTypeStringMap();
auto found = map.find(name);
if (found == map.end()) {
return InvalidArgument("Invalid element type string: \"%s\".", name);
}
return found->second;
}
bool IsPrimitiveTypeName(absl::string_view name) {
const auto& map = GetPrimitiveTypeStringMap();
auto found = map.find(name);
return found != map.end();
}
}
} | #include "xla/primitive_util.h"
#include <numeric>
#include <string>
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(PrimitiveUtilTest, StringToPrimitiveType) {
auto expect_ok_and_equal = [](const std::string& str,
PrimitiveType expected) {
TF_ASSERT_OK_AND_ASSIGN(PrimitiveType actual,
primitive_util::StringToPrimitiveType(str));
EXPECT_EQ(expected, actual);
};
expect_ok_and_equal("f32", F32);
expect_ok_and_equal("tuple", TUPLE);
expect_ok_and_equal("pred", PRED);
expect_ok_and_equal("s32", S32);
EXPECT_IS_NOT_OK(primitive_util::StringToPrimitiveType("F32").status());
EXPECT_IS_NOT_OK(primitive_util::StringToPrimitiveType("Pred").status());
EXPECT_IS_NOT_OK(primitive_util::StringToPrimitiveType("preD").status());
}
TEST(PrimitiveUtilTest, FloatTypes) {
EXPECT_EQ(primitive_util::SignificandWidth(F32), 24);
EXPECT_EQ(primitive_util::SignificandWidth(BF16), 8);
EXPECT_EQ(primitive_util::ExponentWidth(F32), 8);
EXPECT_EQ(primitive_util::ExponentWidth(BF16), 8);
EXPECT_EQ(primitive_util::UnderflowExponent(F32), -125);
EXPECT_EQ(primitive_util::UnderflowExponent(BF16), -125);
EXPECT_EQ(primitive_util::OverflowExponent(F32), 128);
EXPECT_EQ(primitive_util::OverflowExponent(BF16), 128);
}
TEST(PrimitiveUtilTest, CastPreservesValues) {
bool expecteds[PrimitiveType_ARRAYSIZE][PrimitiveType_ARRAYSIZE];
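// expecteds[from][to] enumerates, for every pair of array types, whether a
// cast from `from` to `to` preserves all values; the loops at the end check
// each entry against primitive_util::CastPreservesValues.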
expecteds[PRED][PRED] = true;
expecteds[PRED][S2] = true;
expecteds[PRED][S4] = true;
expecteds[PRED][S8] = true;
expecteds[PRED][S16] = true;
expecteds[PRED][S32] = true;
expecteds[PRED][S64] = true;
expecteds[PRED][U2] = true;
expecteds[PRED][U4] = true;
expecteds[PRED][U8] = true;
expecteds[PRED][U16] = true;
expecteds[PRED][U32] = true;
expecteds[PRED][U64] = true;
expecteds[PRED][F16] = true;
expecteds[PRED][F32] = true;
expecteds[PRED][F64] = true;
expecteds[PRED][C64] = true;
expecteds[PRED][BF16] = true;
expecteds[PRED][C128] = true;
expecteds[PRED][F8E5M2] = true;
expecteds[PRED][F8E4M3FN] = true;
expecteds[PRED][F8E4M3B11FNUZ] = true;
expecteds[PRED][F8E5M2FNUZ] = true;
expecteds[PRED][F8E4M3FNUZ] = true;
expecteds[S2][PRED] = false;
expecteds[S2][S2] = true;
expecteds[S2][S4] = true;
expecteds[S2][S8] = true;
expecteds[S2][S16] = true;
expecteds[S2][S32] = true;
expecteds[S2][S64] = true;
expecteds[S2][U2] = false;
expecteds[S2][U4] = false;
expecteds[S2][U8] = false;
expecteds[S2][U16] = false;
expecteds[S2][U32] = false;
expecteds[S2][U64] = false;
expecteds[S2][F16] = true;
expecteds[S2][F32] = true;
expecteds[S2][F64] = true;
expecteds[S2][C64] = true;
expecteds[S2][BF16] = true;
expecteds[S2][C128] = true;
expecteds[S2][F8E5M2] = true;
expecteds[S2][F8E4M3FN] = true;
expecteds[S2][F8E4M3B11FNUZ] = true;
expecteds[S2][F8E5M2FNUZ] = true;
expecteds[S2][F8E4M3FNUZ] = true;
expecteds[S4][PRED] = false;
expecteds[S4][S2] = false;
expecteds[S4][S4] = true;
expecteds[S4][S8] = true;
expecteds[S4][S16] = true;
expecteds[S4][S32] = true;
expecteds[S4][S64] = true;
expecteds[S4][U2] = false;
expecteds[S4][U4] = false;
expecteds[S4][U8] = false;
expecteds[S4][U16] = false;
expecteds[S4][U32] = false;
expecteds[S4][U64] = false;
expecteds[S4][F16] = true;
expecteds[S4][F32] = true;
expecteds[S4][F64] = true;
expecteds[S4][C64] = true;
expecteds[S4][BF16] = true;
expecteds[S4][C128] = true;
expecteds[S4][F8E5M2] = true;
expecteds[S4][F8E4M3FN] = true;
expecteds[S4][F8E4M3B11FNUZ] = true;
expecteds[S4][F8E5M2FNUZ] = true;
expecteds[S4][F8E4M3FNUZ] = true;
expecteds[S8][PRED] = false;
expecteds[S8][S2] = false;
expecteds[S8][S4] = false;
expecteds[S8][S8] = true;
expecteds[S8][S16] = true;
expecteds[S8][S32] = true;
expecteds[S8][S64] = true;
expecteds[S8][U2] = false;
expecteds[S8][U4] = false;
expecteds[S8][U8] = false;
expecteds[S8][U16] = false;
expecteds[S8][U32] = false;
expecteds[S8][U64] = false;
expecteds[S8][F16] = true;
expecteds[S8][F32] = true;
expecteds[S8][F64] = true;
expecteds[S8][C64] = true;
expecteds[S8][BF16] = true;
expecteds[S8][C128] = true;
expecteds[S8][F8E5M2] = false;
expecteds[S8][F8E4M3FN] = false;
expecteds[S8][F8E4M3B11FNUZ] = false;
expecteds[S8][F8E5M2FNUZ] = false;
expecteds[S8][F8E4M3FNUZ] = false;
expecteds[S16][PRED] = false;
expecteds[S16][S2] = false;
expecteds[S16][S4] = false;
expecteds[S16][S8] = false;
expecteds[S16][S16] = true;
expecteds[S16][S32] = true;
expecteds[S16][S64] = true;
expecteds[S16][U2] = false;
expecteds[S16][U4] = false;
expecteds[S16][U8] = false;
expecteds[S16][U16] = false;
expecteds[S16][U32] = false;
expecteds[S16][U64] = false;
expecteds[S16][F16] = false;
expecteds[S16][F32] = true;
expecteds[S16][F64] = true;
expecteds[S16][C64] = true;
expecteds[S16][BF16] = false;
expecteds[S16][C128] = true;
expecteds[S16][F8E5M2] = false;
expecteds[S16][F8E4M3FN] = false;
expecteds[S16][F8E4M3B11FNUZ] = false;
expecteds[S16][F8E5M2FNUZ] = false;
expecteds[S16][F8E4M3FNUZ] = false;
expecteds[S32][PRED] = false;
expecteds[S32][S2] = false;
expecteds[S32][S4] = false;
expecteds[S32][S8] = false;
expecteds[S32][S16] = false;
expecteds[S32][S32] = true;
expecteds[S32][S64] = true;
expecteds[S32][U2] = false;
expecteds[S32][U4] = false;
expecteds[S32][U8] = false;
expecteds[S32][U16] = false;
expecteds[S32][U32] = false;
expecteds[S32][U64] = false;
expecteds[S32][F16] = false;
expecteds[S32][F32] = false;
expecteds[S32][F64] = true;
expecteds[S32][C64] = false;
expecteds[S32][BF16] = false;
expecteds[S32][C128] = true;
expecteds[S32][F8E5M2] = false;
expecteds[S32][F8E4M3FN] = false;
expecteds[S32][F8E4M3B11FNUZ] = false;
expecteds[S32][F8E5M2FNUZ] = false;
expecteds[S32][F8E4M3FNUZ] = false;
expecteds[S64][PRED] = false;
expecteds[S64][S2] = false;
expecteds[S64][S4] = false;
expecteds[S64][S8] = false;
expecteds[S64][S16] = false;
expecteds[S64][S32] = false;
expecteds[S64][S64] = true;
expecteds[S64][U2] = false;
expecteds[S64][U4] = false;
expecteds[S64][U8] = false;
expecteds[S64][U16] = false;
expecteds[S64][U32] = false;
expecteds[S64][U64] = false;
expecteds[S64][F16] = false;
expecteds[S64][F32] = false;
expecteds[S64][F64] = false;
expecteds[S64][C64] = false;
expecteds[S64][BF16] = false;
expecteds[S64][C128] = false;
expecteds[S64][F8E5M2] = false;
expecteds[S64][F8E4M3FN] = false;
expecteds[S64][F8E4M3B11FNUZ] = false;
expecteds[S64][F8E5M2FNUZ] = false;
expecteds[S64][F8E4M3FNUZ] = false;
expecteds[U2][PRED] = false;
expecteds[U2][S2] = false;
expecteds[U2][S4] = true;
expecteds[U2][S8] = true;
expecteds[U2][S16] = true;
expecteds[U2][S32] = true;
expecteds[U2][S64] = true;
expecteds[U2][U2] = true;
expecteds[U2][U4] = true;
expecteds[U2][U8] = true;
expecteds[U2][U16] = true;
expecteds[U2][U32] = true;
expecteds[U2][U64] = true;
expecteds[U2][F16] = true;
expecteds[U2][F32] = true;
expecteds[U2][F64] = true;
expecteds[U2][C64] = true;
expecteds[U2][BF16] = true;
expecteds[U2][C128] = true;
expecteds[U2][F8E5M2] = true;
expecteds[U2][F8E4M3FN] = true;
expecteds[U2][F8E4M3B11FNUZ] = true;
expecteds[U2][F8E5M2FNUZ] = true;
expecteds[U2][F8E4M3FNUZ] = true;
expecteds[U4][PRED] = false;
expecteds[U4][S2] = false;
expecteds[U4][S4] = false;
expecteds[U4][S8] = true;
expecteds[U4][S16] = true;
expecteds[U4][S32] = true;
expecteds[U4][S64] = true;
expecteds[U4][U2] = false;
expecteds[U4][U4] = true;
expecteds[U4][U8] = true;
expecteds[U4][U16] = true;
expecteds[U4][U32] = true;
expecteds[U4][U64] = true;
expecteds[U4][F16] = true;
expecteds[U4][F32] = true;
expecteds[U4][F64] = true;
expecteds[U4][C64] = true;
expecteds[U4][BF16] = true;
expecteds[U4][C128] = true;
expecteds[U4][F8E5M2] = false;
expecteds[U4][F8E4M3FN] = true;
expecteds[U4][F8E4M3B11FNUZ] = true;
expecteds[U4][F8E5M2FNUZ] = false;
expecteds[U4][F8E4M3FNUZ] = true;
expecteds[U8][PRED] = false;
expecteds[U8][S2] = false;
expecteds[U8][S4] = false;
expecteds[U8][S8] = false;
expecteds[U8][S16] = true;
expecteds[U8][S32] = true;
expecteds[U8][S64] = true;
expecteds[U8][U2] = false;
expecteds[U8][U4] = false;
expecteds[U8][U8] = true;
expecteds[U8][U16] = true;
expecteds[U8][U32] = true;
expecteds[U8][U64] = true;
expecteds[U8][F16] = true;
expecteds[U8][F32] = true;
expecteds[U8][F64] = true;
expecteds[U8][C64] = true;
expecteds[U8][BF16] = true;
expecteds[U8][C128] = true;
expecteds[U8][F8E5M2] = false;
expecteds[U8][F8E4M3FN] = false;
expecteds[U8][F8E4M3B11FNUZ] = false;
expecteds[U8][F8E5M2FNUZ] = false;
expecteds[U8][F8E4M3FNUZ] = false;
expecteds[U16][PRED] = false;
expecteds[U16][S2] = false;
expecteds[U16][S4] = false;
expecteds[U16][S8] = false;
expecteds[U16][S16] = false;
expecteds[U16][S32] = true;
expecteds[U16][S64] = true;
expecteds[U16][U2] = false;
expecteds[U16][U4] = false;
expecteds[U16][U8] = false;
expecteds[U16][U16] = true;
expecteds[U16][U32] = true;
expecteds[U16][U64] = true;
expecteds[U16][F16] = false;
expecteds[U16][F32] = true;
expecteds[U16][F64] = true;
expecteds[U16][C64] = true;
expecteds[U16][BF16] = false;
expecteds[U16][C128] = true;
expecteds[U16][F8E5M2] = false;
expecteds[U16][F8E4M3FN] = false;
expecteds[U16][F8E4M3B11FNUZ] = false;
expecteds[U16][F8E5M2FNUZ] = false;
expecteds[U16][F8E4M3FNUZ] = false;
expecteds[U32][PRED] = false;
expecteds[U32][S2] = false;
expecteds[U32][S4] = false;
expecteds[U32][S8] = false;
expecteds[U32][S16] = false;
expecteds[U32][S32] = false;
expecteds[U32][S64] = true;
expecteds[U32][U2] = false;
expecteds[U32][U4] = false;
expecteds[U32][U8] = false;
expecteds[U32][U16] = false;
expecteds[U32][U32] = true;
expecteds[U32][U64] = true;
expecteds[U32][F16] = false;
expecteds[U32][F32] = false;
expecteds[U32][F64] = true;
expecteds[U32][C64] = false;
expecteds[U32][BF16] = false;
expecteds[U32][C128] = true;
expecteds[U32][F8E5M2] = false;
expecteds[U32][F8E4M3FN] = false;
expecteds[U32][F8E4M3B11FNUZ] = false;
expecteds[U32][F8E5M2FNUZ] = false;
expecteds[U32][F8E4M3FNUZ] = false;
expecteds[U64][PRED] = false;
expecteds[U64][S2] = false;
expecteds[U64][S4] = false;
expecteds[U64][S8] = false;
expecteds[U64][S16] = false;
expecteds[U64][S32] = false;
expecteds[U64][S64] = false;
expecteds[U64][U2] = false;
expecteds[U64][U4] = false;
expecteds[U64][U8] = false;
expecteds[U64][U16] = false;
expecteds[U64][U32] = false;
expecteds[U64][U64] = true;
expecteds[U64][F16] = false;
expecteds[U64][F32] = false;
expecteds[U64][F64] = false;
expecteds[U64][C64] = false;
expecteds[U64][BF16] = false;
expecteds[U64][C128] = false;
expecteds[U64][F8E5M2] = false;
expecteds[U64][F8E4M3FN] = false;
expecteds[U64][F8E4M3B11FNUZ] = false;
expecteds[U64][F8E5M2FNUZ] = false;
expecteds[U64][F8E4M3FNUZ] = false;
expecteds[F16][PRED] = false;
expecteds[F16][S2] = false;
expecteds[F16][S4] = false;
expecteds[F16][S8] = false;
expecteds[F16][S16] = false;
expecteds[F16][S32] = false;
expecteds[F16][S64] = false;
expecteds[F16][U2] = false;
expecteds[F16][U4] = false;
expecteds[F16][U8] = false;
expecteds[F16][U16] = false;
expecteds[F16][U32] = false;
expecteds[F16][U64] = false;
expecteds[F16][F16] = true;
expecteds[F16][F32] = true;
expecteds[F16][F64] = true;
expecteds[F16][C64] = true;
expecteds[F16][BF16] = false;
expecteds[F16][C128] = true;
expecteds[F16][F8E5M2] = false;
expecteds[F16][F8E4M3FN] = false;
expecteds[F16][F8E4M3B11FNUZ] = false;
expecteds[F16][F8E5M2FNUZ] = false;
expecteds[F16][F8E4M3FNUZ] = false;
expecteds[F32][PRED] = false;
expecteds[F32][S2] = false;
expecteds[F32][S4] = false;
expecteds[F32][S8] = false;
expecteds[F32][S16] = false;
expecteds[F32][S32] = false;
expecteds[F32][S64] = false;
expecteds[F32][U2] = false;
expecteds[F32][U4] = false;
expecteds[F32][U8] = false;
expecteds[F32][U16] = false;
expecteds[F32][U32] = false;
expecteds[F32][U64] = false;
expecteds[F32][F16] = false;
expecteds[F32][F32] = true;
expecteds[F32][F64] = true;
expecteds[F32][C64] = true;
expecteds[F32][BF16] = false;
expecteds[F32][C128] = true;
expecteds[F32][F8E5M2] = false;
expecteds[F32][F8E4M3FN] = false;
expecteds[F32][F8E4M3B11FNUZ] = false;
expecteds[F32][F8E5M2FNUZ] = false;
expecteds[F32][F8E4M3FNUZ] = false;
expecteds[F64][PRED] = false;
expecteds[F64][S2] = false;
expecteds[F64][S4] = false;
expecteds[F64][S8] = false;
expecteds[F64][S16] = false;
expecteds[F64][S32] = false;
expecteds[F64][S64] = false;
expecteds[F64][U2] = false;
expecteds[F64][U4] = false;
expecteds[F64][U8] = false;
expecteds[F64][U16] = false;
expecteds[F64][U32] = false;
expecteds[F64][U64] = false;
expecteds[F64][F16] = false;
expecteds[F64][F32] = false;
expecteds[F64][F64] = true;
expecteds[F64][C64] = false;
expecteds[F64][BF16] = false;
expecteds[F64][C128] = true;
expecteds[F64][F8E5M2] = false;
expecteds[F64][F8E4M3FN] = false;
expecteds[F64][F8E4M3B11FNUZ] = false;
expecteds[F64][F8E5M2FNUZ] = false;
expecteds[F64][F8E4M3FNUZ] = false;
expecteds[C64][PRED] = false;
expecteds[C64][S2] = false;
expecteds[C64][S4] = false;
expecteds[C64][S8] = false;
expecteds[C64][S16] = false;
expecteds[C64][S32] = false;
expecteds[C64][S64] = false;
expecteds[C64][U2] = false;
expecteds[C64][U4] = false;
expecteds[C64][U8] = false;
expecteds[C64][U16] = false;
expecteds[C64][U32] = false;
expecteds[C64][U64] = false;
expecteds[C64][F16] = false;
expecteds[C64][F32] = false;
expecteds[C64][F64] = false;
expecteds[C64][C64] = true;
expecteds[C64][BF16] = false;
expecteds[C64][C128] = true;
expecteds[C64][F8E5M2] = false;
expecteds[C64][F8E4M3FN] = false;
expecteds[C64][F8E4M3B11FNUZ] = false;
expecteds[C64][F8E5M2FNUZ] = false;
expecteds[C64][F8E4M3FNUZ] = false;
expecteds[BF16][PRED] = false;
expecteds[BF16][S2] = false;
expecteds[BF16][S4] = false;
expecteds[BF16][S8] = false;
expecteds[BF16][S16] = false;
expecteds[BF16][S32] = false;
expecteds[BF16][S64] = false;
expecteds[BF16][U2] = false;
expecteds[BF16][U4] = false;
expecteds[BF16][U8] = false;
expecteds[BF16][U16] = false;
expecteds[BF16][U32] = false;
expecteds[BF16][U64] = false;
expecteds[BF16][F16] = false;
expecteds[BF16][F32] = true;
expecteds[BF16][F64] = true;
expecteds[BF16][C64] = true;
expecteds[BF16][BF16] = true;
expecteds[BF16][C128] = true;
expecteds[BF16][F8E5M2] = false;
expecteds[BF16][F8E4M3FN] = false;
expecteds[BF16][F8E4M3B11FNUZ] = false;
expecteds[BF16][F8E5M2FNUZ] = false;
expecteds[BF16][F8E4M3FNUZ] = false;
expecteds[C128][PRED] = false;
expecteds[C128][S2] = false;
expecteds[C128][S4] = false;
expecteds[C128][S8] = false;
expecteds[C128][S16] = false;
expecteds[C128][S32] = false;
expecteds[C128][S64] = false;
expecteds[C128][U2] = false;
expecteds[C128][U4] = false;
expecteds[C128][U8] = false;
expecteds[C128][U16] = false;
expecteds[C128][U32] = false;
expecteds[C128][U64] = false;
expecteds[C128][F16] = false;
expecteds[C128][F32] = false;
expecteds[C128][F64] = false;
expecteds[C128][C64] = false;
expecteds[C128][BF16] = false;
expecteds[C128][C128] = true;
expecteds[C128][F8E5M2] = false;
expecteds[C128][F8E4M3FN] = false;
expecteds[C128][F8E4M3B11FNUZ] = false;
expecteds[C128][F8E5M2FNUZ] = false;
expecteds[C128][F8E4M3FNUZ] = false;
expecteds[F8E5M2][PRED] = false;
expecteds[F8E5M2][S2] = false;
expecteds[F8E5M2][S4] = false;
expecteds[F8E5M2][S8] = false;
expecteds[F8E5M2][S16] = false;
expecteds[F8E5M2][S32] = false;
expecteds[F8E5M2][S64] = false;
expecteds[F8E5M2][U2] = false;
expecteds[F8E5M2][U4] = false;
expecteds[F8E5M2][U8] = false;
expecteds[F8E5M2][U16] = false;
expecteds[F8E5M2][U32] = false;
expecteds[F8E5M2][U64] = false;
expecteds[F8E5M2][F16] = true;
expecteds[F8E5M2][F32] = true;
expecteds[F8E5M2][F64] = true;
expecteds[F8E5M2][C64] = true;
expecteds[F8E5M2][BF16] = true;
expecteds[F8E5M2][C128] = true;
expecteds[F8E5M2][F8E5M2] = true;
expecteds[F8E5M2][F8E4M3FN] = false;
expecteds[F8E5M2][F8E4M3B11FNUZ] = false;
expecteds[F8E5M2][F8E5M2FNUZ] = false;
expecteds[F8E5M2][F8E4M3FNUZ] = false;
expecteds[F8E4M3FN][PRED] = false;
expecteds[F8E4M3FN][S2] = false;
expecteds[F8E4M3FN][S4] = false;
expecteds[F8E4M3FN][S8] = false;
expecteds[F8E4M3FN][S16] = false;
expecteds[F8E4M3FN][S32] = false;
expecteds[F8E4M3FN][S64] = false;
expecteds[F8E4M3FN][U2] = false;
expecteds[F8E4M3FN][U4] = false;
expecteds[F8E4M3FN][U8] = false;
expecteds[F8E4M3FN][U16] = false;
expecteds[F8E4M3FN][U32] = false;
expecteds[F8E4M3FN][U64] = false;
expecteds[F8E4M3FN][F16] = true;
expecteds[F8E4M3FN][F32] = true;
expecteds[F8E4M3FN][F64] = true;
expecteds[F8E4M3FN][C64] = true;
expecteds[F8E4M3FN][BF16] = true;
expecteds[F8E4M3FN][C128] = true;
expecteds[F8E4M3FN][F8E5M2] = false;
expecteds[F8E4M3FN][F8E4M3FN] = true;
expecteds[F8E4M3FN][F8E4M3B11FNUZ] = false;
expecteds[F8E4M3B11FNUZ][PRED] = false;
expecteds[F8E4M3B11FNUZ][S2] = false;
expecteds[F8E4M3B11FNUZ][S4] = false;
expecteds[F8E4M3B11FNUZ][S8] = false;
expecteds[F8E4M3B11FNUZ][S16] = false;
expecteds[F8E4M3B11FNUZ][S32] = false;
expecteds[F8E4M3B11FNUZ][S64] = false;
expecteds[F8E4M3B11FNUZ][U2] = false;
expecteds[F8E4M3B11FNUZ][U4] = false;
expecteds[F8E4M3B11FNUZ][U8] = false;
expecteds[F8E4M3B11FNUZ][U16] = false;
expecteds[F8E4M3B11FNUZ][U32] = false;
expecteds[F8E4M3B11FNUZ][U64] = false;
expecteds[F8E4M3B11FNUZ][F16] = true;
expecteds[F8E4M3B11FNUZ][F32] = true;
expecteds[F8E4M3B11FNUZ][F64] = true;
expecteds[F8E4M3B11FNUZ][C64] = true;
expecteds[F8E4M3B11FNUZ][BF16] = true;
expecteds[F8E4M3B11FNUZ][C128] = true;
expecteds[F8E4M3B11FNUZ][F8E5M2] = false;
expecteds[F8E4M3B11FNUZ][F8E4M3FN] = false;
expecteds[F8E4M3B11FNUZ][F8E4M3B11FNUZ] = true;
expecteds[F8E4M3B11FNUZ][F8E4M3FNUZ] = false;
expecteds[F8E4M3B11FNUZ][F8E5M2FNUZ] = false;
expecteds[F8E4M3FN][F8E5M2FNUZ] = false;
expecteds[F8E4M3FN][F8E4M3FNUZ] = false;
expecteds[F8E5M2FNUZ][PRED] = false;
expecteds[F8E5M2FNUZ][S2] = false;
expecteds[F8E5M2FNUZ][S4] = false;
expecteds[F8E5M2FNUZ][S8] = false;
expecteds[F8E5M2FNUZ][S16] = false;
expecteds[F8E5M2FNUZ][S32] = false;
expecteds[F8E5M2FNUZ][S64] = false;
expecteds[F8E5M2FNUZ][U2] = false;
expecteds[F8E5M2FNUZ][U4] = false;
expecteds[F8E5M2FNUZ][U8] = false;
expecteds[F8E5M2FNUZ][U16] = false;
expecteds[F8E5M2FNUZ][U32] = false;
expecteds[F8E5M2FNUZ][U64] = false;
expecteds[F8E5M2FNUZ][F16] = true;
expecteds[F8E5M2FNUZ][F32] = true;
expecteds[F8E5M2FNUZ][F64] = true;
expecteds[F8E5M2FNUZ][C64] = true;
expecteds[F8E5M2FNUZ][BF16] = true;
expecteds[F8E5M2FNUZ][C128] = true;
expecteds[F8E5M2FNUZ][F8E5M2] = false;
expecteds[F8E5M2FNUZ][F8E4M3FN] = false;
expecteds[F8E5M2FNUZ][F8E4M3B11FNUZ] = false;
expecteds[F8E5M2FNUZ][F8E5M2FNUZ] = true;
expecteds[F8E5M2FNUZ][F8E4M3FNUZ] = false;
expecteds[F8E4M3FNUZ][PRED] = false;
expecteds[F8E4M3FNUZ][S2] = false;
expecteds[F8E4M3FNUZ][S4] = false;
expecteds[F8E4M3FNUZ][S8] = false;
expecteds[F8E4M3FNUZ][S16] = false;
expecteds[F8E4M3FNUZ][S32] = false;
expecteds[F8E4M3FNUZ][S64] = false;
expecteds[F8E4M3FNUZ][U2] = false;
expecteds[F8E4M3FNUZ][U4] = false;
expecteds[F8E4M3FNUZ][U8] = false;
expecteds[F8E4M3FNUZ][U16] = false;
expecteds[F8E4M3FNUZ][U32] = false;
expecteds[F8E4M3FNUZ][U64] = false;
expecteds[F8E4M3FNUZ][F16] = true;
expecteds[F8E4M3FNUZ][F32] = true;
expecteds[F8E4M3FNUZ][F64] = true;
expecteds[F8E4M3FNUZ][C64] = true;
expecteds[F8E4M3FNUZ][BF16] = true;
expecteds[F8E4M3FNUZ][C128] = true;
expecteds[F8E4M3FNUZ][F8E5M2] = false;
expecteds[F8E4M3FNUZ][F8E4M3FN] = false;
expecteds[F8E4M3FNUZ][F8E4M3B11FNUZ] = false;
expecteds[F8E4M3FNUZ][F8E5M2FNUZ] = false;
expecteds[F8E4M3FNUZ][F8E4M3FNUZ] = true;
for (int from_type_int = PrimitiveType_MIN;
from_type_int < PrimitiveType_ARRAYSIZE; ++from_type_int) {
auto from_type = static_cast<PrimitiveType>(from_type_int);
if (!primitive_util::IsArrayType(from_type)) {
continue;
}
for (int to_type_int = PrimitiveType_MIN;
to_type_int < PrimitiveType_ARRAYSIZE; ++to_type_int) {
auto to_type = static_cast<PrimitiveType>(to_type_int);
if (!primitive_util::IsArrayType(to_type)) {
continue;
}
bool expected = expecteds[from_type][to_type];
bool actual = primitive_util::CastPreservesValues(from_type, to_type);
EXPECT_EQ(expected, actual)
<< primitive_util::LowercasePrimitiveTypeName(from_type) << " -> "
<< primitive_util::LowercasePrimitiveTypeName(to_type);
}
}
}
}
} |
1,784 | cpp | tensorflow/tensorflow | literal | third_party/xla/xla/literal.cc | third_party/xla/xla/literal_test.cc | #ifndef XLA_LITERAL_H_
#define XLA_LITERAL_H_
#include <algorithm>
#include <climits>
#include <complex>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/casts.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/maybe_owning.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/bitmap.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
class Literal;
class LiteralSlice;
class LiteralBase {
public:
using DynamicSizeType = ShapeUtil::DynamicSizeType;
virtual ~LiteralBase() = 0;
bool operator==(const LiteralBase& other) const {
return Equal(other, false);
}
bool operator!=(const LiteralBase& other) const { return !(*this == other); }
bool Equal(const LiteralBase& other, bool layout_sensitive) const;
const Shape& shape() const;
LiteralProto ToProto() const;
template <typename NativeT>
absl::Span<const NativeT> data(const ShapeIndex& shape_index = {}) const;
const void* untyped_data(const ShapeIndex& shape_index = {}) const;
int64_t size_bytes(const ShapeIndex& shape_index = {}) const;
absl::StatusOr<int64_t> SerializedSize() const {
return ShapeUtil::SerializedSize(shape());
}
template <typename OutputIterator>
absl::Status Serialize(OutputIterator output) const {
return SerializeWithShapeProto(shape().ToProto(), output);
}
absl::Status SerializeToString(std::string* output) const;
absl::StatusOr<std::string> SerializeAsString() const;
std::string GetR1U8AsString() const;
void Print(Printer* printer) const;
void PrintOneline(Printer* printer) const;
void PrintWithoutShape(Printer* printer) const;
void PrintWithoutShapeOneline(Printer* printer) const;
void PrintWithLayout(Printer* printer) const;
void PrintWithLayoutOneline(Printer* printer) const;
std::string ToString() const;
std::string ToStringOneline() const;
std::string ToStringWithoutShape() const;
std::string ToStringWithoutShapeOneline() const;
std::string ToStringWithLayout() const;
std::string ToStringWithLayoutOneline() const;
template <typename NativeT>
NativeT Get(absl::Span<const int64_t> multi_index,
const ShapeIndex& shape_index) const;
template <typename NativeT>
NativeT Get(absl::Span<const int64_t> multi_index) const;
DynamicSizeType GetDynamicSize(int64_t dim_index,
const ShapeIndex& shape_index) const;
DynamicSizeType GetDynamicSize(int64_t dim_index) const;
template <typename NativeT>
NativeT GetFirstElement() const;
std::optional<int64_t> GetFirstInteger() const;
std::string GetAsString(absl::Span<const int64_t> multi_index,
const ShapeIndex& shape_index = {}) const;
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_specialized, bool>::type
IsEqualAt(absl::Span<const int64_t> multi_index, T value) const {
if (auto as_s64 = GetIntegralAsS64(multi_index)) {
return *as_s64 == value;
}
complex128 as_complex128 = *GetAsComplex128(multi_index);
return as_complex128.imag() == 0 && as_complex128.real() == value;
}
bool IsEqualAt(absl::Span<const int64_t> multi_index,
complex128 value) const {
if (auto as_s64 = GetIntegralAsS64(multi_index)) {
return *as_s64 == value.real() && value.imag() == 0;
}
auto as_complex128 = GetAsComplex128(multi_index);
return *as_complex128 == value;
}
std::optional<int64_t> GetIntegralAsS64(
absl::Span<const int64_t> multi_index) const;
std::optional<double> GetAsDouble(
absl::Span<const int64_t> multi_index) const;
std::optional<complex128> GetAsComplex128(
absl::Span<const int64_t> multi_index) const;
std::optional<double> GetSumAsDouble(
absl::Span<const int64_t> linear_indices) const;
void EachCellAsString(
absl::FunctionRef<void(absl::Span<const int64_t> indices,
const std::string& value)>
per_cell) const;
template <typename NativeT>
void EachCell(
absl::FunctionRef<void(absl::Span<const int64_t> indices, NativeT value)>
per_cell) const;
bool IsAll(const Literal& scalar) const;
bool IsAll(int8_t value) const;
bool IsAllFloat(float value) const;
bool IsAllComplex(complex64 value) const;
bool IsAllFirst() const;
template <typename T>
int64_t CountEqual(T value) const;
template <typename T>
int64_t CountEqual(std::complex<T> value) const;
bool IsR1Iota() const;
std::optional<int64_t> IsR1StridedIota() const;
bool IsZero(absl::Span<const int64_t> indices) const;
int64_t element_count(const ShapeIndex& index = {}) const {
if (index.empty()) {
return ShapeUtil::ElementsIn(shape());
}
return ShapeUtil::ElementsIn(ShapeUtil::GetSubshape(shape(), index));
}
template <typename H>
friend H AbslHashValue(H state, const LiteralBase& value) {
return LiteralBase::Hash(std::move(state), value);
}
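// Hashes at most kByteLimit bytes per array leaf. When layout-insensitive,
// elements are visited in a fixed logical order (via BumpIndices) so that
// literals differing only in layout hash identically.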
template <typename H, bool kIsLayoutSensitive = true,
int64_t kByteLimit = std::numeric_limits<int64_t>::max()>
static H Hash(H state, const LiteralBase& literal) {
state =
Shape::Hash<H, kIsLayoutSensitive>(std::move(state), literal.shape());
ShapeUtil::ForEachSubshape(literal.shape(), [&](const Shape& subshape,
const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
CHECK(LayoutUtil::IsDenseArray(subshape));
const int64_t size_bytes = literal.size_bytes(index);
const int64_t bytes_to_hash = std::min(size_bytes, kByteLimit);
const bool use_physical_order =
kIsLayoutSensitive || !subshape.has_layout();
auto data = absl::MakeConstSpan(
static_cast<const char*>(literal.untyped_data(index)), size_bytes);
if (use_physical_order) {
state = H::combine(std::move(state), data.first(bytes_to_hash));
return;
}
const int64_t elem_size =
ShapeUtil::ByteSizeOfPrimitiveType(subshape.element_type());
absl::Span<const int64_t> minor_to_major =
subshape.layout().minor_to_major();
DimensionVector elem_index(subshape.dimensions_size());
absl::Span<int64_t> elem_index_span(elem_index.data(), elem_index.size());
int64_t bytes_hashed = 0;
while (bytes_hashed < bytes_to_hash) {
int64_t offset =
elem_size * IndexUtil::MultidimensionalIndexToLinearIndex(
subshape, minor_to_major, elem_index);
state = H::combine(std::move(state), data.subspan(offset, elem_size));
if (!IndexUtil::BumpIndices(subshape, elem_index_span)) return;
bytes_hashed += elem_size;
}
});
return std::move(state);
}
absl::StatusOr<Literal> ConvertToShape(const Shape& dest_shape) const;
absl::StatusOr<Literal> BitcastConvert(const Shape& dest_shape) const;
absl::StatusOr<Literal> Convert(PrimitiveType primitive_dest_type) const;
Literal Clone() const;
std::unique_ptr<Literal> CloneToUnique() const;
Literal Relayout(const Layout& new_layout,
const ShapeIndex& shape_index = {}) const;
Literal Relayout(const Shape& shape_with_layout) const;
Literal ToStatic() const;
Literal ToBoundedDynamic(const Shape& bounded_shape) const;
absl::StatusOr<Literal> Reshape(absl::Span<const int64_t> dimensions) const;
absl::StatusOr<Literal> Broadcast(const Shape& result_shape,
absl::Span<const int64_t> dimensions) const;
Literal Transpose(absl::Span<const int64_t> permutation) const;
Literal Slice(absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices) const;
template <typename NativeT>
Literal Replicate(int64_t times) const;
bool IsDetermined(const ShapeIndex& shape_index = {}) const;
bool IsKnown(const ShapeIndex& shape_index = {}) const;
static Literal CreateFromShape(const Shape& shape);
static Literal CreateFromShapeWithUnknownLeafArrays(const Shape& shape);
static Literal CreateFromShapeWithUndeterminedLeafArrays(const Shape& shape);
protected:
class Piece;
void BuildPieceSubtree(const Shape& shape, Piece* piece);
template <typename OutputIterator>
absl::Status SerializeWithShapeProto(const ShapeProto& proto,
OutputIterator output) const;
template <typename OutputIterator>
class SerializeState {
public:
SerializeState(const ShapeProto& shape, OutputIterator output)
: output_(output) {
WriteShape(shape);
}
int64_t num_written() const { return num_written_; }
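// Elements are written in little-endian byte order regardless of host
// endianness: multi-byte values are emitted one byte at a time, shifting
// right by CHAR_BIT after each byte.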
template <typename NativeT>
void WriteElement(NativeT element) {
constexpr PrimitiveType primitive_type =
primitive_util::NativeToPrimitiveType<NativeT>();
static_assert(primitive_util::BitWidth(primitive_type) % 8 == 0);
if constexpr (primitive_util::IsComplexType(primitive_type)) {
WriteElement(element.real());
WriteElement(element.imag());
} else {
constexpr PrimitiveType unsigned_type =
primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(primitive_type));
using UnsignedT = primitive_util::NativeTypeOf<unsigned_type>;
UnsignedT unsigned_element = absl::bit_cast<UnsignedT>(element);
if constexpr (sizeof(UnsignedT) == 1) {
*output_++ = absl::bit_cast<char>(unsigned_element);
++num_written_;
} else {
for (int i = 0; i < sizeof unsigned_element; ++i) {
*output_++ = static_cast<char>(unsigned_element);
unsigned_element >>= CHAR_BIT;
++num_written_;
}
}
}
}
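// Sub-byte integer types (e.g. S4/U4) are packed several elements per byte,
// least-significant bits first; a trailing partial byte carries any
// remainder.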
template <typename NativeT>
void WriteElements(absl::Span<const NativeT> elements) {
constexpr PrimitiveType primitive_type =
primitive_util::NativeToPrimitiveType<NativeT>();
constexpr int bits_per_element = primitive_util::BitWidth(primitive_type);
if constexpr (bits_per_element < 8) {
static_assert(!primitive_util::IsFloatingPointType(primitive_type));
static_assert(!primitive_util::IsComplexType(primitive_type));
static_assert(8 % bits_per_element == 0);
constexpr int elements_per_byte = 8 / bits_per_element;
int64_t bytes = elements.size() / elements_per_byte;
for (int64_t i = 0; i < bytes; ++i) {
uint8_t byte = 0;
for (int b = 0; b < elements_per_byte; ++b) {
uint8_t src =
static_cast<uint8_t>(elements[i * elements_per_byte + b]) &
LsbMask<uint8_t>(bits_per_element);
byte |= src << (b * bits_per_element);
}
WriteElement(byte);
}
int64_t rest = elements.size() % elements_per_byte;
if (rest != 0) {
uint8_t byte = 0;
for (int64_t b = 0; b < rest; ++b) {
uint8_t src =
static_cast<uint8_t>(elements[bytes * elements_per_byte + b]) &
LsbMask<uint8_t>(bits_per_element);
byte |= src << (b * bits_per_element);
}
WriteElement(byte);
}
} else {
for (NativeT element : elements) {
WriteElement(element);
}
}
}
void WriteDynamicSizes(absl::Span<const DynamicSizeType> sizes) {
WriteElements(sizes);
}
private:
void WriteShape(const ShapeProto& proto) {
std::string shape_bytes = proto.SerializeAsString();
uint64_t shape_size = shape_bytes.size();
WriteElement(shape_size);
output_ = std::copy(shape_bytes.begin(), shape_bytes.end(), output_);
num_written_ += shape_bytes.size();
}
OutputIterator output_;
int64_t num_written_ = 0;
};
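  // Inverse of SerializeState: consumes bytes from an [input, end) iterator
  // range, reporting failure (rather than crashing) when the input is
  // exhausted early or the shape protobuf does not parse.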
template <typename InputIterator>
class DeserializeState {
public:
DeserializeState(InputIterator input, InputIterator end)
: input_(input), end_(end) {}
int64_t num_read() const { return num_read_; }
template <typename NativeT>
ABSL_MUST_USE_RESULT bool ReadElement(NativeT& element) {
constexpr PrimitiveType primitive_type =
primitive_util::NativeToPrimitiveType<NativeT>();
static_assert(primitive_util::BitWidth(primitive_type) % 8 == 0);
if constexpr (primitive_util::IsComplexType(primitive_type)) {
using ComponentT =
primitive_util::NativeTypeOf<primitive_util::ComplexComponentType(
primitive_type)>;
ComponentT real;
if (!ReadElement(real)) {
return false;
}
ComponentT imag;
if (!ReadElement(imag)) {
return false;
}
element = NativeT(real, imag);
} else {
constexpr PrimitiveType unsigned_type =
primitive_util::UnsignedIntegralTypeForBitWidth(
primitive_util::BitWidth(primitive_type));
using UnsignedT = primitive_util::NativeTypeOf<unsigned_type>;
if constexpr (sizeof(UnsignedT) == 1) {
if (at_end()) {
return false;
}
element = absl::bit_cast<NativeT>(*input_++);
++num_read_;
} else {
UnsignedT unsigned_element = 0;
for (int i = 0, shift = 0; i < sizeof unsigned_element;
++i, shift += CHAR_BIT) {
if (at_end()) {
return false;
}
unsigned_element |=
static_cast<UnsignedT>(static_cast<unsigned char>(*input_++))
<< shift;
++num_read_;
}
element = absl::bit_cast<NativeT>(unsigned_element);
}
}
return true;
}
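    // Reads a span of elements, mirroring WriteElements: sub-byte types are
    // unpacked from the low-order bits of each byte first.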
template <typename NativeT>
ABSL_MUST_USE_RESULT bool ReadElements(absl::Span<NativeT> elements) {
constexpr PrimitiveType primitive_type =
primitive_util::NativeToPrimitiveType<NativeT>();
constexpr int bits_per_element = primitive_util::BitWidth(primitive_type);
if constexpr (bits_per_element < 8) {
static_assert(!primitive_util::IsFloatingPointType(primitive_type));
static_assert(!primitive_util::IsComplexType(primitive_type));
static_assert(8 % bits_per_element == 0);
constexpr int elements_per_byte = 8 / bits_per_element;
int64_t bytes = elements.size() / elements_per_byte;
for (int64_t i = 0; i < bytes; ++i) {
uint8_t byte;
if (!ReadElement(byte)) {
return false;
}
for (int b = 0; b < elements_per_byte; ++b) {
elements[i * elements_per_byte + b] =
static_cast<NativeT>(byte & LsbMask<uint8_t>(bits_per_element));
byte >>= bits_per_element;
}
}
int64_t rest = elements.size() % elements_per_byte;
if (rest != 0) {
uint8_t byte;
if (!ReadElement(byte)) {
return false;
}
for (int64_t b = 0; b < rest; ++b) {
elements[bytes * elements_per_byte + b] =
static_cast<NativeT>(byte & LsbMask<uint8_t>(bits_per_element));
byte >>= bits_per_element;
}
}
} else {
for (NativeT& element : elements) {
if (!ReadElement(element)) {
return false;
}
}
}
return true;
}
bool ReadDynamicSizes(absl::Span<DynamicSizeType> sizes) {
return ReadElements(sizes);
}
absl::StatusOr<Shape> ReadShape(uint64_t size) {
std::string shape_bytes;
shape_bytes.reserve(size);
while (shape_bytes.size() < size) {
if (at_end()) {
return InvalidArgument("Failed to read shape data");
}
shape_bytes.push_back(*input_++);
++num_read_;
}
ShapeProto proto;
if (!proto.ParseFromString(shape_bytes)) {
return InvalidArgument("Failed to parse shape protobuf");
}
Shape shape(proto);
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
return std::move(shape);
}
bool at_end() const { return input_ == end_; }
private:
InputIterator input_;
InputIterator end_;
int64_t num_read_ = 0;
};
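  // Whether a leaf array's values are materialized (kKnown), explicitly
  // marked unknown (kUnknown), or not yet decided (kUndetermined); see
  // IsKnown/IsDetermined and the CreateFromShapeWith*LeafArrays factories.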
enum class ArrayValueState { kKnown = 0, kUnknown = 1, kUndetermined = 2 };
class Piece {
public:
ArrayValueState get_array_value_state() const;
void set_array_value_state(ArrayValueState state);
template <typename NativeT>
absl::Span<const NativeT> data() const;
template <typename NativeT>
absl::Span<NativeT> data();
void* untyped_data();
const void* untyped_data() const;
template <typename NativeT>
NativeT Get(absl::Span<const int64_t> index) const;
template <typename NativeT>
void Set(absl::Span<const int64_t> index, NativeT value);
DynamicSizeType GetDynamicSize(int64_t dim_index) const;
void SetDynamicSize(int64_t dim_index, DynamicSizeType size);
void AllocateBuffers();
void DeallocateBuffers();
const char* buffer() const;
char* buffer() {
return const_cast<char*>(const_cast<const Piece*>(this)->buffer( | #include "xla/literal.h"
#include <algorithm>
#include <cmath>
#include <complex>
#include <cstdint>
#include <functional>
#include <limits>
#include <random>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/casts.h"
#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
class LiteralUtilTest : public ::testing::Test {
protected:
LiteralUtilTest() {
Array4D<float> arr4d({
{
{
{1, 2, 3},
{4, 5, 6},
{7, 8, 9},
},
{
{11, 12, 13},
{14, 15, 16},
{17, 18, 19},
},
},
{
{
{101, 102, 103},
{104, 105, 106},
{107, 108, 109},
},
{
{201, 202, 203},
{204, 205, 206},
{207, 208, 209},
},
},
});
layout_r2_dim0major_ = LayoutUtil::MakeLayout({1, 0});
layout_r2_dim0minor_ = LayoutUtil::MakeLayout({0, 1});
layout_r3_dim0major_ = LayoutUtil::MakeLayout({2, 1, 0});
layout_r3_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2});
layout_r4_dim0major_ = LayoutUtil::MakeLayout({3, 2, 1, 0});
layout_r4_dim0minor_ = LayoutUtil::MakeLayout({0, 1, 2, 3});
literal_r4_2x2x3x3_dim0major_ =
LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
layout_r4_dim0major_);
literal_r4_2x2x3x3_dim0minor_ =
LiteralUtil::CreateR4FromArray4DWithLayout<float>(arr4d,
layout_r4_dim0minor_);
}
Layout layout_r2_dim0major_;
Layout layout_r2_dim0minor_;
Layout layout_r3_dim0major_;
Layout layout_r3_dim0minor_;
Layout layout_r4_dim0major_;
Layout layout_r4_dim0minor_;
Literal literal_r4_2x2x3x3_dim0major_;
Literal literal_r4_2x2x3x3_dim0minor_;
};
TEST_F(LiteralUtilTest, LiteralScalarToString) {
auto true_lit = LiteralUtil::CreateR0<bool>(true);
EXPECT_EQ("pred[] true", true_lit.ToString());
auto false_lit = LiteralUtil::CreateR0<bool>(false);
EXPECT_EQ("pred[] false", false_lit.ToString());
auto u4_lit = LiteralUtil::CreateR0<u4>(u4(5));
EXPECT_EQ("u4[] 5", u4_lit.ToString());
auto u32_lit = LiteralUtil::CreateR0<uint32_t>(42);
EXPECT_EQ("u32[] 42", u32_lit.ToString());
auto s4_lit = LiteralUtil::CreateR0<s4>(s4(-3));
EXPECT_EQ("s4[] -3", s4_lit.ToString());
auto s32_lit = LiteralUtil::CreateR0<int32_t>(-999);
EXPECT_EQ("s32[] -999", s32_lit.ToString());
auto f32_lit = LiteralUtil::CreateR0<float>(3.14f);
EXPECT_EQ("f32[] 3.14", f32_lit.ToString());
auto f16_lit = LiteralUtil::CreateR0<half>(static_cast<half>(0.5f));
EXPECT_EQ("f16[] 0.5", f16_lit.ToString());
auto c64_lit = LiteralUtil::CreateR0<complex64>({3.14f, 2.78f});
EXPECT_EQ("c64[] (3.14, 2.78)", c64_lit.ToString());
auto c128_lit = LiteralUtil::CreateR0<complex128>({3.14, 2.78});
EXPECT_EQ("c128[] (3.14, 2.78)", c128_lit.ToString());
auto bf16_lit = LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(0.5f));
EXPECT_EQ("bf16[] 0.5", bf16_lit.ToString());
auto bf16_lit_truncated =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(3.14f));
ASSERT_EQ("bf16[] 3.141", bf16_lit_truncated.ToString());
auto bf16_lit_truncated2 =
LiteralUtil::CreateR0<bfloat16>(static_cast<bfloat16>(9.001f));
EXPECT_EQ("bf16[] 9", bf16_lit_truncated2.ToString());
auto f8e5m2_lit =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2(0.5));
EXPECT_EQ("f8e5m2[] 0.5", f8e5m2_lit.ToString());
auto f8e5m2_lit_truncated =
LiteralUtil::CreateR0<tsl::float8_e5m2>(tsl::float8_e5m2(3.141));
EXPECT_EQ("f8e5m2[] 3", f8e5m2_lit_truncated.ToString());
auto f8e4m3_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3fn>(tsl::float8_e4m3fn(0.5));
EXPECT_EQ("f8e4m3fn[] 0.5", f8e4m3_lit.ToString());
auto f8e4m3b11fnuz_lit = LiteralUtil::CreateR0<tsl::float8_e4m3b11fnuz>(
tsl::float8_e4m3b11fnuz(0.5));
EXPECT_EQ("f8e4m3b11fnuz[] 0.5", f8e4m3b11fnuz_lit.ToString());
auto f8e4m3fnuz_lit =
LiteralUtil::CreateR0<tsl::float8_e4m3fnuz>(tsl::float8_e4m3fnuz(0.5));
EXPECT_EQ("f8e4m3fnuz[] 0.5", f8e4m3fnuz_lit.ToString());
auto f8e5m2fnuz_lit =
LiteralUtil::CreateR0<tsl::float8_e5m2fnuz>(tsl::float8_e5m2fnuz(0.5));
EXPECT_EQ("f8e5m2fnuz[] 0.5", f8e5m2fnuz_lit.ToString());
}
TEST_F(LiteralUtilTest, LiteralVectorToString) {
auto pred_vec = LiteralUtil::CreateR1<bool>({true, false, true});
EXPECT_EQ("pred[3] {1, 0, 1}", pred_vec.ToString());
}
TEST_F(LiteralUtilTest, R2ToString) {
const auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
const std::string expected = R"(s32[3,2] {
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R2DynamicToString) {
auto literal = LiteralUtil::CreateR2({{1, 2}, {3, 4}, {5, 6}});
literal.SetDynamicSize(0, {}, 2);
const std::string expected = R"(s32[<=3,2](2,2) {
{ 1, 2 },
{ 3, 4 }
})";
EXPECT_EQ(expected, literal.ToString());
auto literal2 = LiteralUtil::CreateR2({{1, 2, 3}, {4, 5, 6}});
literal2.SetDynamicSize(1, {}, 2);
const std::string expected2 = R"(s32[2,<=3](2,2) {
{ 1, 2 },
{ 4, 5 }
})";
EXPECT_EQ(expected2, literal2.ToString());
}
TEST_F(LiteralUtilTest, R2BoolDynamicToString) {
auto literal = LiteralUtil::CreateR2<bool>(
{{true, true, true}, {true, true, true}, {true, true, true}});
literal.SetDynamicSize(0, {}, 2);
const std::string expected = R"(pred[<=3,3](2,3) {
{ 1, 1, 1 },
{ 1, 1, 1 }
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R3ToString) {
const auto literal =
LiteralUtil::CreateR3({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
const std::string expected = R"(s32[3,2,1] {
{
{1},
{2}
},
{
{3},
{4}
},
{
{5},
{6}
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, R6ToString) {
const auto literal =
LiteralUtil::CreateFromDimensions(S32, {2, 2, 1, 1, 1, 2});
const std::string expected = R"(s32[2,2,1,1,1,2] {
{
{
{
{
{ 0, 0 }
}
}
},
{
{
{
{ 0, 0 }
}
}
}
},
{
{
{
{
{ 0, 0 }
}
}
},
{
{
{
{ 0, 0 }
}
}
}
}
})";
EXPECT_EQ(expected, literal.ToString());
}
TEST_F(LiteralUtilTest, TupleToString) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
const std::string expected = R"((
f32[] 1,
f32[2,2] {
{ 1, 2 },
{ 3, 4 }
}
))";
EXPECT_EQ(expected, tuple.ToString());
}
TEST_F(LiteralUtilTest, CreateR3FromArray3d) {
Array3D<float> array_3d({
{{1.0f, 2.0f},
{3.0f, 4.0f},
{5.0f, 6.0f}},
{{7.0f, 8.0f},
{9.0f, 10.0f},
{11.0f, 12.0f}},
});
auto literal = LiteralUtil::CreateR3FromArray3D(array_3d);
EXPECT_THAT(literal.shape().dimensions(), ElementsAre(2, 3, 2));
std::string result = literal.ToString();
const std::string expected = R"(f32[2,3,2] {
{
{ 1, 2 },
{ 3, 4 },
{ 5, 6 }
},
{
{ 7, 8 },
{ 9, 10 },
{ 11, 12 }
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, LiteralR4F32ProjectedStringifies) {
auto literal = LiteralUtil::CreateR4Projected<float>({
{1, 2},
{1001, 1002},
{2001, 2002},
}, 1, 2);
EXPECT_THAT(literal.shape().dimensions(), ElementsAre(1, 2, 3, 2));
std::string result = literal.ToString();
const std::string expected = R"(f32[1,2,3,2] {
{
{
{ 1, 2 },
{ 1001, 1002 },
{ 2001, 2002 }
},
{
{ 1, 2 },
{ 1001, 1002 },
{ 2001, 2002 }
}
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, LiteralR4F32Stringifies) {
EXPECT_THAT(literal_r4_2x2x3x3_dim0major_.shape().dimensions(),
ElementsAre(2, 2, 3, 3));
std::string result = literal_r4_2x2x3x3_dim0major_.ToString();
const std::string expected = R"(f32[2,2,3,3] {
{
{
{ 1, 2, 3 },
{ 4, 5, 6 },
{ 7, 8, 9 }
},
{
{ 11, 12, 13 },
{ 14, 15, 16 },
{ 17, 18, 19 }
}
},
{
{
{ 101, 102, 103 },
{ 104, 105, 106 },
{ 107, 108, 109 }
},
{
{ 201, 202, 203 },
{ 204, 205, 206 },
{ 207, 208, 209 }
}
}
})";
EXPECT_EQ(expected, result);
}
TEST_F(LiteralUtilTest, EachCellR2F32) {
auto literal = LiteralUtil::CreateR2<float>({
{3.1f, 4.2f},
{9.3f, 12.4f},
});
std::vector<std::tuple<int64_t, int64_t, std::string>> seen;
literal.EachCellAsString(
[&seen](absl::Span<const int64_t> indices, const std::string& value) {
seen.emplace_back(indices[0], indices[1], value);
});
using Elem = std::tuple<int64_t, int64_t, std::string>;
std::vector<Elem> expected = {Elem(0, 0, "3.1"), Elem(0, 1, "4.2"),
Elem(1, 0, "9.3"), Elem(1, 1, "12.4")};
EXPECT_EQ(expected, seen);
}
TEST_F(LiteralUtilTest, ScalarEquality) {
auto f32_42 = LiteralUtil::CreateR0<float>(42.0);
auto f32_42_clone = LiteralUtil::CreateR0<float>(42.0);
EXPECT_EQ(f32_42, f32_42);
EXPECT_EQ(f32_42, f32_42_clone);
auto f32_123 = LiteralUtil::CreateR0<float>(123.0);
EXPECT_NE(f32_42, f32_123);
auto f64_42 = LiteralUtil::CreateR0<double>(42.0);
EXPECT_NE(f32_42, f64_42);
}
TEST_F(LiteralUtilTest, NonScalarEquality) {
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto matrix_different =
LiteralUtil::CreateR2<float>({{4.0, 3.0}, {1.0, 2.0}});
auto vector_literal = LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0, 4.0});
auto scalar = LiteralUtil::CreateR0<float>(1.0);
Literal nil(ShapeUtil::MakeNil());
EXPECT_EQ(matrix, matrix);
EXPECT_EQ(matrix, matrix_clone);
EXPECT_NE(matrix, matrix_different);
EXPECT_NE(matrix, vector_literal);
EXPECT_NE(matrix, scalar);
EXPECT_NE(matrix, nil);
EXPECT_EQ(nil, nil);
}
TEST_F(LiteralUtilTest, TokenEquality) {
auto token0 = LiteralUtil::CreateToken();
auto token1 = LiteralUtil::CreateToken();
auto scalar = LiteralUtil::CreateR0<float>(1.0);
EXPECT_EQ(token0, token1);
EXPECT_NE(token0, scalar);
EXPECT_EQ(LiteralUtil::MakeTuple({&token0}),
LiteralUtil::MakeTuple({&token0}));
EXPECT_EQ(LiteralUtil::MakeTuple({&token0, &scalar}),
LiteralUtil::MakeTuple({&token1, &scalar}));
EXPECT_NE(LiteralUtil::MakeTuple({&token0, &scalar}),
LiteralUtil::MakeTuple({&scalar, &token1}));
}
TEST_F(LiteralUtilTest, DifferentLayoutEquality) {
Literal colmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1}));
colmajor.Set<float>({0, 0}, 1.0);
colmajor.Set<float>({0, 1}, 2.0);
colmajor.Set<float>({1, 0}, 3.0);
colmajor.Set<float>({1, 1}, 4.0);
Literal rowmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0}));
rowmajor.Set<float>({0, 0}, 1.0);
rowmajor.Set<float>({0, 1}, 2.0);
rowmajor.Set<float>({1, 0}, 3.0);
rowmajor.Set<float>({1, 1}, 4.0);
EXPECT_EQ(rowmajor, colmajor);
}
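// Same data as above, but compared with Equal(other, true), which also
// requires the layouts to match, so the literals compare unequal.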
TEST_F(LiteralUtilTest, DifferentLayoutInEquality) {
Literal colmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1}));
colmajor.Set<float>({0, 0}, 1.0);
colmajor.Set<float>({0, 1}, 2.0);
colmajor.Set<float>({1, 0}, 3.0);
colmajor.Set<float>({1, 1}, 4.0);
Literal rowmajor(ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0}));
rowmajor.Set<float>({0, 0}, 1.0);
rowmajor.Set<float>({0, 1}, 2.0);
rowmajor.Set<float>({1, 0}, 3.0);
rowmajor.Set<float>({1, 1}, 4.0);
EXPECT_FALSE(rowmajor.Equal(colmajor, true));
EXPECT_FALSE(colmajor.Equal(rowmajor, true));
}
TEST_F(LiteralUtilTest, TupleEquality) {
auto scalar = LiteralUtil::CreateR0<float>(1.0);
auto matrix = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto tuple1 = LiteralUtil::MakeTuple({&scalar, &matrix});
auto scalar_clone = LiteralUtil::CreateR0<float>(1.0);
auto tuple2 = LiteralUtil::MakeTuple({&scalar_clone, &matrix});
EXPECT_EQ(tuple1, tuple2);
auto reversed_tuple = LiteralUtil::MakeTuple({&matrix, &scalar});
EXPECT_NE(tuple1, reversed_tuple);
auto scalar_42 = LiteralUtil::CreateR0<float>(42.0);
auto different_tuple = LiteralUtil::MakeTuple({&scalar_42, &matrix});
EXPECT_NE(tuple1, different_tuple);
}
TEST_F(LiteralUtilTest, DynamicShapeEquality) {
auto r1 = LiteralUtil::CreateR1<float>({1.0, 2.0});
r1.SetDynamicSize(0, {}, 1);
auto r2 = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
r2.SetDynamicSize(0, {}, 1);
auto tuple1 = LiteralUtil::MakeTuple({&r1, &r2});
auto r1_clone = LiteralUtil::CreateR1<float>({1.0, 3.0});
r1_clone.SetDynamicSize(0, {}, 1);
auto tuple2 = LiteralUtil::MakeTuple({&r1_clone, &r2});
EXPECT_EQ(tuple1, tuple2);
auto r2_clone = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
r2_clone.SetDynamicSize(0, {}, 2);
auto tuple_3 = LiteralUtil::MakeTuple({&r1_clone, &r2_clone});
EXPECT_NE(tuple1, tuple_3);
}
TEST_F(LiteralUtilTest, C64Equality) {
auto vector = LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_clone =
LiteralUtil::CreateR1<complex64>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(vector, vector_clone);
auto vector_reversed =
LiteralUtil::CreateR1<complex64>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(vector, vector_reversed);
}
TEST_F(LiteralUtilTest, C128Equality) {
auto vector = LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
auto vector_clone =
LiteralUtil::CreateR1<complex128>({{1.0, 2.0}, {3.0, 4.0}});
EXPECT_EQ(vector, vector_clone);
auto vector_reversed =
LiteralUtil::CreateR1<complex128>({{3.0, 4.0}, {1.0, 2.0}});
EXPECT_NE(vector, vector_reversed);
}
TEST_F(LiteralUtilTest, IsAllTuple) {
auto element1 = LiteralUtil::CreateR0<float>(0.0);
auto element2 = LiteralUtil::CreateR2<float>({{0.0, 0.0}, {0.0, 0.0}});
auto tuple = LiteralUtil::MakeTuple({&element1, &element1});
EXPECT_FALSE(tuple.IsAll(0));
EXPECT_FALSE(tuple.IsAll(1));
}
TEST_F(LiteralUtilTest, CreateFromShapeTuple) {
auto scalar = LiteralUtil::CreateR0<float>(0.0);
auto matrix = LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}});
auto tuple = LiteralUtil::MakeTuple({&scalar, &matrix});
auto x = Literal::CreateFromShape(tuple.shape());
EXPECT_EQ(tuple, x);
}
TEST_F(LiteralUtilTest, IsAll) {
EXPECT_TRUE(LiteralUtil::CreateR0<bool>(false).IsAll(0));
EXPECT_TRUE(LiteralUtil::CreateR0<bool>(true).IsAll(1));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAll(1));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAll(2));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(0));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(2));
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(true).IsAll(-1));
auto int8_min = std::numeric_limits<int8_t>::min();
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(255).IsAll(int8_min));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(42.0).IsAll(42));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(42.0001).IsAll(42));
EXPECT_TRUE(LiteralUtil::CreateR1<int>({100, 100, 100}).IsAll(100));
EXPECT_FALSE(LiteralUtil::CreateR1<double>({100, 100, 100.001}).IsAll(100));
EXPECT_TRUE(LiteralUtil::CreateR2<uint64_t>({{8, 8}, {8, 8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>({{8, 8}, {8, 9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>({{9, 8}, {8, 8}}).IsAll(8));
half h8(8.0f);
half h9(9.0f);
EXPECT_TRUE(LiteralUtil::CreateR2<half>({{h8}, {h8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h8}, {h9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<half>({{h9}, {h8}}).IsAll(8));
bfloat16 b8(8.0f);
bfloat16 b9(9.0f);
EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b8}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b8}, {b9}}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR2<bfloat16>({{b9}, {b8}}).IsAll(8));
bfloat16 b91(9.001f);
bfloat16 b90(9.00f);
EXPECT_TRUE(LiteralUtil::CreateR2<bfloat16>({{b91}, {b90}}).IsAll(9.0));
tsl::float8_e5m2 q16(8);
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e5m2>({q16}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e5m2>({q16}).IsAll(9));
tsl::float8_e4m3fn r16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3fn>({r16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3fn>({r16}).IsAll(9));
tsl::float8_e4m3b11fnuz s16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3b11fnuz>({s16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3b11fnuz>({s16}).IsAll(9));
tsl::float8_e4m3fnuz t16(9);
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e4m3fnuz>({t16}).IsAll(8));
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e4m3fnuz>({t16}).IsAll(9));
tsl::float8_e5m2fnuz u16(8);
EXPECT_TRUE(LiteralUtil::CreateR1<tsl::float8_e5m2fnuz>({u16}).IsAll(8));
EXPECT_FALSE(LiteralUtil::CreateR1<tsl::float8_e5m2fnuz>({u16}).IsAll(9));
complex64 c8_9 = {8, 9};
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}}).IsAll(8));
auto uint64_max = std::numeric_limits<uint64_t>::max();
EXPECT_FALSE(LiteralUtil::CreateR2<uint64_t>(
{{uint64_max, uint64_max}, {uint64_max, uint64_max}})
.IsAll(-1));
}
TEST_F(LiteralUtilTest, IsAllFloat) {
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int8_t>(0).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(0).IsAllFloat(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(.5).IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<float>(-.5).IsAllFloat(-.5));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(-.5).IsAllFloat(-.49));
EXPECT_FALSE(
LiteralUtil::CreateR2<float>({{0, 0, 0}, {0, .1, 0}}).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR2<float>({{.5, .5, .5}, {.5, .5, .5}})
.IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(0).IsAllFloat(0));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(.5).IsAllFloat(.5));
EXPECT_TRUE(LiteralUtil::CreateR0<double>(-.5).IsAllFloat(-.5));
EXPECT_FALSE(LiteralUtil::CreateR0<double>(-.5).IsAllFloat(-.49));
EXPECT_FALSE(
LiteralUtil::CreateR2<double>({{0, 0, 0}, {0, .1, 0}}).IsAllFloat(0));
EXPECT_TRUE(
LiteralUtil::CreateR0<bfloat16>(bfloat16(128.)).IsAllFloat(128.5));
}
TEST_F(LiteralUtilTest, IsAllComplex) {
EXPECT_FALSE(LiteralUtil::CreateR0<bool>(false).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int8_t>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<uint8_t>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<int>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<float>(0).IsAllComplex(0));
EXPECT_FALSE(LiteralUtil::CreateR0<double>(0).IsAllComplex(0));
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}})
.IsAllComplex({8.0f, 9.0f}));
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}})
.IsAllComplex({8.0f, 9.0f}));
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c7_9}})
.IsAllComplex({8.0f, 9.0f}));
}
TEST_F(LiteralUtilTest, IsAllFirst) {
EXPECT_FALSE(LiteralUtil::CreateR1<bool>({false, true}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<bool>({false, false}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<int8_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<int8_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<uint8_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<int32_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<int32_t>({1, 1, 2}).IsAllFirst());
EXPECT_TRUE(LiteralUtil::CreateR1<uint32_t>({5, 5, 5, 5}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR1<uint32_t>({1, 1, 2}).IsAllFirst());
complex64 c8_9 = {8, 9};
complex64 c7_9 = {7, 9};
EXPECT_TRUE(LiteralUtil::CreateR2<complex64>({{c8_9}, {c8_9}}).IsAllFirst());
EXPECT_FALSE(LiteralUtil::CreateR2<complex64>({{c7_9}, {c8_9}}).IsAllFirst());
}
TEST_F(LiteralUtilTest, CountEqualInt) {
EXPECT_EQ(LiteralUtil::CreateR1<int8_t>({}).CountEqual<int8_t>(1), 0);
EXPECT_EQ(
LiteralUtil::CreateR1<int8_t>({1, 2, 3, 4, 5, 100}).CountEqual<int8_t>(2),
1);
EXPECT_EQ(LiteralUtil::CreateR1<int8_t>({0, 3, 6, 0, 9, 18, 0})
.CountEqual<int8_t>(0),
3);
EXPECT_EQ(LiteralUtil::CreateR1<int32_t>({234, 345, 4, 45, 5467, 5467, 5467})
.CountEqual<int32_t>(5467),
3);
}
TEST_F(LiteralUtilTest, CountEqualFloat) {
EXPECT_EQ(LiteralUtil::CreateR1<float>({}).CountEqual<float>(0), 0);
EXPECT_EQ(LiteralUtil::CreateR1<float>({1.1, 2.2, 3.3, 4.4, 5.5, 100.6})
.CountEqual<float>(3.3),
1);
EXPECT_EQ(LiteralUtil::CreateR1<float>({7.62, 3, 7.75, 7.62, 7.3, 2, 7.62})
.CountEqual<float>(7.62),
3);
EXPECT_EQ(LiteralUtil::CreateR1<float>(
{NAN, 0, 6.8, NAN, NAN, NAN, 63.12, 24.6, NAN})
.CountEqual<float>(NAN),
5);
}
TEST_F(LiteralUtilTest, CountEqualBool) {
EXPECT_EQ(LiteralUtil::CreateR1<bool>({false, true}).CountEqual<bool>(false),
1);
}
TEST_F(LiteralUtilTest, CountEqualComplex) {
EXPECT_EQ(LiteralUtil::CreateR1<std::complex<double>>(
{std::complex<float>(1, 2), std::complex<float>(3, 4),
std::complex<float>(5, 6), std::complex<float>(6, 7)})
.CountEqual<float>(std::complex<float>(5, 6)),
1);
}
TEST_F(LiteralUtilTest, CountEqualMismatched) {
EXPECT_EQ(LiteralUtil::CreateR1<float>({13, 10.5, 15.6, 22.7})
.CountEqual<int8_t>(13),
1);
EXPECT_EQ(
LiteralUtil::CreateR1<float>({10.5, 15.6, 22.7}).CountEqual<int8_t>(1),
0);
EXPECT_EQ(LiteralUtil::CreateR1<std::complex<float>>(
{std::complex<float>(1, 2), std::complex<float>(3, 4),
std::complex<float>(5, 6), std::complex<float>(6, 7)})
.CountEqual<float>(1),
0);
}
TEST_F(LiteralUtilTest, IsZero) {
auto scalar_zero = LiteralUtil::CreateR0<float>(0.0f);
auto scalar_one = LiteralUtil::CreateR0<float>(1.0f);
EXPECT_TRUE(scalar_zero.IsZero({}));
EXPECT_FALSE(scalar_one.IsZero({}));
auto array = LiteralUtil::CreateR2<uint32_t>({{1, 2, 0, 3}, {1, 0, 1, 2}});
EXPECT_FALSE(array.IsZero({0, 1}));
EXPECT_TRUE(array.IsZero({0, 2}));
EXPECT_TRUE(array.IsZero({1, 1}));
EXPECT_FALSE(array.IsZero({1, 2}));
auto complex_zero = LiteralUtil::CreateR0<complex64>(0.0f);
auto complex_nonzero = LiteralUtil::CreateR0<complex64>(0.5f);
EXPECT_TRUE(complex_zero.IsZero({}));
EXPECT_FALSE(complex_nonzero.IsZero({}));
}
template <typename T>
class LiteralUtilTestTemplated : public ::testing::Test {};
using TestedTypes = ::testing::Types<float, int32_t, uint32_t, complex64>;
TYPED_TEST_SUITE(LiteralUtilTestTemplated, TestedTypes);
TYPED_TEST(LiteralUtilTestTemplated, Relayout2x2) {
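  // TypeParam(1) / TypeParam(2) truncates to 0 for the integral TypeParams;
  // any value works here since the test only checks relayout equality.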
TypeParam half = TypeParam(1) / TypeParam(2);
auto data = LiteralUtil::CreateR2<TypeParam>({{half, 2}, {3, 4}});
const Layout layout01 = LayoutUtil::MakeLayout({0, 1});
const Layout layout10 = LayoutUtil::MakeLayout({1, 0});
auto data01 = data.Relayout(layout01);
EXPECT_TRUE(LayoutUtil::Equal(data01.shape().layout(), layout01));
EXPECT_EQ(data, data01);
auto data10 = data.Relayout(layout10);
EXPECT_TRUE(LayoutUtil::Equal(data10.shape().layout(), layout10));
EXPECT_EQ(data, data10);
}
TEST_F(LiteralUtilTest, ReshapeR0) {
auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original.Reshape({}).value();
EXPECT_EQ(original, reshape);
}
TEST_F(LiteralUtilTest, ReshapeR4) {
auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0major_);
auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
}, layout_r3_dim0major_);
auto reshape = original.Reshape({3, 4, 2}).value();
EXPECT_EQ(expected, reshape);
}
TEST_F(LiteralUtilTest, ReshapeR4Dim0Minor) {
auto original = LiteralUtil::CreateR4WithLayout<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}}, layout_r4_dim0minor_);
auto expected = LiteralUtil::CreateR3WithLayout<float>({
{{10, 11}, {12, 13}, {14, 15}, {16, 17}},
{{18, 19}, {20, 21}, {22, 23}, {24, 25}},
{{26, 27}, {28, 29}, {30, 31}, {32, 33}},
}, layout_r3_dim0major_);
auto reshape = original.Reshape({3, 4, 2}).value();
EXPECT_EQ(expected, reshape);
}
TEST_F(LiteralUtilTest, TransposeR0) {
auto original = LiteralUtil::CreateR0<float>(1.7f);
auto reshape = original.Transpose({});
EXPECT_EQ(original, reshape);
}
TEST_F(LiteralUtilTest, TransposeR4) {
auto original = LiteralUtil::CreateR4<float>({{
{{10, 11, 12, 13}, {14, 15, 16, 17}},
{{18, 19, 20, 21}, {22, 23, 24, 25}},
{{26, 27, 28, 29}, {30, 31, 32, 33}},
}});
auto reshape = original.Transpose({2, 3, 0, 1});
reshape.EachCell<float>([&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>(
{indices[2], indices[3], indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, TransposeDynamicR2) {
auto original = LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}});
original.SetDynamicSize(1, 1);
auto reshape = original.Transpose({1, 0});
reshape.EachCell<float>([&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[1], indices[0]}));
});
}
TEST_F(LiteralUtilTest, ToStaticR2) {
auto original = LiteralUtil::CreateR2<float>({{1, 2, 3}, {4, 5, 6}});
original.SetDynamicSize(1, 1);
auto static_literal = original.ToStatic();
EXPECT_EQ(static_literal.shape(), ShapeUtil::MakeShape(F32, {2, 1}));
EXPECT_TRUE(static_literal.shape().is_static());
static_literal.EachCell<float>(
[&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, ToBoundedDynamicR2) {
auto original = LiteralUtil::CreateR2<float>({{1}, {4}});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {2, 3}, {false, true});
auto dynamic_literal = original.ToBoundedDynamic(dynamic_shape);
EXPECT_EQ(dynamic_literal.shape(), dynamic_shape);
dynamic_literal.EachCell<float>(
[&](absl::Span<const int64_t> indices, float value) {
EXPECT_EQ(value, original.Get<float>({indices[0], indices[1]}));
});
}
TEST_F(LiteralUtilTest, TestR4RelayoutEquivalence) {
auto dim0minor_relaid_to_dim0major =
literal_r4_2x2x3x3_dim0minor_.Relayout(layout_r4_dim0major_);
EXPECT_EQ(literal_r4_2x2x3x3_dim0major_, dim0minor_relaid_to_dim0major);
auto dim0major_relaid_to_dim0minor =
literal_r4_2x2x3x3_dim0major_.Relayout(layout_r4_dim0minor_);
EXPECT_EQ(literal_r4_2x2x3x3_dim0minor_, dim0major_relaid_to_dim0minor);
}
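// Adapter that routes absl hashing through Literal::Hash so the tests can
// exercise both layout-sensitive and layout-insensitive hashing.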
template <bool kIsLayoutSensitive>
struct HashTester {
template <typename H>
friend H AbslHashValue(H h, const HashTester& key) {
return Literal::Hash<H, kIsLayoutSensitive, 64>(
std::move(h), *key.literal);
}
const Literal* literal;
};
TEST_F(LiteralUtilTest, TestR2LinearLayout) {
auto mat_dim0minor = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2, 3}, {4, 5, 6}}, layout_r2_dim0minor_);
EXPECT_EQ(mat_dim0minor.element_count(), 6);
EXPECT_THAT(mat_dim0minor.data<int32_t>(), ElementsAre(1, 4, 2, 5, 3, 6));
auto relaid_mat_to_dim0major = mat_ |
1,785 | cpp | tensorflow/tensorflow | reference_util | third_party/xla/xla/reference_util.cc | third_party/xla/xla/reference_util_test.cc | #ifndef XLA_REFERENCE_UTIL_H_
#define XLA_REFERENCE_UTIL_H_
#include <algorithm>
#include <array>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/padding.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
class ReferenceUtil {
public:
template <typename T>
static std::unique_ptr<Array2D<T>> TransposeArray2D(
const Array2D<T>& operand) {
auto result =
std::make_unique<Array2D<T>>(operand.width(), operand.height());
for (int64_t w = 0; w < operand.width(); ++w) {
for (int64_t h = 0; h < operand.height(); ++h) {
(*result)(w, h) = operand(h, w);
}
}
return result;
}
template <typename T>
static std::unique_ptr<Array2D<T>> MatmulArray2D(const Array2D<T>& lhs,
const Array2D<T>& rhs) {
return HloEvaluator::MatmulArray2D(lhs, rhs);
}
static std::unique_ptr<Array2D<double>> Array2DF32ToF64(
const Array2D<float>& input);
static std::unique_ptr<Array4D<float>> ConvArray4D(
const Array4D<float>& lhs, const Array4D<float>& rhs,
std::pair<int64_t, int64_t> kernel_stride, Padding padding);
static std::unique_ptr<Array4D<float>> ConvArray4DGeneralDimensions(
const Array4D<float>& lhs, const Array4D<float>& rhs,
std::pair<int64_t, int64_t> kernel_stride, Padding padding,
ConvolutionDimensionNumbers dimension_numbers);
static std::unique_ptr<Array4D<float>> ConvArray4DGeneralDimensionsDilated(
const Array4D<float>& lhs, const Array4D<float>& rhs,
std::pair<int64_t, int64_t> kernel_stride, Padding padding,
std::pair<int64_t, int64_t> lhs_dilation,
std::pair<int64_t, int64_t> rhs_dilation,
ConvolutionDimensionNumbers dnums);
static std::unique_ptr<Array3D<float>> ConvArray3D(const Array3D<float>& lhs,
const Array3D<float>& rhs,
int64_t kernel_stride,
Padding padding);
static std::unique_ptr<Array3D<float>> ConvArray3DGeneralDimensionsDilated(
const Array3D<float>& lhs, const Array3D<float>& rhs,
int64_t kernel_stride, Padding padding, int64_t lhs_dilation,
int64_t rhs_dilation, const ConvolutionDimensionNumbers& dnums);
static std::unique_ptr<Array4D<float>> SeparableConvArray4D(
const Array4D<float>& input, const Array4D<float>& depthwise_weights,
const Array4D<float>& pointwise_weights,
std::pair<int64_t, int64_t> kernel_stride, Padding padding);
static std::unique_ptr<std::vector<float>> ReduceToColArray2D(
const Array2D<float>& matrix, float init,
absl::FunctionRef<float(float, float)> reduce_function);
static std::unique_ptr<std::vector<float>> ReduceToRowArray2D(
const Array2D<float>& matrix, float init,
absl::FunctionRef<float(float, float)> reduce_function);
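  // Reduces a 2D array along one dimension. Example: reducing
  // {{1, 2, 3}, {4, 5, 6}} over dimension 0 with `+` and init 0 yields the
  // per-column sums {5, 7, 9}; over dimension 1 it yields the per-row sums
  // {6, 15}.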
template <typename T>
static std::vector<T> ReduceR2ToR1(const Array2D<T>& input,
int dimension_to_reduce, T init,
absl::FunctionRef<T(T, T)> freduce) {
std::vector<T> result(dimension_to_reduce == 0 ? input.n2() : input.n1(),
init);
for (int i0 = 0; i0 < input.n1(); ++i0) {
for (int i1 = 0; i1 < input.n2(); ++i1) {
int output = dimension_to_reduce == 0 ? i1 : i0;
result[output] = freduce(result[output], input(i0, i1));
}
}
return result;
}
static std::vector<float> Reduce4DTo1D(
const Array4D<float>& array, float init, absl::Span<const int64_t> dims,
absl::FunctionRef<float(float, float)> reduce_function);
static std::unique_ptr<Array4D<float>> Broadcast1DTo4D(
const std::vector<float>& array, const std::vector<int64_t>& bounds,
int64_t broadcast_from_dim);
static std::unique_ptr<Array2D<float>> Reduce3DTo2D(
const Array3D<float>& array, float init, absl::Span<const int64_t> dims,
absl::FunctionRef<float(float, float)> reduce_function);
static std::unique_ptr<Array2D<float>> MapArray2D(
const Array2D<float>& matrix,
absl::FunctionRef<float(float)> map_function);
static std::unique_ptr<Array2D<float>> MapArray2D(
const Array2D<float>& lhs, const Array2D<float>& rhs,
absl::FunctionRef<float(float, float)> map_function);
static std::unique_ptr<Array3D<float>> MapArray3D(
const Array3D<float>& array,
absl::FunctionRef<float(float)> map_function);
static std::unique_ptr<Array3D<float>> MapArray3D(
const Array3D<float>& lhs, const Array3D<float>& rhs,
absl::FunctionRef<float(float, float)> map_function);
static int64_t WindowCount(int64_t unpadded_width, int64_t window_len,
int64_t stride, Padding padding);
static std::unique_ptr<std::vector<float>> ReduceWindow1DAdd(
absl::Span<const float> operand, float init,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
Padding padding);
static std::unique_ptr<Array3D<float>> ReduceWindow3DAdd(
const Array3D<float>& operand, float init,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
Padding padding);
static std::unique_ptr<Array4D<float>> ReduceWindow4DAdd(
const Array4D<float>& operand, float init,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
Padding padding);
static std::unique_ptr<std::vector<float>> ReduceWindow1DGeneric(
absl::Span<const float> operand, float init,
absl::FunctionRef<float(float, float)> reduce_func,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> padding);
static std::unique_ptr<Array4D<float>> ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
absl::FunctionRef<float(float, float)> reduce_func,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
Padding padding);
static std::unique_ptr<Array4D<float>> ReduceWindow4DGeneric(
const Array4D<float>& operand, float init,
absl::FunctionRef<float(float, float)> reduce_func,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
absl::Span<const std::pair<int64_t, int64_t>> padding);
static std::unique_ptr<Array4D<float>> BatchNorm4D(
const Array4D<float>& input, const Array4D<float>& mean,
const Array4D<float>& var, const Array4D<float>& scale,
const Array4D<float>& offset, float epsilon);
static std::unique_ptr<Array4D<float>> SelectAndScatter4DGePlus(
const Array4D<float>& operand, const Array4D<float>& source, float init,
absl::Span<const int64_t> window, absl::Span<const int64_t> stride,
bool same_padding);
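  // Concatenates two 2D arrays along the given dimension. Example:
  // Concat2D({{1, 2}}, {{3, 4}}, /*concatenate_dimension=*/0) produces
  // {{1, 2}, {3, 4}}, while concatenating along dimension 1 produces
  // {{1, 2, 3, 4}}.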
template <typename T>
static std::unique_ptr<Array2D<T>> Concat2D(const Array2D<T>& lhs,
const Array2D<T>& rhs,
int concatenate_dimension) {
CHECK(0 <= concatenate_dimension && concatenate_dimension < 2);
auto result = std::make_unique<Array2D<T>>(
concatenate_dimension == 0 ? lhs.n1() + rhs.n1() : lhs.n1(),
concatenate_dimension == 1 ? lhs.n2() + rhs.n2() : lhs.n2());
for (int64_t i0 = 0; i0 < result->n1(); ++i0) {
for (int64_t i1 = 0; i1 < result->n2(); ++i1) {
(*result)(i0, i1) = i0 < lhs.n1() && i1 < lhs.n2()
? lhs(i0, i1)
: rhs(i0 >= lhs.n1() ? i0 - lhs.n1() : i0,
i1 >= lhs.n2() ? i1 - lhs.n2() : i1);
}
}
return result;
}
template <typename T>
static std::unique_ptr<Array3D<T>> Concat3D(const Array3D<T>& lhs,
const Array3D<T>& rhs,
int concatenate_dimension) {
CHECK(0 <= concatenate_dimension && concatenate_dimension < 3);
const int64_t lhs_dims[] = {lhs.n1(), lhs.n2(), lhs.n3()};
const int64_t rhs_dims[] = {rhs.n1(), rhs.n2(), rhs.n3()};
int64_t out_dims[] = {rhs.n1(), rhs.n2(), rhs.n3()};
for (int i = 0; i < 3; ++i) {
if (i != concatenate_dimension) {
out_dims[i] = lhs_dims[i];
CHECK_EQ(lhs_dims[i], rhs_dims[i]);
} else {
out_dims[i] = lhs_dims[i] + rhs_dims[i];
}
}
auto result =
std::make_unique<Array3D<T>>(out_dims[0], out_dims[1], out_dims[2]);
for (int64_t i0 = 0; i0 < result->n1(); ++i0) {
for (int64_t i1 = 0; i1 < result->n2(); ++i1) {
for (int64_t i2 = 0; i2 < result->n3(); ++i2) {
(*result)(i0, i1, i2) =
i0 < lhs.n1() && i1 < lhs.n2() && i2 < lhs.n3()
? lhs(i0, i1, i2)
: rhs(i0 >= lhs.n1() ? i0 - lhs.n1() : i0,
i1 >= lhs.n2() ? i1 - lhs.n2() : i1,
i2 >= lhs.n3() ? i2 - lhs.n3() : i2);
}
}
}
return result;
}
template <typename T>
static std::unique_ptr<Array4D<T>> Concat4D(const Array4D<T>& lhs,
const Array4D<T>& rhs,
int concatenate_dimension) {
CHECK(0 <= concatenate_dimension && concatenate_dimension < 4);
const int64_t lhs_dims[] = {lhs.n1(), lhs.n2(), lhs.n3(), lhs.n4()};
const int64_t rhs_dims[] = {rhs.n1(), rhs.n2(), rhs.n3(), rhs.n4()};
int64_t out_dims[] = {rhs.n1(), rhs.n2(), rhs.n3(), rhs.n4()};
for (int i = 0; i < 4; ++i) {
if (i != concatenate_dimension) {
out_dims[i] = lhs_dims[i];
CHECK_EQ(lhs_dims[i], rhs_dims[i]);
} else {
out_dims[i] = lhs_dims[i] + rhs_dims[i];
}
}
auto result = std::make_unique<Array4D<T>>(out_dims[0], out_dims[1],
out_dims[2], out_dims[3]);
for (int64_t i0 = 0; i0 < result->n1(); ++i0) {
for (int64_t i1 = 0; i1 < result->n2(); ++i1) {
for (int64_t i2 = 0; i2 < result->n3(); ++i2) {
for (int64_t i3 = 0; i3 < result->n4(); ++i3) {
(*result)(i0, i1, i2, i3) =
i0 < lhs.n1() && i1 < lhs.n2() && i2 < lhs.n3() && i3 < lhs.n4()
? lhs(i0, i1, i2, i3)
: rhs(i0 >= lhs.n1() ? i0 - lhs.n1() : i0,
i1 >= lhs.n2() ? i1 - lhs.n2() : i1,
i2 >= lhs.n3() ? i2 - lhs.n3() : i2,
i3 >= lhs.n4() ? i3 - lhs.n4() : i3);
}
}
}
}
return result;
}
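  // 1D slice with dynamic-slice-style clamping: `start` is clamped into
  // [0, input.size() - size] so the window stays in bounds (the caller must
  // ensure size <= input.size()). Example: slicing {1, 2, 3, 4, 5} with
  // start=4, size=3 clamps start to 2 and returns {3, 4, 5}.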
template <typename T>
static std::vector<T> ClampSlice1D(absl::Span<const T> input, int64_t start,
int64_t size) {
start = std::min<int64_t>(std::max<int64_t>(0, start), input.size() - size);
std::vector<T> result;
for (int64_t i = 0; i < size; ++i) {
result.push_back(input[(start + i)]);
}
return result;
}
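  // Strided 2D slice over [starts, limits) with the given strides; each
  // output dimension has CeilOfRatio(limit - start, stride) elements.
  // Example: starts={0, 0}, limits={2, 3}, strides={1, 2} on
  // {{1, 2, 3}, {4, 5, 6}} yields {{1, 3}, {4, 6}}.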
template <typename T>
static std::unique_ptr<Array2D<T>> Slice2D(const Array2D<T>& input,
std::array<int64_t, 2> starts,
std::array<int64_t, 2> limits,
std::array<int64_t, 2> strides) {
CHECK_LE(starts[0], input.n1());
CHECK_LE(starts[1], input.n2());
CHECK_LE(limits[0], input.n1());
CHECK_LE(limits[1], input.n2());
CHECK_GE(strides[0], 1);
CHECK_GE(strides[1], 1);
auto result = std::make_unique<Array2D<T>>(
CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]));
for (int64_t i0 = 0; i0 < result->n1(); ++i0) {
for (int64_t i1 = 0; i1 < result->n2(); ++i1) {
(*result)(i0, i1) =
input(starts[0] + i0 * strides[0], starts[1] + i1 * strides[1]);
}
}
return result;
}
template <typename T>
static std::unique_ptr<Array3D<T>> Slice3D(const Array3D<T>& input,
std::array<int64_t, 3> starts,
std::array<int64_t, 3> limits,
std::array<int64_t, 3> strides) {
CHECK_LE(starts[0], input.n1());
CHECK_LE(starts[1], input.n2());
CHECK_LE(starts[2], input.n3());
CHECK_LE(limits[0], input.n1());
CHECK_LE(limits[1], input.n2());
CHECK_LE(limits[2], input.n3());
CHECK_GE(strides[0], 1);
CHECK_GE(strides[1], 1);
CHECK_GE(strides[2], 1);
auto result = std::make_unique<Array3D<T>>(
CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]),
CeilOfRatio(limits[2] - starts[2], strides[2]));
for (int64_t i0 = 0; i0 < result->n1(); ++i0) {
for (int64_t i1 = 0; i1 < result->n2(); ++i1) {
for (int64_t i2 = 0; i2 < result->n3(); ++i2) {
(*result)(i0, i1, i2) =
input(starts[0] + i0 * strides[0], starts[1] + i1 * strides[1],
starts[2] + i2 * strides[2]);
}
}
}
return result;
}
template <typename T>
static std::unique_ptr<Array4D<T>> Slice4D(const Array4D<T>& input,
std::array<int64_t, 4> starts,
std::array<int64_t, 4> limits,
std::array<int64_t, 4> strides) {
CHECK_LE(starts[0], input.n1());
CHECK_LE(starts[1], input.n2());
CHECK_LE(starts[2], input.n3());
CHECK_LE(starts[3], input.n4());
CHECK_LE(limits[0], input.n1());
CHECK_LE(limits[1], input.n2());
CHECK_LE(limits[2], input.n3());
CHECK_LE(limits[3], input.n4());
CHECK_GE(strides[0], 1);
CHECK_GE(strides[1], 1);
CHECK_GE(strides[2], 1);
CHECK_GE(strides[3], 1);
auto result = std::make_unique<Array4D<T>>(
CeilOfRatio(limits[0] - starts[0], strides[0]),
CeilOfRatio(limits[1] - starts[1], strides[1]),
CeilOfRatio(limits[2] - starts[2], strides[2]),
CeilOfRatio(limits[3] - starts[3], strides[3]));
for (int64_t i0 = 0; i0 < result->n1(); ++i0) {
for (int64_t i1 = 0; i1 < result->n2(); ++i1) {
for (int64_t i2 = 0; i2 < result->n3(); ++i2) {
for (int64_t i3 = 0; i3 < result->n4(); ++i3) {
(*result)(i0, i1, i2, i3) =
input(starts[0] + i0 * strides[0], starts[1] + i1 * strides[1],
starts[2] + i2 * strides[2], starts[3] + i3 * strides[3]);
}
}
}
}
return result;
}
static std::unique_ptr<Array2D<float>> MapWithIndexArray2D(
const Array2D<float>& matrix,
absl::FunctionRef<float(float, int64_t, int64_t)> map_function);
template <typename F>
static std::unique_ptr<Array4D<float>> MapArray4D(const Array4D<float>& input,
F&& map_function) {
return MapWithIndexArray4D(
input, [&](float value, int64_t, int64_t, int64_t, int64_t) {
return map_function(value);
});
}
template <typename F>
static std::unique_ptr<Array4D<float>> MapWithIndexArray4D(
const Array4D<float>& input, F&& map_function) {
auto result = std::make_unique<Array4D<float>>(
input.planes(), input.depth(), input.height(), input.width());
for (int64_t plane = 0; plane < input.planes(); ++plane) {
for (int64_t depth = 0; depth < input.depth(); ++depth) {
for (int64_t height = 0; height < input.height(); ++height) {
for (int64_t width = 0; width < input.width(); ++width) {
(*result)(plane, depth, height, width) =
map_function(input(plane, depth, height, width), plane, depth,
height, width);
}
}
}
}
return result;
}
template <typename F>
static std::unique_ptr<Array4D<float>> MapArray4D(const Array4D<float>& lhs,
const Array4D<float>& rhs,
F&& map_function) {
return MapWithIndexArray4D(
lhs, rhs,
[&](float lhs, float rhs, int64_t, int64_t, int64_t, int64_t) {
return map_function(lhs, rhs);
});
}
template <typename F>
static std::unique_ptr<Array4D<float>> MapWithIndexArray4D(
const Array4D<float>& lhs, const Array4D<float>& rhs, F&& map_function) {
auto result = std::make_unique<Array4D<float>>(lhs.planes(), lhs.depth(),
lhs.height(), lhs.width());
for (int64_t plane = 0; plane < lhs.planes(); ++plane) {
for (int64_t depth = 0; depth < lhs.depth(); ++depth) {
for (int64_t height = 0; height < lhs.height(); ++height) {
for (int64_t width = 0; width < lhs.width(); ++width) {
(*result)(plane, depth, height, width) = map_function(
lhs(plane, depth, height, width),
rhs(plane, depth, height, width), plane, depth, height, width);
}
}
}
}
return result;
}
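  // Pads a 2D array with low/high edge padding plus interior padding between
  // elements, following the PaddingConfig semantics of XLA's Pad. Each output
  // dimension has in + low + high + (in - 1) * interior elements; e.g. in=3,
  // low=1, high=1, interior=1 gives an output extent of 7.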
template <typename NativeT>
static std::unique_ptr<Array2D<NativeT>> PadArray2D(
const Array2D<NativeT>& operand, const PaddingConfig& padding,
const NativeT pad) {
int64_t in0 = operand.n1();
int64_t high_padding0 = padding.dimensions(0).edge_padding_high();
int64_t low_padding0 = padding.dimensions(0).edge_padding_low();
int64_t interior_padding0 = padding.dimensions(0).interior_padding();
int64_t out0 =
in0 + low_padding0 + high_padding0 + (in0 - 1) * interior_padding0;
int64_t in1 = operand.n2();
int64_t high_padding1 = padding.dimensions(1).edge_padding_high();
int64_t low_padding1 = padding.dimensions(1).edge_padding_low();
int64_t interior_padding1 = padding.dimensions(1).interior_padding();
int64_t out1 =
in1 + low_padding1 + high_padding1 + (in1 - 1) * interior_padding1;
auto result = std::make_unique<Array2D<NativeT>>(out0, out1);
result->Fill(pad);
int64_t o0 = low_padding0;
for (int64_t i0 = 0; i0 < in0; ++i0) {
int64_t o1 = low_padding1;
for (int64_t i1 = 0; i1 < in1; ++i1) {
if (o0 >= 0 && o1 >= 0 && o0 < out0 && o1 < out1) {
(*result)(o0, o1) = operand(i0, i1);
}
o1 += interior_padding1 + 1;
}
o0 += interior_padding0 + 1;
}
return result;
}
template <typename NativeT>
static Array3D<NativeT> PadArray3D(const Array3D<NativeT>& operand,
const PaddingConfig& padding,
const NativeT pad) {
CHECK_EQ(padding.dimensions_size(), 3);
const int64_t input_bounds[] = {operand.n1(), operand.n2(), operand.n3()};
int64_t pad_low[3];
int64_t pad_high[3];
int64_t pad_interior[3];
int64_t output_bounds[3];
for (int64_t i = 0; i < 3; ++i) {
pad_low[i] = padding.dimensions(i).edge_padding_low();
pad_high[i] = padding.dimensions(i).edge_padding_high();
CHECK_LE(0, pad_low[i]);
CHECK_LE(0, pad_high[i]);
CHECK_LE(0, padding.dimensions(i).interior_padding())
<< "not implemented";
pad_interior[i] = padding.dimensions(i).interior_padding();
output_bounds[i] = pad_low[i] + input_bounds[i] + pad_high[i] +
(input_bounds[i] - 1) * pad_interior[i];
}
Array3D<NativeT> result(output_bounds[0], output_bounds[1],
output_bounds[2]);
int indices[] = {0, 0, 0};
for (indices[0] = 0; indices[0] < output_bounds[0]; ++indices[0]) {
for (indices[1] = 0; indices[1] < output_bounds[1]; ++indices[1]) {
for (indices[2] = 0; indices[2] < output_bounds[2]; ++indices[2]) {
NativeT* value = &result(indices[0], indices[1], indices[2]);
bool value_padded = false;
for (int i = 0; i < 3; ++i) {
bool in_low_padding = indices[i] < pad_low[i];
bool in_high_padding = indices[i] >= output_bounds[i] - pad_high[i];
if (in_low_padding || in_high_padding) {
*value = pad;
value_padded = true;
}
if (pad_interior[i] &&
(indices[i] - pad_low[i]) % (pad_interior[i] + 1)) {
*value = pad;
value_padded = true;
}
}
if (value_padded) {
continue;
}
*value = operand((indices[0] - pad_low[0]) / (pad_interior[0] + 1),
(indices[1] - pad_low[1]) / (pad_interior[1] + 1),
(indices[2] - pad_low[2]) / (pad_interior[2] + 1));
}
}
}
return result;
}
template <typename NativeT>
static Array4D<NativeT> PadArray4D(const Array4D<NativeT>& operand,
const PaddingConfig& padding,
const NativeT pad) {
CHECK_EQ(padding.dimensions_size(), 4);
const int64_t input_bounds[] = {operand.n1(), operand.n2(), operand.n3(),
operand.n4()};
int64_t pad_low[4];
int64_t pad_high[4];
int64_t pad_interior[4];
int64_t output_bounds[4];
for (int64_t i = 0; i < 4; ++i) {
pad_low[i] = padding.dimensions(i).edge_padding_low();
pad_high[i] = padding.dimensions(i).edge_padding_high();
CHECK_LE(0, padding.dimensions(i).interior_padding())
<< "not implemented";
pad_interior[i] = padding.dimensions(i).interior_padding();
output_bounds[i] = pad_low[i] + input_bounds[i] + pad_high[i] +
(input_bounds[i] - 1) * pad_interior[i];
}
Array4D<NativeT> result(output_bounds[0], output_bounds[1],
output_bounds[2], output_bounds[3]);
result.Each([&](absl::Span<const int64_t> indices, NativeT* value) {
for (int i = 0; i < 4; ++i) {
bool in_low_padding = indices[i] < pad_low[i];
bool in_high_padding = indices[i] >= output_bounds[i] - pad_high[i];
if (in_low_padding || in_high_padding) {
*value = pad;
return;
}
if (pad_interior[i] &&
(indices[i] - pad_low[i]) % (pad_interior[i] + 1)) {
*value = pad;
return;
}
}
*value = operand((indices[0] - pad_low[0]) / (pad_interior[0] + 1),
(indices[1] - pad_low[1]) / (pad_interior[1] + 1),
(indices[2] - pad_low[2]) / (pad_interior[2] + 1),
(indices[3] - pad_low[3]) / (pad_interior[3] + 1));
});
return result;
}
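  // Applies `f` elementwise across one or more same-shaped 2D arrays, e.g.
  // ApplyElementwise2D([](float a, float b) { return a + b; }, x, y)
  // computes the elementwise sum of x and y.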
template <typename F, typename T1, typename... Ts>
static std::unique_ptr<Array2D<T1>> ApplyElementwise2D(
F&& f, const Array2D<T1>& array1, const Array2D<Ts>&... arrays) {
AssertSameSize2D(array1, arrays...);
auto result = std::make_unique<Array2D<T1>>(array1.n1(), array1.n2());
for (int64_t i = 0; i < array1.n1(); ++i) {
for (int64_t j = 0; j < array1.n2(); ++j) {
(*result)(i, j) = f(array1(i, j), arrays(i, j)...);
}
}
return result;
}
private:
template <typename T1, typename T2, typename... Ts>
static void AssertSameSize2D(const Array2D<T1>& array1,
const Array2D<T2>& array2,
const Array2D<Ts>&... arrays) {
static_assert(std::is_same<T1, T2>::value, "Args must be same type.");
CHECK_EQ(array1.n1(), array2.n1());
CHECK_EQ(array1.n2(), array2.n2());
AssertSameSize2D(array2, arrays...);
}
template <typename Array1>
static void AssertSameSize2D(const Array1& array1) {}
ReferenceUtil(const ReferenceUtil&) = delete;
ReferenceUtil& operator=(const ReferenceUtil&) = delete;
};
}
#endif
#include "xla/reference_util.h"
#include <array>
#include <cmath>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/shape_inference.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/math/math_util.h"
#include "tsl/platform/logging.h"
namespace xla {
std::unique_ptr<Array2D<double>> ReferenceUtil::Array2DF32ToF64(
const Array2D<float>& input) {
auto result =
std::make_unique<Array2D<double>>(input.height(), input.width());
for (int64_t rowno = 0; rowno < input.height(); ++rowno) {
for (int64_t colno = 0; colno < input.width(); ++colno) {
(*result)(rowno, colno) = input(rowno, colno);
}
}
return result;
}
std::unique_ptr<Array3D<float>> ReferenceUtil::ConvArray3D(
const Array3D<float>& lhs, const Array3D<float>& rhs, int64_t kernel_stride,
Padding padding) {
return ConvArray3DGeneralDimensionsDilated(
lhs, rhs, kernel_stride, padding, 1, 1,
XlaBuilder::CreateDefaultConvDimensionNumbers(1));
}
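// General rank-3 convolution with striding, dilation, and caller-supplied
// dimension numbers; ConvArray3D above is the special case with no dilation
// and default dimension numbers.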
std::unique_ptr<Array3D<float>>
ReferenceUtil::ConvArray3DGeneralDimensionsDilated(
const Array3D<float>& lhs, const Array3D<float>& rhs, int64_t kernel_stride,
Padding padding, int64_t lhs_dilation, int64_t rhs_dilation,
const ConvolutionDimensionNumbers& dnums) {
CHECK_EQ(dnums.input_spatial_dimensions_size(), 1);
CHECK_EQ(dnums.kernel_spatial_dimensions_size(), 1);
CHECK_EQ(dnums.output_spatial_dimensions_size(), 1); | #include "xla/reference_util.h"
#include <cmath>
#include <memory>
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/padding.h"
#include "xla/literal.h"
#include "xla/test.h"
#include "xla/tests/literal_test_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class ReferenceUtilTest : public ::testing::Test {
protected:
ReferenceUtilTest() {
matrix_ = std::make_unique<Array2D<float>>(rows_, cols_);
for (int64_t i = 0; i < rows_; ++i) {
for (int64_t j = 0; j < cols_; ++j) {
(*matrix_)(i, j) = i * cols_ + j + 1;
}
}
}
const int64_t rows_ = 2;
const int64_t cols_ = 3;
std::unique_ptr<Array2D<float>> matrix_;
};
TEST_F(ReferenceUtilTest, TransposeArray2D) {
auto result = ReferenceUtil::TransposeArray2D(*matrix_);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 4.f}, {2.f, 5.f}, {3.f, 6.f}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MatmulArray2D) {
Array2D<float> rhs({
{7.f, 8.f},
{9.f, 10.f},
{11.f, 12.f},
});
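  // [[1 2 3], [4 5 6]] * [[7 8], [9 10], [11 12]] = [[58 64], [139 154]].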
auto result = ReferenceUtil::MatmulArray2D(*matrix_, rhs);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{58.f, 64.f}, {139.f, 154.f}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ReduceToColArray2D) {
auto add = [](float lhs, float rhs) { return lhs + rhs; };
auto result = ReferenceUtil::ReduceToColArray2D(*matrix_, 0.0f, add);
auto actual_literal = LiteralUtil::CreateR1<float>(*result);
LiteralTestUtil::ExpectR1Near<float>({6.f, 15.f}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ReduceToRowArray2D) {
auto add = [](float lhs, float rhs) { return lhs + rhs; };
auto result = ReferenceUtil::ReduceToRowArray2D(*matrix_, 0.0f, add);
auto actual_literal = LiteralUtil::CreateR1<float>(*result);
LiteralTestUtil::ExpectR1Near<float>({5.f, 7.f, 9.f}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, Array2DF32ToF64Test) {
auto result = ReferenceUtil::Array2DF32ToF64(*matrix_);
ASSERT_EQ(result->height(), matrix_->height());
ASSERT_EQ(result->width(), matrix_->width());
for (int64_t rowno = 0; rowno < matrix_->height(); ++rowno) {
for (int64_t colno = 0; colno < matrix_->width(); ++colno) {
EXPECT_EQ(static_cast<double>((*matrix_)(rowno, colno)),
(*result)(rowno, colno));
}
}
}
TEST_F(ReferenceUtilTest, Reduce4Dto1DZeroSizedArray) {
auto result = LiteralUtil::CreateR1<float>(ReferenceUtil::Reduce4DTo1D(
Array4D<float>(1, 0, 1, 1), 0, {0, 1, 2},
[](float a, float b) { return a + b; }));
LiteralTestUtil::ExpectR1Equal<float>({0}, result);
}
TEST_F(ReferenceUtilTest, MapArray2D) {
auto identity = [](float value) { return std::log(std::exp(value)); };
auto result = ReferenceUtil::MapArray2D(*matrix_, identity);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2NearArray2D(*matrix_, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapArray3D) {
auto identity = [](float value) { return std::log(std::exp(value)); };
Array3D<float> input(2, 3, 4);
input.FillIota(0);
auto result = ReferenceUtil::MapArray3D(input, identity);
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3NearArray3D(input, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapWithIndexArray2D) {
auto add_index = [](float value, int64_t row, int64_t col) {
return value + row + col;
};
auto result = ReferenceUtil::MapWithIndexArray2D(*matrix_, add_index);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f, 5.f}, {5.f, 7.f, 9.f}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapArray4D) {
  auto input = std::make_unique<Array4D<float>>(/*planes=*/2, /*depth=*/3,
                                                /*height=*/4, /*width=*/5);
input->FillWithMultiples(1.0f);
auto multiply_by_two = [](float value) { return 2 * value; };
auto result = ReferenceUtil::MapArray4D(*input, multiply_by_two);
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
Array4D<float> expected(2, 3, 4, 5);
expected.FillWithMultiples(2.0f);
LiteralTestUtil::ExpectR4NearArray4D(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, MapWithIndexArray4D) {
  auto input = std::make_unique<Array4D<float>>(2, 3, 4, 5);
input->FillWithMultiples(1.0f);
auto subtract_index = [](float value, int64_t plane, int64_t depth,
int64_t height, int64_t width) {
return value - (3 * 4 * 5 * plane + 4 * 5 * depth + 5 * height + width);
};
auto result = ReferenceUtil::MapWithIndexArray4D(*input, subtract_index);
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
Array4D<float> expected(2, 3, 4, 5);
expected.Fill(0.0f);
LiteralTestUtil::ExpectR4NearArray4D(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceArray2D) {
auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 2}}, {{1, 1}});
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 2.f}, {4.f, 5.f}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceStridedArray2D) {
auto result = ReferenceUtil::Slice2D(*matrix_, {{0, 0}}, {{2, 3}}, {{1, 2}});
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*result);
LiteralTestUtil::ExpectR2Near<float>({{1.f, 3.f}, {4.f, 6.f}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceArray3D) {
Array3D<float> input(2, 3, 4);
input.FillIota(0);
auto result =
ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 2, 2}}, {{1, 1, 1}});
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3Near<float>(
{{{0.f, 1.f}, {4.f, 5.f}}, {{12.f, 13.f}, {16.f, 17.f}}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceStridedArray3D) {
Array3D<float> input(2, 3, 4);
input.FillIota(0);
auto result =
ReferenceUtil::Slice3D(input, {{0, 0, 0}}, {{2, 3, 4}}, {{1, 2, 2}});
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*result);
LiteralTestUtil::ExpectR3Near<float>(
{{{0.f, 2.f}, {8.f, 10.f}}, {{12.f, 14.f}, {20.f, 22.f}}}, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceArray4D) {
Array4D<float> input(2, 3, 4, 5);
input.FillIota(0);
auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 2, 2, 2}},
{{1, 1, 1, 1}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
LiteralTestUtil::ExpectR4Near<float>(
{{{{60.f, 61.f}, {65.f, 66.f}}, {{80.f, 81.f}, {85.f, 86.f}}}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, SliceStridedArray4D) {
Array4D<float> input(2, 3, 4, 5);
input.FillIota(0);
auto result = ReferenceUtil::Slice4D(input, {{1, 0, 0, 0}}, {{2, 3, 4, 5}},
{{1, 2, 2, 2}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*result);
LiteralTestUtil::ExpectR4Near<float>(
{{{{60.f, 62.f, 64.f}, {70.f, 72.f, 74.f}},
{{100.f, 102.f, 104.f}, {110.f, 112.f, 114.f}}}},
actual_literal, ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvArray3DWithSamePadding) {
Array3D<float> input = {{{1, 2, 3, 4}}};
Array3D<float> weights = {{{5, 6}}};
std::unique_ptr<Array3D<float>> actual =
ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kSame);
Array3D<float> expected = {{{17, 28, 39, 20}}};
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual);
LiteralTestUtil::ExpectR3NearArray3D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvArray3DWithValidPadding) {
Array3D<float> input = {{{1, 2, 3, 4}}};
Array3D<float> weights = {{{5, 6}}};
std::unique_ptr<Array3D<float>> actual =
ReferenceUtil::ConvArray3D(input, weights, 1, Padding::kValid);
Array3D<float> expected = {{{17, 28, 39}}};
auto actual_literal = LiteralUtil::CreateR3FromArray3D(*actual);
LiteralTestUtil::ExpectR3NearArray3D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvWithSamePadding) {
Array4D<float> input(1, 1, 4, 4);
input.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
Array4D<float> weights(1, 1, 2, 2);
weights.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4D(input, weights, {1, 1}, Padding::kSame);
Array4D<float> expected(1, 1, 4, 4);
expected.FillWithYX(Array2D<float>({
{100, 126, 152, 76},
{204, 230, 256, 124},
{308, 334, 360, 172},
{149, 160, 171, 80},
}));
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvWithValidPadding) {
Array4D<float> input(1, 1, 4, 4);
input.FillWithYX(Array2D<float>({
{1, 2, 3, 4 },
{5, 6, 7, 8 },
{9, 10, 11, 12},
{13, 14, 15, 16},
}));
Array4D<float> weights(1, 1, 2, 2);
weights.FillWithYX(Array2D<float>({
{5, 6},
{7, 8},
}));
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4D(input, weights, {1, 1}, Padding::kValid);
Array4D<float> expected(1, 1, 3, 3);
expected.FillWithYX(Array2D<float>({
{1*5+2*6+5*7+6*8, 126, 152},
{204, 230, 256},
{308, 334, 11*5+12*6+15*7+16*8},
}));
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithSamePadding) {
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 2, 3},
{4, 5, 6}},
{{7, 8, 9},
{10, 11, 12}},
{{13, 14, 15},
{16, 17, 18}}
}});
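  // Non-default layout: batch lives in dim 2 and features in dim 0 for both
  // input and output, with spatial dims 1 and 3; the kernel keeps output
  // features in dim 0, input features in dim 2, and spatial dims 1 and 3.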
ConvolutionDimensionNumbers dimension_numbers;
dimension_numbers.set_input_batch_dimension(2);
dimension_numbers.set_input_feature_dimension(0);
dimension_numbers.set_output_batch_dimension(2);
dimension_numbers.set_output_feature_dimension(0);
dimension_numbers.add_input_spatial_dimensions(1);
dimension_numbers.add_output_spatial_dimensions(1);
dimension_numbers.add_input_spatial_dimensions(3);
dimension_numbers.add_output_spatial_dimensions(3);
dimension_numbers.set_kernel_output_feature_dimension(0);
dimension_numbers.set_kernel_input_feature_dimension(2);
dimension_numbers.add_kernel_spatial_dimensions(1);
dimension_numbers.add_kernel_spatial_dimensions(3);
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4DGeneralDimensions(
input, weight, {1, 1}, Padding::kSame, dimension_numbers);
Array4D<float> expected({{
{{1110, 1688, 1838, 1226}},
{{1683, 2514, 2685, 1761}},
{{878, 1280, 1358, 866}}
}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ConvGeneralDimensionsWithValidPadding) {
Array4D<float> input({
{{{1, 2, 3, 4}},
{{5, 6, 7, 8}},
{{9, 10, 11, 12}}},
{{{13, 14, 15, 16}},
{{17, 18, 19, 20}},
{{21, 22, 23, 24}}}
});
Array4D<float> weight({{
{{1, 7, 13},
{4, 10, 16}},
{{2, 8, 14},
{5, 11, 17}},
{{3, 9, 15},
{6, 12, 18}}
}});
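  // Same layout as the kSame test above, except the kernel's spatial dims are
  // registered in reverse order (3, then 1), swapping the kernel's spatial
  // axes relative to the input's.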
ConvolutionDimensionNumbers dimension_numbers;
dimension_numbers.set_input_batch_dimension(2);
dimension_numbers.set_input_feature_dimension(0);
dimension_numbers.set_output_batch_dimension(2);
dimension_numbers.set_output_feature_dimension(0);
dimension_numbers.add_input_spatial_dimensions(1);
dimension_numbers.add_output_spatial_dimensions(1);
dimension_numbers.add_input_spatial_dimensions(3);
dimension_numbers.add_output_spatial_dimensions(3);
dimension_numbers.set_kernel_output_feature_dimension(0);
dimension_numbers.set_kernel_input_feature_dimension(2);
dimension_numbers.add_kernel_spatial_dimensions(3);
dimension_numbers.add_kernel_spatial_dimensions(1);
std::unique_ptr<Array4D<float>> actual =
ReferenceUtil::ConvArray4DGeneralDimensions(
input, weight, {1, 1}, Padding::kValid, dimension_numbers);
Array4D<float> expected({{{{2514, 2685}}}});
auto actual_literal = LiteralUtil::CreateR4FromArray4D(*actual);
LiteralTestUtil::ExpectR4NearArray4D<float>(expected, actual_literal,
ErrorSpec(0.0001));
}
TEST_F(ReferenceUtilTest, ApplyElementwise2D) {
Array2D<float> a({{1, 2}, {3, 4}});
Array2D<float> b({{10, 20}, {30, 40}});
Array2D<float> c({{100, 200}, {300, 400}});
auto actual = ReferenceUtil::ApplyElementwise2D(
[](float x, float y, float z) { return 100 * x + 10 * y + z; }, a, b, c);
auto actual_literal = LiteralUtil::CreateR2FromArray2D(*actual);
LiteralTestUtil::ExpectR2Near({{300.f, 600.f}, {900.f, 1200.f}},
actual_literal, ErrorSpec(0.0001));
}
}
} |
1,786 | cpp | tensorflow/tensorflow | status_macros | third_party/xla/xla/status_macros.cc | third_party/xla/xla/status_macros_test.cc | #ifndef XLA_STATUS_MACROS_H_
#define XLA_STATUS_MACROS_H_
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "xla/statusor.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/status.h"
namespace xla {
namespace status_macros {
extern const char kPossibleAutoJitAlternative[];
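// Builds an error message by streaming values into an absl::Status. Each `<<`
// appends to the message, and the stream must be converted (implicitly) to
// absl::Status or absl::StatusOr<T> exactly once; converting twice or never
// is itself logged as an error.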
class MakeErrorStream {
public:
class MakeErrorStreamWithOutput {
public:
explicit MakeErrorStreamWithOutput(MakeErrorStream* error_stream)
: wrapped_error_stream_(error_stream) {}
template <typename T>
MakeErrorStreamWithOutput& operator<<(const T& value) {
*wrapped_error_stream_ << value;
return *this;
}
operator absl::Status() { return wrapped_error_stream_->GetStatus(); }
template <typename T>
operator absl::StatusOr<T>() {
return wrapped_error_stream_->GetStatus();
}
private:
MakeErrorStream* wrapped_error_stream_;
MakeErrorStreamWithOutput(const MakeErrorStreamWithOutput&) = delete;
MakeErrorStreamWithOutput& operator=(const MakeErrorStreamWithOutput&) =
delete;
};
enum PriorMessageHandling { kAppendToPriorMessage, kPrependToPriorMessage };
template <typename ERROR_CODE_TYPE>
MakeErrorStream(const char* file, int line, ERROR_CODE_TYPE code);
template <typename T>
MakeErrorStreamWithOutput& operator<<(const T& value) {
CheckNotDone();
impl_->stream_ << value;
return impl_->make_error_stream_with_output_wrapper_;
}
MakeErrorStream& with_log_stack_trace() {
impl_->should_log_stack_trace_ = true;
return *this;
}
MakeErrorStreamWithOutput& add_ret_check_failure(const char* condition);
private:
class Impl {
public:
Impl(const char* file, int line, tsl::error::Code code,
MakeErrorStream* error_stream, bool is_logged_by_default = true);
Impl(const absl::Status& status,
PriorMessageHandling prior_message_handling, const char* file,
int line, MakeErrorStream* error_stream);
~Impl();
absl::Status GetStatus();
void CheckNotDone() const;
private:
const char* file_;
int line_;
absl::StatusCode code_;
PriorMessageHandling prior_message_handling_ = kAppendToPriorMessage;
std::string prior_message_;
bool is_done_;
std::ostringstream stream_;
bool should_log_;
int log_severity_;
bool should_log_stack_trace_;
MakeErrorStreamWithOutput make_error_stream_with_output_wrapper_;
friend class MakeErrorStream;
Impl(const Impl&) = delete;
Impl& operator=(const Impl&) = delete;
};
void CheckNotDone() const;
absl::Status GetStatus() const { return impl_->GetStatus(); }
std::unique_ptr<Impl> impl_;
MakeErrorStream(const MakeErrorStream&) = delete;
MakeErrorStream& operator=(const MakeErrorStream&) = delete;
};
template <typename ERROR_CODE_TYPE>
TF_ATTRIBUTE_NOINLINE MakeErrorStream::MakeErrorStream(const char* file,
int line,
ERROR_CODE_TYPE code)
    : impl_(new Impl(file, line, code, this, /*is_logged_by_default=*/true)) {}
class StatusAdaptorForMacros {
public:
explicit StatusAdaptorForMacros(absl::Status status)
: status_(std::move(status)) {}
StatusAdaptorForMacros(const StatusAdaptorForMacros&) = delete;
StatusAdaptorForMacros& operator=(const StatusAdaptorForMacros&) = delete;
explicit operator bool() const { return ABSL_PREDICT_TRUE(status_.ok()); }
absl::Status&& Consume() { return std::move(status_); }
private:
absl::Status status_;
};
}
}
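// TF_RET_CHECK(cond): like CHECK(cond), but on failure returns an INTERNAL
// absl::Status (with a stack trace logged) instead of aborting. Extra context
// can be streamed in, e.g.:
//   absl::Status Validate(int x) {
//     TF_RET_CHECK(x > 0) << "x must be positive, got " << x;
//     return absl::OkStatus();
//   }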
#define TF_RET_CHECK(condition) \
while (ABSL_PREDICT_FALSE(!(condition))) \
return xla::status_macros::MakeErrorStream(__FILE__, __LINE__, \
::tsl::error::INTERNAL) \
.with_log_stack_trace() \
.add_ret_check_failure(#condition)
#endif
#include "xla/status_macros.h"
#include <algorithm>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/status.h"
namespace xla {
namespace status_macros {
ABSL_CONST_INIT const char kPossibleAutoJitAlternative[] =
"This error might be occurring with the use of xla.compile. If it is not "
"necessary that every Op be compiled with XLA, an alternative is to use "
"auto_jit with OptimizerOptions.global_jit_level = ON_2 or the environment "
"variable TF_XLA_FLAGS=\"tf_xla_auto_jit=2\" which will attempt to use xla "
"to compile as much of the graph as the compiler is able to.";
static void LogError(const absl::Status& status, const char* filename, int line,
int log_severity, bool should_log_stack_trace) {
if (ABSL_PREDICT_TRUE(log_severity != tsl::NUM_SEVERITIES)) {
std::string stack_trace;
if (should_log_stack_trace) {
stack_trace = absl::StrCat("\n", tsl::CurrentStackTrace());
}
switch (log_severity) {
case tsl::INFO:
LOG(INFO) << status << stack_trace;
break;
case tsl::WARNING:
LOG(WARNING) << status << stack_trace;
break;
case tsl::ERROR:
LOG(ERROR) << status << stack_trace;
break;
case tsl::FATAL:
LOG(FATAL) << status << stack_trace;
break;
case tsl::NUM_SEVERITIES:
break;
default:
LOG(FATAL) << "Unknown LOG severity " << log_severity;
}
}
}
static absl::Status MakeError(const char* filename, int line,
absl::StatusCode code, const std::string& message,
bool should_log, int log_severity,
bool should_log_stack_trace) {
if (ABSL_PREDICT_FALSE(code == absl::StatusCode::kOk)) {
LOG(ERROR) << "Cannot create error with status OK";
code = absl::StatusCode::kUnknown;
}
const absl::Status status = absl::Status(code, message);
if (ABSL_PREDICT_TRUE(should_log)) {
LogError(status, filename, line, log_severity, should_log_stack_trace);
}
return status;
}
MakeErrorStream::MakeErrorStreamWithOutput&
MakeErrorStream::add_ret_check_failure(const char* condition) {
return *this << "RET_CHECK failure (" << impl_->file_ << ":" << impl_->line_
<< ") " << condition << " ";
}
void MakeErrorStream::CheckNotDone() const { impl_->CheckNotDone(); }
MakeErrorStream::Impl::Impl(const char* file, int line, tsl::error::Code code,
MakeErrorStream* error_stream,
bool is_logged_by_default)
: file_(file),
line_(line),
code_(static_cast<absl::StatusCode>(code)),
is_done_(false),
should_log_(is_logged_by_default),
log_severity_(tsl::ERROR),
should_log_stack_trace_(false),
make_error_stream_with_output_wrapper_(error_stream) {}
MakeErrorStream::Impl::Impl(const absl::Status& status,
PriorMessageHandling prior_message_handling,
const char* file, int line,
MakeErrorStream* error_stream)
: file_(file),
line_(line),
code_(!status.ok() ? static_cast<absl::StatusCode>(status.code())
: absl::StatusCode::kUnknown),
prior_message_handling_(prior_message_handling),
prior_message_(status.message()),
is_done_(false),
should_log_(true),
log_severity_(tsl::ERROR),
should_log_stack_trace_(false),
make_error_stream_with_output_wrapper_(error_stream) {
DCHECK(!status.ok()) << "Attempted to append/prepend error text to status OK";
}
MakeErrorStream::Impl::~Impl() {
if (!is_done_) {
LOG(ERROR) << "MakeErrorStream destructed without getting absl::Status: "
<< file_ << ":" << line_ << " " << stream_.str();
}
}
absl::Status MakeErrorStream::Impl::GetStatus() {
if (is_done_) {
LOG(ERROR) << "MakeErrorStream got absl::Status more than once: " << file_
<< ":" << line_ << " " << stream_.str();
}
is_done_ = true;
const std::string& stream_str = stream_.str();
const std::string str = prior_message_handling_ == kAppendToPriorMessage
? absl::StrCat(prior_message_, stream_str)
: absl::StrCat(stream_str, prior_message_);
if (ABSL_PREDICT_FALSE(str.empty())) {
return MakeError(
file_, line_, code_,
absl::StrCat(str, "Error without message at ", file_, ":", line_),
        /*should_log=*/true, /*log_severity=*/tsl::ERROR,
should_log_stack_trace_);
} else {
return MakeError(file_, line_, code_, str, should_log_, log_severity_,
should_log_stack_trace_);
}
}
void MakeErrorStream::Impl::CheckNotDone() const {
if (is_done_) {
LOG(ERROR) << "MakeErrorStream shift called after getting absl::Status: "
<< file_ << ":" << line_ << " " << stream_.str();
}
}
}
} | #include "xla/status_macros.h"
#include <functional>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::Status RetCheckFail() {
TF_RET_CHECK(2 > 3);
return absl::OkStatus();
}
absl::Status RetCheckFailWithExtraMessage() {
TF_RET_CHECK(2 > 3) << "extra message";
return absl::OkStatus();
}
absl::Status RetCheckSuccess() {
TF_RET_CHECK(3 > 2);
return absl::OkStatus();
}
TEST(StatusMacros, RetCheckFailing) {
absl::Status status = RetCheckFail();
EXPECT_EQ(status.code(), tsl::error::INTERNAL);
EXPECT_THAT(status.message(),
::testing::ContainsRegex("RET_CHECK failure.*2 > 3"));
}
TEST(StatusMacros, RetCheckFailingWithExtraMessage) {
absl::Status status = RetCheckFailWithExtraMessage();
EXPECT_EQ(status.code(), tsl::error::INTERNAL);
EXPECT_THAT(status.message(),
::testing::ContainsRegex("RET_CHECK.*2 > 3 extra message"));
}
TEST(StatusMacros, RetCheckSucceeding) {
absl::Status status = RetCheckSuccess();
EXPECT_IS_OK(status);
}
absl::StatusOr<int> CreateIntSuccessfully() { return 42; }
absl::StatusOr<int> CreateIntUnsuccessfully() {
return tsl::errors::Internal("foobar");
}
TEST(StatusMacros, AssignOrAssertOnOK) {
TF_ASSERT_OK_AND_ASSIGN(int result, CreateIntSuccessfully());
EXPECT_EQ(42, result);
}
absl::Status ReturnStatusOK() { return absl::OkStatus(); }
absl::Status ReturnStatusError() { return (tsl::errors::Internal("foobar")); }
using StatusReturningFunction = std::function<absl::Status()>;
absl::StatusOr<int> CallStatusReturningFunction(
const StatusReturningFunction& func) {
TF_RETURN_IF_ERROR(func());
return 42;
}
TEST(StatusMacros, ReturnIfErrorOnOK) {
absl::StatusOr<int> rc = CallStatusReturningFunction(ReturnStatusOK);
EXPECT_IS_OK(rc);
EXPECT_EQ(42, std::move(rc).value());
}
TEST(StatusMacros, ReturnIfErrorOnError) {
absl::StatusOr<int> rc = CallStatusReturningFunction(ReturnStatusError);
EXPECT_FALSE(rc.ok());
EXPECT_EQ(rc.status().code(), tsl::error::INTERNAL);
}
TEST(StatusMacros, AssignOrReturnSuccessfully) {
absl::Status status = []() {
TF_ASSIGN_OR_RETURN(int value, CreateIntSuccessfully());
EXPECT_EQ(value, 42);
return absl::OkStatus();
}();
EXPECT_IS_OK(status);
}
TEST(StatusMacros, AssignOrReturnUnsuccessfully) {
absl::Status status = []() {
TF_ASSIGN_OR_RETURN(int value, CreateIntUnsuccessfully());
(void)value;
return absl::OkStatus();
}();
EXPECT_FALSE(status.ok());
EXPECT_EQ(status.code(), tsl::error::INTERNAL);
}
} |
1,787 | cpp | tensorflow/tensorflow | window_util | third_party/xla/xla/window_util.cc | third_party/xla/xla/window_util_test.cc | #ifndef XLA_WINDOW_UTIL_H_
#define XLA_WINDOW_UTIL_H_
#include "absl/types/span.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace window_util {
Window MakeWindow(absl::Span<const int64_t> sizes);
Window MakeWindow(absl::Span<const int64_t> sizes,
absl::Span<const int64_t> strides);
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes);
std::string ToString(const WindowDimension& dim);
std::string ToString(const Window& window);
bool HasStride(const Window& window);
bool HasPadding(const Window& window);
bool HasSymmetricPadding(const Window& window);
bool HasNegativePadding(const Window& window);
bool HasSymmetricPadding(const PaddingConfig& padding_config);
bool HasBaseDilation(const Window& window);
bool HasWindowDilation(const Window& window);
bool HasDilation(const Window& window);
bool HasOverlappingWindow(const Window& window);
bool HasWindowReversal(const Window& window);
bool AllOrNoneReversed(const Window& window);
bool IsTrivialWindowDimension(const WindowDimension& window_dimension);
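// DilatedBound returns the size of a dimension of length `bound` after
// dilation, i.e. (bound - 1) * dilation + 1 (0 when bound is 0). StridedBound
// returns how many windows of size `window_size` fit in `bound` when stepping
// by `stride`, i.e. (bound - window_size) / stride + 1 (0 when the window
// does not fit).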
int64_t DilatedBound(int64_t bound, int64_t dilation);
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride);
}
}
#endif
#include "xla/window_util.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace window_util {
Window MakeWindow(absl::Span<const int64_t> sizes) {
Window window;
for (int64_t size : sizes) {
auto* dimension = window.add_dimensions();
dimension->set_size(size);
dimension->set_stride(1);
dimension->set_base_dilation(1);
dimension->set_window_dilation(1);
}
return window;
}
Window MakeWindow(absl::Span<const int64_t> sizes,
absl::Span<const int64_t> strides) {
Window window;
CHECK_EQ(sizes.size(), strides.size());
for (auto nb = 0; nb < sizes.size(); ++nb) {
auto* dimension = window.add_dimensions();
dimension->set_size(sizes[nb]);
dimension->set_stride(strides[nb]);
dimension->set_base_dilation(1);
dimension->set_window_dilation(1);
}
return window;
}
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes) {
PaddingConfig config;
for (int64_t size : sizes) {
auto* dimension = config.add_dimensions();
dimension->set_edge_padding_low(size);
dimension->set_edge_padding_high(size);
}
return config;
}
std::string ToString(const WindowDimension& dim) {
using absl::StrAppend;
using absl::StrCat;
std::string str = StrCat("(size=", dim.size());
if (dim.stride() != 1) {
StrAppend(&str, ",stride=", dim.stride());
}
if (dim.padding_low() != 0) {
StrAppend(&str, ",padding_low=", dim.padding_low());
}
if (dim.padding_high() != 0) {
StrAppend(&str, ",padding_high=", dim.padding_high());
}
if (dim.base_dilation() != 1) {
StrAppend(&str, ",base_dilation=", dim.base_dilation());
}
if (dim.window_dilation() != 1) {
StrAppend(&str, ",window_dilation=", dim.window_dilation());
}
if (dim.window_reversal()) {
StrAppend(&str, ",window_reversal");
}
StrAppend(&str, ")");
return str;
}
std::string ToString(const Window& window) {
using absl::StrAppend;
using absl::StrCat;
std::string str;
const auto add_field =
[&](const char* heading,
absl::FunctionRef<std::string(const WindowDimension&)> format) {
StrAppend(&str, heading, "=");
const char* prefix = "";
for (const auto& window_dimension : window.dimensions()) {
StrAppend(&str, prefix, format(window_dimension));
prefix = "x";
}
};
if (window.dimensions_size() > 0) {
add_field("size",
[](const WindowDimension& dim) { return StrCat(dim.size()); });
}
if (HasStride(window)) {
add_field(" stride",
[](const WindowDimension& dim) { return StrCat(dim.stride()); });
}
if (HasPadding(window)) {
add_field(" pad", [](const WindowDimension& dim) {
return StrCat(dim.padding_low(), "_", dim.padding_high());
});
}
if (HasBaseDilation(window)) {
add_field(" lhs_dilate", [](const WindowDimension& dim) {
return StrCat(dim.base_dilation());
});
}
if (HasWindowDilation(window)) {
add_field(" rhs_dilate", [](const WindowDimension& dim) {
return StrCat(dim.window_dilation());
});
}
if (HasWindowReversal(window)) {
add_field(" rhs_reversal", [](const WindowDimension& dim) {
return StrCat(dim.window_reversal() ? 1 : 0);
});
}
return str;
}
bool HasStride(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.stride() != 1) {
return true;
}
}
return false;
}
bool HasPadding(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.padding_low() != 0 || dim.padding_high() != 0) {
return true;
}
}
return false;
}
bool HasSymmetricPadding(const Window& window) {
return absl::c_all_of(window.dimensions(), [](const WindowDimension& dim) {
return dim.padding_low() == dim.padding_high();
});
}
bool HasSymmetricPadding(const PaddingConfig& padding_config) {
return absl::c_all_of(padding_config.dimensions(),
[](const PaddingConfig::PaddingConfigDimension& dim) {
return dim.edge_padding_low() ==
dim.edge_padding_high();
});
}
bool HasNegativePadding(const Window& window) {
return absl::c_any_of(window.dimensions(), [](const WindowDimension& dim) {
return dim.padding_low() < 0 || dim.padding_high() < 0;
});
}
bool HasBaseDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.base_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowReversal(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_reversal()) {
return true;
}
}
return false;
}
bool AllOrNoneReversed(const Window& window) {
if (window.dimensions().empty()) {
return true;
}
bool reversed = window.dimensions()[0].window_reversal();
return absl::c_all_of(window.dimensions(), [&](const WindowDimension& dim) {
return dim.window_reversal() == reversed;
});
}
bool HasDilation(const Window& window) {
return HasBaseDilation(window) || HasWindowDilation(window);
}
bool IsTrivialWindowDimension(const WindowDimension& window_dimension) {
return window_dimension.size() == 1 && window_dimension.stride() == 1 &&
window_dimension.padding_low() == 0 &&
window_dimension.padding_high() == 0 &&
window_dimension.window_dilation() == 1 &&
window_dimension.base_dilation() == 1;
}
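// A window overlaps itself when any dimension's size exceeds its stride.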
bool HasOverlappingWindow(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
return false;
}
int64_t DilatedBound(int64_t bound, int64_t dilation) {
CHECK_GE(bound, 0);
CHECK_GE(dilation, 1);
if (bound == 0) {
return 0;
}
return (bound - 1) * dilation + 1;
}
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride) {
CHECK_GE(window_size, 0);
CHECK_GE(bound, 0);
CHECK_GE(stride, 1);
if (bound == 0 || window_size > bound) {
return 0;
}
return (bound - window_size) / stride + 1;
}
}
} | #include "xla/window_util.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
TEST(WindowUtilTest, HasOverlappingWindowTest) {
EXPECT_FALSE(
window_util::HasOverlappingWindow(window_util::MakeWindow({1, 1})));
EXPECT_TRUE(
window_util::HasOverlappingWindow(window_util::MakeWindow({2, 2, 2, 2})));
}
TEST(WindowUtilTest, MakeWindowStrideTest) {
Window w = window_util::MakeWindow({1, 2}, {3, 4});
EXPECT_EQ(w.dimensions()[0].size(), 1);
EXPECT_EQ(w.dimensions()[1].size(), 2);
EXPECT_EQ(w.dimensions()[0].stride(), 3);
EXPECT_EQ(w.dimensions()[1].stride(), 4);
}
}
} |
1,788 | cpp | tensorflow/tensorflow | text_literal_reader | third_party/xla/xla/text_literal_reader.cc | third_party/xla/xla/text_literal_reader_test.cc | #ifndef XLA_TEXT_LITERAL_READER_H_
#define XLA_TEXT_LITERAL_READER_H_
#include <memory>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/literal.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
namespace xla {
class TextLiteralReader {
public:
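  // Parses a text file of the form:
  //   f32[1,2]
  //   (0,0): 42.5
  //   (0,1): 43.5
  // where the first line gives the shape and each subsequent line maps a
  // coordinate tuple to a float value. Only F32 literals are supported.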
static absl::StatusOr<Literal> ReadPath(absl::string_view path);
private:
explicit TextLiteralReader(tsl::RandomAccessFile* file);
absl::StatusOr<Literal> ReadAllLines();
std::unique_ptr<tsl::RandomAccessFile> file_;
TextLiteralReader(const TextLiteralReader&) = delete;
TextLiteralReader& operator=(const TextLiteralReader&) = delete;
};
}
#endif
#include "xla/text_literal_reader.h"
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/io/buffered_inputstream.h"
#include "tsl/lib/io/random_inputstream.h"
#include "tsl/platform/protobuf.h"
namespace xla {
absl::StatusOr<Literal> TextLiteralReader::ReadPath(absl::string_view path) {
CHECK(!absl::EndsWith(path, ".gz"))
<< "TextLiteralReader no longer supports reading .gz files";
std::unique_ptr<tsl::RandomAccessFile> file;
absl::Status s =
tsl::Env::Default()->NewRandomAccessFile(std::string(path), &file);
if (!s.ok()) {
return s;
}
TextLiteralReader reader(file.release());
return reader.ReadAllLines();
}
TextLiteralReader::TextLiteralReader(tsl::RandomAccessFile* file)
: file_(file) {}
absl::StatusOr<Literal> TextLiteralReader::ReadAllLines() {
tsl::io::RandomAccessInputStream stream(file_.get());
tsl::io::BufferedInputStream buf(&stream, 65536);
std::string shape_string;
absl::Status s = buf.ReadLine(&shape_string);
if (!s.ok()) {
return s;
}
absl::StripAsciiWhitespace(&shape_string);
TF_ASSIGN_OR_RETURN(Shape shape, ParseShape(shape_string));
if (shape.element_type() != F32) {
return Unimplemented(
"unsupported element type for text literal reading: %s",
ShapeUtil::HumanString(shape));
}
Literal result(shape);
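  // Pre-fill with NaN so coordinates missing from the file are detectable.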
const float fill = std::numeric_limits<float>::quiet_NaN();
result.PopulateWithValue<float>(fill);
std::vector<absl::string_view> pieces;
std::vector<absl::string_view> coordinates;
std::vector<int64_t> coordinate_values;
std::string line;
while (buf.ReadLine(&line).ok()) {
pieces = absl::StrSplit(line, ':');
absl::string_view coordinates_string =
absl::StripAsciiWhitespace(pieces[0]);
absl::string_view value_string = absl::StripAsciiWhitespace(pieces[1]);
if (!absl::ConsumePrefix(&coordinates_string, "(")) {
return InvalidArgument(
"expected '(' at the beginning of coordinates: \"%s\"", line);
}
if (!absl::ConsumeSuffix(&coordinates_string, ")")) {
return InvalidArgument("expected ')' at the end of coordinates: \"%s\"",
line);
}
float value;
if (!absl::SimpleAtof(value_string, &value)) {
return InvalidArgument("could not parse value as float: \"%s\"",
value_string);
}
coordinates = absl::StrSplit(coordinates_string, ',');
coordinate_values.clear();
for (absl::string_view piece : coordinates) {
int64_t coordinate_value;
if (!absl::SimpleAtoi(piece, &coordinate_value)) {
return InvalidArgument(
"could not parse coordinate member as int64_t: \"%s\"",
std::string(piece));
}
coordinate_values.push_back(coordinate_value);
}
if (coordinate_values.size() != shape.dimensions_size()) {
return InvalidArgument(
"line did not have expected number of coordinates; want %d got %u: "
"\"%s\"",
shape.dimensions_size(), coordinate_values.size(), line);
}
result.Set<float>(coordinate_values, value);
}
return std::move(result);
}
} | #include "xla/text_literal_reader.h"
#include <string>
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
namespace xla {
namespace {
TEST(TextLiteralReaderTest, ReadsR3File) {
std::string contents = R"(f32[1,2,3]
(0,0,0): 42.5
(0,0,1): 43.5
(0,0,2): 44.5
(0,1,0): 45.5
(0,1,1): 46.5
(0,1,2): 47.5
)";
std::string fname = tsl::testing::TmpDir() + "/ReadsR3File.data.txt";
EXPECT_TRUE(
tsl::WriteStringToFile(tsl::Env::Default(), fname, contents).ok());
Literal literal = TextLiteralReader::ReadPath(fname).value();
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {1, 2, 3}), literal.shape()));
EXPECT_EQ(42.5, literal.Get<float>({0, 0, 0}));
EXPECT_EQ(43.5, literal.Get<float>({0, 0, 1}));
EXPECT_EQ(44.5, literal.Get<float>({0, 0, 2}));
EXPECT_EQ(45.5, literal.Get<float>({0, 1, 0}));
EXPECT_EQ(46.5, literal.Get<float>({0, 1, 1}));
EXPECT_EQ(47.5, literal.Get<float>({0, 1, 2}));
}
}
} |
1,789 | cpp | tensorflow/tensorflow | cpu_function_runtime | third_party/xla/xla/cpu_function_runtime.cc | tensorflow/compiler/tf2xla/cpu_function_runtime_test.cc | #ifndef XLA_CPU_FUNCTION_RUNTIME_H_
#define XLA_CPU_FUNCTION_RUNTIME_H_
#include <stdint.h>
#include <cassert>
#include <cstdlib>
namespace xla {
namespace cpu_function_runtime {
struct EncodedBufferInfo {
uint64_t packed_kind_and_size = 0;
uint32_t entry_param_number = -1;
uint32_t result_param_number = -1;
};
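// Describes one buffer used by XLA-generated code: a constant, a temp buffer,
// an entry parameter, or an on-stack allocation. When encoded, the kind is
// packed into the low 2 bits and the size into the upper 62 bits of a single
// uint64_t.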
class BufferInfo {
public:
explicit constexpr BufferInfo(const EncodedBufferInfo& encoded)
: kind_(UnpackKind(encoded.packed_kind_and_size)),
size_(UnpackSize(encoded.packed_kind_and_size)),
entry_param_number_(encoded.entry_param_number),
result_param_number_(encoded.result_param_number) {}
bool is_constant() const { return kind() == Kind::kConstant; }
bool is_entry_parameter() const {
return kind() == Kind::kParameter && entry_param_number_ >= 0;
}
uint32_t entry_parameter_number() const {
assert(is_entry_parameter());
return entry_param_number_;
}
void set_result_parameter_number(uint32_t param_number) {
result_param_number_ = param_number;
}
bool is_result_parameter() const {
return result_param_number_ >= 0;
}
uint32_t result_parameter_number() const {
assert(is_result_parameter());
return result_param_number_;
}
bool is_temp_buffer() const { return kind() == Kind::kTempBuffer; }
bool is_on_stack_buffer() const { return kind() == Kind::kOnStackBuffer; }
uint64_t size() const { return size_; }
EncodedBufferInfo Encode() const {
static_assert(sizeof(*this) == 16, "");
EncodedBufferInfo ret;
ret.packed_kind_and_size = Pack(kind(), size_);
ret.entry_param_number = entry_param_number_;
ret.result_param_number = result_param_number_;
return ret;
}
bool operator==(const BufferInfo& buffer_info) const {
if (kind() != buffer_info.kind() || size() != buffer_info.size()) {
return false;
}
return !is_entry_parameter() ||
entry_parameter_number() == buffer_info.entry_parameter_number();
}
static BufferInfo MakeTempBuffer(uint64_t size) {
return BufferInfo(Kind::kTempBuffer, size);
}
static BufferInfo MakeConstant(uint64_t size) {
return BufferInfo(Kind::kConstant, size);
}
static BufferInfo MakeEntryParameter(uint64_t size,
uint32_t entry_param_number) {
return BufferInfo(Kind::kParameter, size, entry_param_number);
}
static BufferInfo MakeResultParameter(uint64_t size,
uint32_t result_param_number) {
    return BufferInfo(Kind::kTempBuffer, size, /*entry_param_number=*/-1,
                      result_param_number);
}
static BufferInfo MakeOnStackBuffer(uint64_t size) {
return BufferInfo(Kind::kOnStackBuffer, size);
}
private:
BufferInfo() = default;
enum class Kind : uint64_t {
kConstant,
kTempBuffer,
kParameter,
kOnStackBuffer
};
Kind kind() const { return static_cast<Kind>(kind_); }
explicit BufferInfo(Kind kind, uint64_t size)
      : BufferInfo(kind, size, /*entry_param_number=*/-1,
                   /*result_param_number=*/-1) {}
  explicit BufferInfo(Kind kind, uint64_t size, uint32_t entry_param_number)
      : BufferInfo(kind, size, entry_param_number,
                   /*result_param_number=*/-1) {}
explicit BufferInfo(Kind kind, uint64_t size, uint32_t entry_param_number,
uint32_t result_param_number)
: kind_(kind),
size_(size),
entry_param_number_(entry_param_number),
result_param_number_(result_param_number) {}
static uint64_t Pack(Kind kind, uint64_t size) {
return (static_cast<uint64_t>(size) << 2) | static_cast<uint64_t>(kind);
}
static inline constexpr Kind UnpackKind(uint64_t packed) {
return static_cast<Kind>((packed << 62) >> 62);
}
static inline constexpr uint64_t UnpackSize(uint64_t packed) {
return packed >> 2;
}
Kind kind_ : 2;
uint64_t size_ : 62;
int32_t entry_param_number_ = -1;
int32_t result_param_number_ = -1;
};
inline constexpr size_t Align() { return 64; }
inline constexpr size_t MinAlign() { return 16; }
#define XLA_ALIGN alignas(xla::cpu_function_runtime::Align())
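// Returns the total bytes needed to back every temp buffer (plus entry
// parameters when `allocate_entry_params` is true), each rounded up to
// Align() bytes.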
size_t AlignedBufferBytes(const BufferInfo* buffer_infos, size_t n,
bool allocate_entry_params);
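// Allocates one contiguous Align()-aligned region and points bufs[i] at the
// sub-buffer for each BufferInfo that needs backing storage (nullptr for the
// rest). Release the returned base pointer with FreeContiguous().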
void* MallocContiguousBuffers(const BufferInfo* buffer_infos, size_t n,
bool allocate_entry_params, void** bufs,
bool annotate_initialized);
void FreeContiguous(void* contiguous);
}
}
#endif
#include "xla/cpu_function_runtime.h"
#include "absl/base/dynamic_annotations.h"
namespace xla {
namespace {
void* aligned_malloc(size_t size, int minimum_alignment) {
#if defined(__ANDROID__) || defined(OS_ANDROID) || defined(OS_CYGWIN)
return memalign(minimum_alignment, size);
#elif defined(_WIN32)
return _aligned_malloc(size, minimum_alignment);
#else
void* ptr = nullptr;
const int required_alignment = sizeof(void*);
if (minimum_alignment < required_alignment) return malloc(size);
if (posix_memalign(&ptr, minimum_alignment, size) != 0)
return nullptr;
else
return ptr;
#endif
}
void aligned_free(void* aligned_memory) {
#if defined(_WIN32)
_aligned_free(aligned_memory);
#else
free(aligned_memory);
#endif
}
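// Rounds n up to the next multiple of align.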
size_t align_to(size_t n, size_t align) {
return (((n - 1) / align) + 1) * align;
}
}
namespace cpu_function_runtime {
size_t AlignedBufferBytes(const BufferInfo* buffer_infos, size_t n,
bool allocate_entry_params) {
size_t total = 0;
for (size_t i = 0; i < n; ++i) {
bool should_allocate =
buffer_infos[i].is_temp_buffer() ||
(buffer_infos[i].is_entry_parameter() && allocate_entry_params);
if (should_allocate) {
total += align_to(buffer_infos[i].size(), Align());
}
}
return total;
}
void* MallocContiguousBuffers(const BufferInfo* buffer_infos, size_t n,
bool allocate_entry_params, void** bufs,
bool annotate_initialized) {
const size_t total =
AlignedBufferBytes(buffer_infos, n, allocate_entry_params);
void* contiguous = nullptr;
if (total > 0) {
contiguous = aligned_malloc(total, Align());
if (annotate_initialized) {
ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(contiguous, total);
}
}
uintptr_t pos = reinterpret_cast<uintptr_t>(contiguous);
for (size_t i = 0; i < n; ++i) {
bool should_allocate =
buffer_infos[i].is_temp_buffer() ||
(buffer_infos[i].is_entry_parameter() && allocate_entry_params);
if (should_allocate) {
bufs[i] = reinterpret_cast<void*>(pos);
pos += align_to(buffer_infos[i].size(), Align());
} else {
bufs[i] = nullptr;
}
}
return contiguous;
}
void FreeContiguous(void* contiguous) {
if (contiguous != nullptr) {
aligned_free(contiguous);
}
}
}
} | #include "xla/cpu_function_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::xla::cpu_function_runtime::BufferInfo;
TEST(XlaCompiledCpuFunctionTest, AlignmentValue) {
EXPECT_EQ(xla::cpu_function_runtime::Align(), Allocator::kAllocatorAlignment);
EXPECT_LE(xla::cpu_function_runtime::MinAlign(),
Allocator::kAllocatorAlignment);
}
std::vector<BufferInfo> SizesToBufferInfos(const intptr_t* sizes, size_t n) {
std::vector<BufferInfo> buffer_infos;
std::transform(sizes, sizes + n, std::back_inserter(buffer_infos),
[&](intptr_t size) {
if (size == -1) {
int64_t on_stack_buffer_size = 4;
return BufferInfo::MakeOnStackBuffer(on_stack_buffer_size);
}
return BufferInfo::MakeTempBuffer(size);
});
return buffer_infos;
}
size_t AlignedBufferBytesFromSizes(const intptr_t* sizes, size_t n) {
std::vector<BufferInfo> buffer_infos = SizesToBufferInfos(sizes, n);
  return AlignedBufferBytes(buffer_infos.data(), n,
                            /*allocate_entry_params=*/false);
}
void* MallocContiguousBuffersFromSizes(const intptr_t* sizes, size_t n,
void** bufs, bool annotate_initialized) {
std::vector<BufferInfo> buffer_infos = SizesToBufferInfos(sizes, n);
  return MallocContiguousBuffers(buffer_infos.data(), n,
                                 /*allocate_entry_params=*/false, bufs,
                                 annotate_initialized);
}
TEST(XlaCompiledCpuFunctionTest, AlignedBufferBytes) {
EXPECT_EQ(AlignedBufferBytesFromSizes(nullptr, 0), 0);
static constexpr intptr_t sizesA[1] = {-1};
EXPECT_EQ(AlignedBufferBytesFromSizes(sizesA, 1), 0);
static constexpr intptr_t sizesB[1] = {3};
EXPECT_EQ(AlignedBufferBytesFromSizes(sizesB, 1), 64);
static constexpr intptr_t sizesC[1] = {32};
EXPECT_EQ(AlignedBufferBytesFromSizes(sizesC, 1), 64);
static constexpr intptr_t sizesD[7] = {1, -1, 32, -1, 64, 2, 3};
EXPECT_EQ(AlignedBufferBytesFromSizes(sizesD, 7), 320);
}
void* add_ptr(void* base, uintptr_t delta) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base) + delta);
}
TEST(XlaCompiledCpuFunctionTest, MallocFreeContiguousBuffers) {
void* base = MallocContiguousBuffersFromSizes(nullptr, 0, nullptr, false);
EXPECT_EQ(base, nullptr);
xla::cpu_function_runtime::FreeContiguous(base);
static constexpr intptr_t sizesA[1] = {-1};
void* bufA[1];
base = MallocContiguousBuffersFromSizes(sizesA, 1, bufA, false);
EXPECT_EQ(base, nullptr);
EXPECT_EQ(bufA[0], nullptr);
xla::cpu_function_runtime::FreeContiguous(base);
static constexpr intptr_t sizesB[1] = {3};
void* bufB[1];
base = MallocContiguousBuffersFromSizes(sizesB, 1, bufB, false);
EXPECT_NE(base, nullptr);
EXPECT_EQ(bufB[0], add_ptr(base, 0));
char* bufB0_bytes = static_cast<char*>(bufB[0]);
bufB0_bytes[0] = 'A';
bufB0_bytes[1] = 'B';
bufB0_bytes[2] = 'C';
xla::cpu_function_runtime::FreeContiguous(base);
static constexpr intptr_t sizesC[1] = {3};
void* bufC[1];
base = MallocContiguousBuffersFromSizes(sizesC, 1, bufC, true);
EXPECT_NE(base, nullptr);
EXPECT_EQ(bufC[0], add_ptr(base, 0));
char* bufC0_bytes = static_cast<char*>(bufC[0]);
bufC0_bytes[0] = 'A';
bufC0_bytes[1] = 'B';
bufC0_bytes[2] = 'C';
xla::cpu_function_runtime::FreeContiguous(base);
static constexpr intptr_t sizesD[7] = {1, -1, 32, -1, 64, 2, 3};
void* bufD[7];
base = MallocContiguousBuffersFromSizes(sizesD, 7, bufD, false);
EXPECT_NE(base, nullptr);
EXPECT_EQ(bufD[0], add_ptr(base, 0));
EXPECT_EQ(bufD[1], nullptr);
EXPECT_EQ(bufD[2], add_ptr(base, 64));
EXPECT_EQ(bufD[3], nullptr);
EXPECT_EQ(bufD[4], add_ptr(base, 128));
EXPECT_EQ(bufD[5], add_ptr(base, 192));
EXPECT_EQ(bufD[6], add_ptr(base, 256));
for (int i = 0; i < 7; ++i) {
const intptr_t size = sizesD[i];
if (size != -1) {
char* bufD_bytes = static_cast<char*>(bufD[i]);
for (size_t j = 0; j < size; ++j) {
bufD_bytes[j] = 'A' + j;
}
}
}
xla::cpu_function_runtime::FreeContiguous(base);
}
void CheckRoundTripIsOk(const BufferInfo& buffer_info) {
BufferInfo round_trip(buffer_info.Encode());
ASSERT_EQ(round_trip, buffer_info);
}
TEST(XlaCompiledCpuFunctionTest, BufferInfoTest) {
CheckRoundTripIsOk(BufferInfo::MakeTempBuffer(0));
CheckRoundTripIsOk(BufferInfo::MakeTempBuffer(4));
CheckRoundTripIsOk(BufferInfo::MakeOnStackBuffer(0));
CheckRoundTripIsOk(BufferInfo::MakeOnStackBuffer(4));
CheckRoundTripIsOk(BufferInfo::MakeConstant(0));
CheckRoundTripIsOk(BufferInfo::MakeConstant(4));
  CheckRoundTripIsOk(
      BufferInfo::MakeEntryParameter(/*size=*/0, /*entry_param_number=*/4));
  CheckRoundTripIsOk(
      BufferInfo::MakeEntryParameter(/*size=*/4, /*entry_param_number=*/0));
}
}
} |
1,790 | cpp | tensorflow/tensorflow | shape_tree | third_party/xla/xla/shape_tree.cc | third_party/xla/xla/shape_tree_test.cc | #ifndef XLA_SHAPE_TREE_H_
#define XLA_SHAPE_TREE_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/lib/gtl/iterator_range.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace internal {
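// Maps a ShapeIndex to a dense node id. Ids are assigned in pre-order over
// the shape tree; each tuple entry records where its children begin in
// `entries_`, so a lookup costs O(depth of the index).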
class IndexTable {
public:
struct Entry {
size_t node_id;
std::make_signed_t<size_t> children_start_id = -1;
};
IndexTable() = default;
explicit IndexTable(const Shape& shape);
bool empty() const { return entries_.empty(); }
const Entry& operator[](ShapeIndexView index) const;
private:
void CreateEntry(Entry& entry, const Shape& shape, size_t& next_node_id);
absl::InlinedVector<Entry, 1> entries_;
};
}
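// A ShapeTree<T> holds one T per subshape of an XLA Shape, stored as a flat
// pre-order vector of (ShapeIndex, T) pairs; the IndexTable above translates
// a ShapeIndex into a position in that vector.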
template <typename T>
class ShapeTree {
template <typename U>
friend class ShapeTree;
public:
using Node = std::pair<ShapeIndex, T>;
using Nodes = absl::InlinedVector<Node, 1>;
using IndexTable = internal::IndexTable;
template <typename Iterator, typename ValueType>
class LeafIterator;
ShapeTree() : ShapeTree(ShapeUtil::MakeNil()) {}
explicit ShapeTree(Shape shape)
: ShapeTree(std::make_shared<Shape>(std::move(shape))) {}
explicit ShapeTree(const Shape* shape)
: ShapeTree(shape, CreateNodes(*shape)) {}
ShapeTree(Shape shape, const T& init_value)
: ShapeTree(std::make_shared<Shape>(std::move(shape)), init_value) {}
ShapeTree(const Shape* shape, const T& init_value)
: ShapeTree(shape, CreateNodes(*shape, init_value)) {}
const T& element(ShapeIndexView index) const { return find(index)->second; }
T* mutable_element(ShapeIndexView index) { return &find(index)->second; }
const Shape& shape() const { return *shape_; }
void replace_shape_ptr(const Shape& shape) {
if (shape_storage_ != nullptr) {
DCHECK_EQ(shape, *shape_storage_);
shape_storage_ = nullptr;
}
shape_ = &shape;
}
bool IsLeaf(ShapeIndexView index) const {
return index_table_[index].children_start_id == -1;
}
using iterator = typename Nodes::iterator;
using const_iterator = typename Nodes::const_iterator;
using reverse_iterator = typename Nodes::reverse_iterator;
using const_reverse_iterator = typename Nodes::const_reverse_iterator;
using leaf_iterator = LeafIterator<iterator, Node>;
using const_leaf_iterator = LeafIterator<const_iterator, const Node>;
using reverse_leaf_iterator = std::reverse_iterator<leaf_iterator>;
using const_reverse_leaf_iterator =
std::reverse_iterator<const_leaf_iterator>;
iterator begin() { return nodes_.begin(); }
iterator end() { return nodes_.end(); }
const_iterator begin() const { return nodes_.begin(); }
const_iterator end() const { return nodes_.end(); }
reverse_iterator rbegin() { return nodes_.rbegin(); }
reverse_iterator rend() { return nodes_.rend(); }
const_reverse_iterator rbegin() const { return nodes_.rbegin(); }
const_reverse_iterator rend() const { return nodes_.rend(); }
leaf_iterator leaf_begin() { return leaf_iterator(*this, nodes_.begin()); }
leaf_iterator leaf_end() { return leaf_iterator(*this, nodes_.end()); }
const_leaf_iterator leaf_begin() const {
return const_leaf_iterator(*this, nodes_.begin());
}
const_leaf_iterator leaf_end() const {
return const_leaf_iterator(*this, nodes_.end());
}
tsl::gtl::iterator_range<leaf_iterator> leaves() {
return tsl::gtl::make_range(leaf_begin(), leaf_end());
}
tsl::gtl::iterator_range<const_leaf_iterator> leaves() const {
return tsl::gtl::make_range(leaf_begin(), leaf_end());
}
reverse_leaf_iterator leaf_rbegin() {
return reverse_leaf_iterator(leaf_end());
}
reverse_leaf_iterator leaf_rend() {
return reverse_leaf_iterator(leaf_begin());
}
const_reverse_leaf_iterator leaf_rbegin() const {
return const_reverse_leaf_iterator(leaf_end());
}
const_reverse_leaf_iterator leaf_rend() const {
return const_reverse_leaf_iterator(leaf_begin());
}
iterator find(ShapeIndexView index) {
return nodes_.begin() + index_table_[index].node_id;
}
const_iterator find(ShapeIndexView index) const {
return nodes_.begin() + index_table_[index].node_id;
}
int64_t leaf_count() const { return std::distance(leaf_begin(), leaf_end()); }
void ForEachElement(
absl::FunctionRef<void(const ShapeIndex&, const T&)> func) const {
for (const Node& node : nodes_) {
func(node.first, node.second);
}
}
void ForEachMutableElement(
absl::FunctionRef<void(const ShapeIndex&, T*)> func) {
for (Node& node : nodes_) {
func(node.first, &node.second);
}
}
absl::Status ForEachElementWithStatus(
absl::FunctionRef<absl::Status(const ShapeIndex&, const T&)> func) const {
for (const Node& node : nodes_) {
TF_RETURN_IF_ERROR(func(node.first, node.second));
}
return absl::OkStatus();
}
absl::Status ForEachMutableElementWithStatus(
absl::FunctionRef<absl::Status(const ShapeIndex&, T*)> func) {
for (Node& node : nodes_) {
TF_RETURN_IF_ERROR(func(node.first, &node.second));
}
return absl::OkStatus();
}
void ForEachElementPostOrder(
absl::FunctionRef<void(const ShapeIndex&, const T&)> func) const {
for (auto node = nodes_.rbegin(); node != nodes_.rend(); ++node) {
func(node->first, node->second);
}
}
void ForEachMutableElementPostOrder(
absl::FunctionRef<void(const ShapeIndex&, T*)> func) {
for (auto node = nodes_.rbegin(); node != nodes_.rend(); ++node) {
func(node->first, &node->second);
}
}
absl::Status ForEachElementPostOrderWithStatus(
absl::FunctionRef<absl::Status(const ShapeIndex&, const T&)> func) const {
for (auto node = nodes_.rbegin(); node != nodes_.rend(); ++node) {
TF_RETURN_IF_ERROR(func(node->first, node->second));
}
return absl::OkStatus();
}
absl::Status ForEachMutableElementPostOrderWithStatus(
absl::FunctionRef<absl::Status(const ShapeIndex&, T*)> func) {
for (auto node = nodes_.rbegin(); node != nodes_.rend(); ++node) {
TF_RETURN_IF_ERROR(func(node->first, &node->second));
}
return absl::OkStatus();
}
template <typename U>
ShapeTree<U> Map(absl::FunctionRef<U(const T&)> func) {
typename ShapeTree<U>::Nodes result_nodes;
result_nodes.reserve(nodes_.size());
for (const Node& node : nodes_) {
result_nodes.push_back({node.first, func(node.second)});
}
ShapeTree<U> result(shape_, std::move(result_nodes));
result.index_table_ = index_table_;
result.shape_storage_ = shape_storage_;
return result;
}
template <typename U>
absl::StatusOr<ShapeTree<U>> MapWithStatus(
absl::FunctionRef<absl::StatusOr<U>(const T&)> func) {
typename ShapeTree<U>::Nodes result_nodes;
result_nodes.reserve(nodes_.size());
for (const Node& node : nodes_) {
TF_ASSIGN_OR_RETURN(U result, func(node.second));
result_nodes.push_back({node.first, std::move(result)});
}
ShapeTree<U> result(shape_, std::move(result_nodes));
result.index_table_ = index_table_;
result.shape_storage_ = shape_storage_;
return result;
}
void CopySubtreeFrom(const ShapeTree<T>& other, const ShapeIndex& src_index,
const ShapeIndex& dst_index) {
const Shape& src_shape = ShapeUtil::GetSubshape(other.shape(), src_index);
const Shape& dst_shape = ShapeUtil::GetSubshape(shape(), dst_index);
CHECK(ShapeUtil::Compatible(src_shape, dst_shape))
<< src_shape << ", " << dst_shape;
auto replace_shape_index_prefix = [&](const ShapeIndex& index) {
auto without_prefix = ShapeIndexView(index).subspan(src_index.size());
ShapeIndex result;
result.reserve(dst_index.size() + without_prefix.size());
result.insert(result.end(), dst_index.begin(), dst_index.end());
result.insert(result.end(), without_prefix.begin(), without_prefix.end());
return result;
};
auto first = other.find(src_index);
auto last = first + ShapeUtil::SubshapeCount(src_shape);
std::transform(first, last, find(dst_index), [&](const Node& node) -> Node {
return {replace_shape_index_prefix(node.first), node.second};
});
}
absl::StatusOr<ShapeTree<T>> SubShapeTree(const ShapeIndex& index) const {
TF_ASSIGN_OR_RETURN(const Shape* sub_shape,
ShapeUtil::TryGetSubshape(shape(), index));
size_t count = ShapeUtil::SubshapeCount(*sub_shape);
Nodes sub_tree_nodes;
sub_tree_nodes.reserve(count);
for (auto it = find(index), end = it + count; it != end; ++it) {
auto without_prefix = ShapeIndexView(it->first).subspan(index.size());
sub_tree_nodes.push_back(Node{without_prefix, it->second});
}
return ShapeTree(sub_shape, std::move(sub_tree_nodes));
}
bool operator==(const ShapeTree<T>& other) const {
return nodes_ == other.nodes_;
}
bool operator!=(const ShapeTree<T>& other) const { return !(*this == other); }
private:
explicit ShapeTree(std::shared_ptr<Shape> shape) : ShapeTree(shape.get()) {
shape_storage_.swap(shape);
}
ShapeTree(std::shared_ptr<Shape> shape, const T& init_value)
: ShapeTree(shape.get(), init_value) {
shape_storage_.swap(shape);
}
ShapeTree(const Shape* shape, Nodes nodes)
: nodes_(std::move(nodes)), index_table_(*shape), shape_(shape) {
DCHECK_EQ(nodes_.size(), ShapeUtil::SubshapeCount(*shape));
}
template <typename... Ts>
static Nodes CreateNodes(const Shape& shape, Ts&&... args) {
Nodes nodes;
ShapeUtil::ForEachSubshape(
shape, [&](const Shape&, const ShapeIndex& index) {
nodes.push_back({index, T(std::forward<Ts>(args)...)});
});
return nodes;
}
Nodes nodes_;
IndexTable index_table_;
std::shared_ptr<Shape> shape_storage_;
const Shape* shape_;
};
template <typename T>
template <typename Iterator, typename ValueType>
class ShapeTree<T>::LeafIterator {
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = ValueType;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
LeafIterator(const ShapeTree& tree, Iterator it) : tree_(tree), it_(it) {
while ((it_ != tree_.nodes_.end()) && !IsLeaf()) ++it_;
}
LeafIterator& operator++() {
do {
++it_;
} while ((it_ != tree_.nodes_.end()) && !IsLeaf());
return *this;
}
LeafIterator operator++(int) {
auto prev = *this;
++(*this);
return prev;
}
LeafIterator& operator--() {
do {
--it_;
} while ((it_ != tree_.nodes_.begin()) && !IsLeaf());
return *this;
}
LeafIterator operator--(int) {
auto prev = *this;
--(*this);
return prev;
}
bool operator==(const LeafIterator& other) const { return it_ == other.it_; }
bool operator!=(const LeafIterator& other) const { return !(*this == other); }
ValueType& operator*() const { return *it_; }
ValueType* operator->() const { return &*it_; }
private:
bool IsLeaf() const { return tree_.IsLeaf(it_->first); }
const ShapeTree<T>& tree_;
Iterator it_;
};
}
#endif
#include "xla/shape_tree.h"
#include <cstddef>
#include <cstdint>
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace internal {
IndexTable::IndexTable(const Shape& shape) : entries_(1) {
size_t next_node_id = 0;
CreateEntry(entries_[0], shape, next_node_id);
}
void IndexTable::CreateEntry(Entry& entry, const Shape& shape,
size_t& next_node_id) {
entry.node_id = next_node_id++;
if (!shape.IsTuple()) return;
size_t children_start_id = entries_.size();
entry.children_start_id = children_start_id;
entries_.resize(entries_.size() + shape.tuple_shapes_size());
for (size_t i = 0; i < shape.tuple_shapes_size(); ++i) {
CreateEntry(entries_[children_start_id + i], shape.tuple_shapes(i),
next_node_id);
}
}
const IndexTable::Entry& IndexTable::operator[](ShapeIndexView index) const {
const Entry* result = &entries_.front();
for (int64_t i : index) {
CHECK_GE(result->children_start_id, 0);
result = &entries_[result->children_start_id + i];
}
return *result;
}
}
} | #include "xla/shape_tree.h"
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class ShapeTreeTest : public ::testing::Test {
protected:
ShapeTreeTest() {
array_shape_ = ShapeUtil::MakeShape(F32, {42, 42, 123});
tuple_shape_ =
ShapeUtil::MakeTupleShape({array_shape_, array_shape_, array_shape_});
nested_tuple_shape_ = ShapeUtil::MakeTupleShape(
{array_shape_, ShapeUtil::MakeTupleShape({array_shape_, array_shape_}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape_, array_shape_}),
array_shape_})});
}
void TestShapeConstructor(const Shape& shape, int expected_num_nodes);
void TestInitValueConstructor(const Shape& shape, int expected_num_nodes);
Shape array_shape_;
Shape tuple_shape_;
Shape nested_tuple_shape_;
};
TEST_F(ShapeTreeTest, DefaultConstructor) {
ShapeTree<int> int_tree;
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(int_tree.shape()));
ShapeTree<bool> bool_tree;
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(bool_tree.shape()));
}
void ShapeTreeTest::TestShapeConstructor(const Shape& shape,
int expected_num_nodes) {
ShapeTree<int> int_tree(shape);
int num_nodes = 0;
  int_tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(0, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
ShapeTree<bool> bool_tree(shape);
num_nodes = 0;
bool_tree.ForEachElement(
      [&num_nodes](const ShapeIndex& /*index*/, bool data) {
EXPECT_EQ(false, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
}
TEST_F(ShapeTreeTest, ShapeConstructor) {
TestShapeConstructor(array_shape_, 1);
TestShapeConstructor(tuple_shape_, 4);
TestShapeConstructor(nested_tuple_shape_, 10);
}
void ShapeTreeTest::TestInitValueConstructor(const Shape& shape,
int expected_num_nodes) {
ShapeTree<int> tree(shape, 42);
int num_nodes = 0;
  tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(42, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
num_nodes = 0;
tree.ForEachMutableElement(
      [&num_nodes](const ShapeIndex& /*index*/, int* data) {
EXPECT_EQ(42, *data);
*data = num_nodes;
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
num_nodes = 0;
  tree.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(num_nodes, data);
++num_nodes;
});
EXPECT_EQ(expected_num_nodes, num_nodes);
}
TEST_F(ShapeTreeTest, InitValueConstructor) {
TestInitValueConstructor(array_shape_, 1);
TestInitValueConstructor(tuple_shape_, 4);
TestInitValueConstructor(nested_tuple_shape_, 10);
}
TEST_F(ShapeTreeTest, EmptyTupleMustHaveNoLeaves) {
ShapeTree<int> shape_tree{ShapeUtil::MakeTupleShape({})};
EXPECT_EQ(0, shape_tree.leaf_count());
}
TEST_F(ShapeTreeTest, NestedEmptyTuple) {
Shape shape(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({}), array_shape_}));
ShapeTree<int> shape_tree{shape};
EXPECT_EQ(ShapeUtil::GetLeafCount(shape), shape_tree.leaf_count());
}
TEST_F(ShapeTreeTest, ArrayShape) {
ShapeTree<int> shape_tree{array_shape_};
*shape_tree.mutable_element({}) = 42;
EXPECT_EQ(42, shape_tree.element({}));
*shape_tree.mutable_element({}) = 123;
EXPECT_EQ(123, shape_tree.element({}));
EXPECT_TRUE(ShapeUtil::Compatible(array_shape_, shape_tree.shape()));
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(123, copy.element({}));
*copy.mutable_element({}) = 99;
EXPECT_EQ(99, copy.element({}));
EXPECT_EQ(123, shape_tree.element({}));
copy = shape_tree;
EXPECT_EQ(123, copy.element({}));
}
TEST_F(ShapeTreeTest, TupleShape) {
ShapeTree<int> shape_tree{tuple_shape_};
*shape_tree.mutable_element({}) = 1;
*shape_tree.mutable_element({0}) = 42;
*shape_tree.mutable_element({1}) = 123;
*shape_tree.mutable_element({2}) = -100;
EXPECT_EQ(1, shape_tree.element({}));
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1}));
EXPECT_EQ(-100, shape_tree.element({2}));
EXPECT_TRUE(ShapeUtil::Compatible(tuple_shape_, shape_tree.shape()));
int sum = 0;
shape_tree.ForEachElement(
      [&sum](const ShapeIndex& /*index*/, int data) { sum += data; });
EXPECT_EQ(66, sum);
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(1, copy.element({}));
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1}));
EXPECT_EQ(-100, copy.element({2}));
shape_tree.ForEachMutableElement(
      [](const ShapeIndex& /*index*/, int* data) { *data = 0; });
EXPECT_EQ(0, shape_tree.element({}));
EXPECT_EQ(0, shape_tree.element({0}));
EXPECT_EQ(0, shape_tree.element({1}));
EXPECT_EQ(0, shape_tree.element({2}));
EXPECT_EQ(1, copy.element({}));
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1}));
EXPECT_EQ(-100, copy.element({2}));
copy = shape_tree;
EXPECT_EQ(0, copy.element({}));
EXPECT_EQ(0, copy.element({0}));
EXPECT_EQ(0, copy.element({1}));
EXPECT_EQ(0, copy.element({2}));
}
TEST_F(ShapeTreeTest, NestedTupleShape) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
*shape_tree.mutable_element({0}) = 42;
*shape_tree.mutable_element({1, 1}) = 123;
*shape_tree.mutable_element({2, 0, 1}) = -100;
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1, 1}));
EXPECT_EQ(-100, shape_tree.element({2, 0, 1}));
EXPECT_TRUE(ShapeUtil::Compatible(nested_tuple_shape_, shape_tree.shape()));
ShapeTree<int> copy{shape_tree};
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1, 1}));
EXPECT_EQ(-100, copy.element({2, 0, 1}));
*copy.mutable_element({0}) = 1;
*copy.mutable_element({1, 1}) = 2;
*copy.mutable_element({2, 0, 1}) = 3;
EXPECT_EQ(1, copy.element({0}));
EXPECT_EQ(2, copy.element({1, 1}));
EXPECT_EQ(3, copy.element({2, 0, 1}));
EXPECT_EQ(42, shape_tree.element({0}));
EXPECT_EQ(123, shape_tree.element({1, 1}));
EXPECT_EQ(-100, shape_tree.element({2, 0, 1}));
copy = shape_tree;
EXPECT_EQ(42, copy.element({0}));
EXPECT_EQ(123, copy.element({1, 1}));
EXPECT_EQ(-100, copy.element({2, 0, 1}));
}
TEST_F(ShapeTreeTest, InvalidIndexingTuple) {
ShapeTree<int> shape_tree{tuple_shape_};
#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({4}), "");
#endif
}
TEST_F(ShapeTreeTest, InvalidIndexingNestedTuple) {
ShapeTree<int> shape_tree{nested_tuple_shape_};
#ifndef NDEBUG
EXPECT_DEATH(shape_tree.element({0, 0}), "");
#endif
}
TEST_F(ShapeTreeTest, ShapeTreeOfNonCopyableType) {
ShapeTree<std::unique_ptr<int>> shape_tree{tuple_shape_};
EXPECT_EQ(shape_tree.element({2}).get(), nullptr);
*shape_tree.mutable_element({2}) = std::make_unique<int>(42);
EXPECT_EQ(*shape_tree.element({2}), 42);
}
TEST_F(ShapeTreeTest, CopySubtreeFromArrayShape) {
ShapeTree<int> source(array_shape_);
*source.mutable_element({}) = 42;
ShapeTree<int> destination(array_shape_, 123);
EXPECT_EQ(destination.element({}), 123);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{},
                              /*target_base_index=*/{});
EXPECT_EQ(destination.element({}), 42);
}
TEST_F(ShapeTreeTest, FullCopySubtreeFromTupleShape) {
ShapeTree<int> source(tuple_shape_);
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
*source.mutable_element({2}) = 13;
ShapeTree<int> destination(tuple_shape_, 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{},
                              /*target_base_index=*/{});
EXPECT_EQ(destination.element({}), 10);
EXPECT_EQ(destination.element({0}), 11);
EXPECT_EQ(destination.element({1}), 12);
EXPECT_EQ(destination.element({2}), 13);
}
TEST_F(ShapeTreeTest, SingleElementCopySubtreeFromTupleShape) {
ShapeTree<int> source(tuple_shape_);
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
*source.mutable_element({2}) = 13;
ShapeTree<int> destination(tuple_shape_, 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{0},
                              /*target_base_index=*/{1});
EXPECT_EQ(destination.element({}), 0);
EXPECT_EQ(destination.element({0}), 0);
EXPECT_EQ(destination.element({1}), 11);
EXPECT_EQ(destination.element({2}), 0);
}
TEST_F(ShapeTreeTest, CopySubtreeIntoNestedShape) {
ShapeTree<int> source(
ShapeUtil::MakeTupleShape({array_shape_, array_shape_}));
*source.mutable_element({}) = 10;
*source.mutable_element({0}) = 11;
*source.mutable_element({1}) = 12;
ShapeTree<int> destination(nested_tuple_shape_, 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{},
                              /*target_base_index=*/{2, 0});
EXPECT_EQ(destination.element({}), 0);
EXPECT_EQ(destination.element({0}), 0);
EXPECT_EQ(destination.element({1}), 0);
EXPECT_EQ(destination.element({1, 0}), 0);
EXPECT_EQ(destination.element({1, 1}), 0);
EXPECT_EQ(destination.element({2}), 0);
EXPECT_EQ(destination.element({2, 0}), 10);
EXPECT_EQ(destination.element({2, 0, 0}), 11);
EXPECT_EQ(destination.element({2, 0, 1}), 12);
EXPECT_EQ(destination.element({2, 1}), 0);
}
TEST_F(ShapeTreeTest, CopySubtreeFromNestedShape) {
ShapeTree<int> source(nested_tuple_shape_, 42);
*source.mutable_element({1}) = 10;
*source.mutable_element({1, 0}) = 11;
*source.mutable_element({1, 1}) = 12;
ShapeTree<int> destination(
ShapeUtil::MakeTupleShape({array_shape_, array_shape_}), 0);
  destination.CopySubtreeFrom(source, /*source_base_index=*/{1},
                              /*target_base_index=*/{});
EXPECT_EQ(destination.element({}), 10);
EXPECT_EQ(destination.element({0}), 11);
EXPECT_EQ(destination.element({1}), 12);
}
TEST_F(ShapeTreeTest, OperatorEquals) {
{
ShapeTree<int> a(array_shape_, 123);
ShapeTree<int> b(array_shape_, 42);
ShapeTree<int> c(array_shape_, 42);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(b == c);
}
{
ShapeTree<int> a(tuple_shape_);
*a.mutable_element({}) = 10;
*a.mutable_element({0}) = 11;
*a.mutable_element({1}) = 12;
ShapeTree<int> b(tuple_shape_);
*b.mutable_element({}) = 10;
*b.mutable_element({0}) = 42;
*b.mutable_element({1}) = 11;
ShapeTree<int> c(tuple_shape_);
*c.mutable_element({}) = 10;
*c.mutable_element({0}) = 42;
*c.mutable_element({1}) = 11;
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(b == c);
EXPECT_FALSE(b != c);
}
}
TEST_F(ShapeTreeTest, ConstructWithPointerToShape) {
ShapeTree<int> t(&nested_tuple_shape_, 42);
int num_nodes = 0;
  t.ForEachElement([&num_nodes](const ShapeIndex& /*index*/, int data) {
EXPECT_EQ(42, data);
++num_nodes;
});
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, CopyWithPointerToShape) {
ShapeTree<int> source(&nested_tuple_shape_, 0);
ShapeTree<int> dest(source);
EXPECT_EQ(&dest.shape(), &nested_tuple_shape_);
}
TEST_F(ShapeTreeTest, CopyAssignWithPointerToShape) {
ShapeTree<int> source(&nested_tuple_shape_, 0);
ShapeTree<int> dest;
dest = source;
EXPECT_EQ(&dest.shape(), &nested_tuple_shape_);
}
TEST_F(ShapeTreeTest, IterateSimple) {
ShapeTree<int> t(nested_tuple_shape_, 42);
int num_nodes = 0;
for (auto index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
++num_nodes;
}
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, ConstIterate) {
const ShapeTree<int> t(nested_tuple_shape_, 42);
int num_nodes = 0;
for (const auto& index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
++num_nodes;
}
EXPECT_EQ(10, num_nodes);
}
TEST_F(ShapeTreeTest, IterateAndMutate) {
ShapeTree<int> t(nested_tuple_shape_, 42);
int i = 0;
for (auto& index_to_data : t) {
EXPECT_EQ(42, index_to_data.second);
if (i == 1) {
index_to_data.second = 98;
}
++i;
}
(*t.begin()).second = 78;
EXPECT_EQ(78, (*t.begin()).second);
i = 0;
for (auto& index_to_data : t) {
if (i == 0) {
EXPECT_EQ(78, index_to_data.second);
} else if (i == 1) {
EXPECT_EQ(98, index_to_data.second);
} else {
EXPECT_EQ(42, index_to_data.second);
}
++i;
}
EXPECT_EQ(78, (*t.begin()).second);
EXPECT_EQ(98, (*std::next(t.begin())).second);
}
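// Iteration is pre-order: a parent index is visited before the indices of
// its children, and rbegin()/rend() walk the same sequence in reverse.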
TEST_F(ShapeTreeTest, IterateOrder) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto index_to_data : t) {
v.push_back(index_to_data.first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{{},
{0},
{1},
{1, 0},
{1, 1},
{2},
{2, 0},
{2, 0, 0},
{2, 0, 1},
{2, 1}}));
}
TEST_F(ShapeTreeTest, ReverseIterateOrder) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto it = t.rbegin(); it != t.rend(); ++it) {
v.push_back(it->first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{2, 1},
{2, 0, 1},
{2, 0, 0},
{2, 0},
{2},
{1, 1},
{1, 0},
{1},
{0},
{},
}));
}
TEST_F(ShapeTreeTest, Find) {
ShapeTree<int> t(nested_tuple_shape_, 42);
auto found = t.find({1, 0});
EXPECT_NE(found, t.end());
EXPECT_EQ(found->first, ShapeIndex({1, 0}));
EXPECT_EQ(found->second, 42);
}
TEST_F(ShapeTreeTest, ConstFind) {
const ShapeTree<int> t(nested_tuple_shape_, 42);
auto found = t.find({1, 0});
EXPECT_NE(found, t.end());
EXPECT_EQ(found->first, ShapeIndex({1, 0}));
EXPECT_EQ(found->second, 42);
}
TEST_F(ShapeTreeTest, IterateOrderLeaves) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
const auto& leaves = t.leaves();
v.reserve(t.leaf_count());
for (auto index_to_data : leaves) {
v.push_back(index_to_data.first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{0}, {1, 0}, {1, 1}, {2, 0, 0}, {2, 0, 1}, {2, 1}}));
}
TEST_F(ShapeTreeTest, ReverseIterateOrderLeaves) {
ShapeTree<int> t(nested_tuple_shape_, 42);
std::vector<ShapeIndex> v;
v.reserve(t.leaf_count());
for (auto it = t.leaf_rbegin(); it != t.leaf_rend(); ++it) {
v.push_back(it->first);
}
EXPECT_EQ(v, (std::vector<ShapeIndex>{
{2, 1},
{2, 0, 1},
{2, 0, 0},
{1, 1},
{1, 0},
{0},
}));
}
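// Each benchmark builds a nested tuple shape with `depth` levels of tuples,
// each level having `fan_out` children; ArgPair(2, 8) exercises a small deep
// tree and ArgPair(1, 1000) a flat 1000-element tuple.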
void BM_Construct(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
for (auto s : state) {
ShapeTree<int> shape_tree(shape);
}
}
void BM_ConstructUnowned(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
for (auto s : state) {
ShapeTree<int> shape_tree(&shape);
}
}
void BM_Copy(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
ShapeTree<int> copy = shape_tree;
tsl::testing::DoNotOptimize(copy);
}
}
void BM_Move(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
ShapeTree<int> copy = std::move(shape_tree);
shape_tree = std::move(copy);
}
}
void BM_ForEach(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
shape_tree.ForEachMutableElement([](const ShapeIndex& index, int* data) {
tsl::testing::DoNotOptimize(index);
});
}
}
void BM_Iterate(::testing::benchmark::State& state) {
const int depth = state.range(0);
const int fan_out = state.range(1);
Shape shape = ShapeUtil::MakeShape(F32, {32, 64, 128});
for (int i = 0; i < depth; ++i) {
std::vector<xla::Shape> shapes(fan_out, shape);
shape = ShapeUtil::MakeTupleShape(shapes);
}
ShapeTree<int> shape_tree(shape);
for (auto s : state) {
for (auto& iter : shape_tree) {
tsl::testing::DoNotOptimize(iter.second);
}
}
}
#define BENCHMARK_WITH_ARGS(name) \
BENCHMARK(name)->ArgPair(2, 8)->ArgPair(1, 1000)
BENCHMARK_WITH_ARGS(BM_Construct);
BENCHMARK_WITH_ARGS(BM_ConstructUnowned);
BENCHMARK_WITH_ARGS(BM_Copy);
BENCHMARK_WITH_ARGS(BM_Move);
BENCHMARK_WITH_ARGS(BM_ForEach);
BENCHMARK_WITH_ARGS(BM_Iterate);
}
} |
1,791 | cpp | tensorflow/tensorflow | layout | third_party/xla/xla/layout.cc | third_party/xla/xla/layout_test.cc | #ifndef XLA_LAYOUT_H_
#define XLA_LAYOUT_H_
#include <cstdint>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/printer.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
class Shape;
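// Describes one level of tiling in a layout. A dimension equal to
// kCombineDimension (printed as "*") marks a dimension that is combined with
// an adjacent dimension in the tiling.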
class Tile {
public:
Tile() = default;
explicit Tile(absl::Span<const int64_t> dimensions)
: dimensions_(dimensions.begin(), dimensions.end()) {}
static Tile CreateFromProto(const TileProto& tile_proto) {
return Tile(tile_proto.dimensions());
}
TileProto ToProto() const;
void SetProto(TileProto& tile_proto) const;
bool operator==(const Tile& other) const {
return dimensions() == other.dimensions();
}
bool operator!=(const Tile& other) const { return !(*this == other); }
void Print(Printer* printer) const;
std::string ToString() const;
int64_t dimension(int i) const { return dimensions_[i]; }
absl::Span<const int64_t> dimensions() const { return dimensions_; }
Tile& add_dimensions(int64_t value) {
dimensions_.push_back(value);
return *this;
}
Tile& clear_dimensions() {
dimensions_.clear();
return *this;
}
static constexpr int64_t kCombineDimension =
std::numeric_limits<int64_t>::min();
template <typename H>
friend H AbslHashValue(H h, const Tile& t) {
return H::combine(std::move(h), t.dimensions_);
}
private:
absl::InlinedVector<int64_t, 2> dimensions_;
};
using TileVector = absl::InlinedVector<Tile, 3>;
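// Records a dimension together with the indices at which it is split; prints
// as (dimension:idx0,idx1,...) in layout strings.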
class SplitConfig {
public:
SplitConfig(int64_t dimension, absl::Span<const int64_t> split_indices)
: dimension_(dimension),
split_indices_(split_indices.begin(), split_indices.end()) {}
static SplitConfig CreateFromProto(
const SplitConfigProto& split_config_proto) {
return SplitConfig(split_config_proto.dimension(),
split_config_proto.split_indices());
}
SplitConfigProto ToProto() const;
void SetProto(SplitConfigProto& split_config_proto) const;
bool operator==(const SplitConfig& other) const {
return dimension() == other.dimension() &&
split_indices() == other.split_indices();
}
bool operator!=(const SplitConfig& other) const { return !(*this == other); }
std::string ToString() const;
int64_t dimension() const { return dimension_; }
SplitConfig& set_dimension(int64_t dimension) {
dimension_ = dimension;
return *this;
}
absl::Span<const int64_t> split_indices() const { return split_indices_; }
int64_t split_indices(int64_t idx) const { return split_indices_.at(idx); }
int64_t split_indices_size() const { return split_indices_.size(); }
SplitConfig& add_split_indices(int64_t split_index) {
split_indices_.push_back(split_index);
return *this;
}
SplitConfig& clear_split_indices() {
split_indices_.clear();
return *this;
}
template <typename H>
friend H AbslHashValue(H h, const SplitConfig& t) {
return H::combine(std::move(h), t.dimension_, t.split_indices_);
}
private:
int64_t dimension_;
absl::InlinedVector<int64_t, 1> split_indices_;
};
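// Describes how an array is laid out in memory: minor-to-major dimension
// order plus optional tiling, dim-level types (for sparse layouts), element
// size in bits, tail padding alignment, memory space, split configs, and an
// optional physical shape.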
class Layout {
public:
Layout();
Layout(const Layout& other);
Layout(Layout&& other);
~Layout();
explicit Layout(absl::Span<const int64_t> minor_to_major);
explicit Layout(absl::Span<const int64_t> minor_to_major,
absl::Span<const DimLevelType> dim_level_types,
absl::Span<const bool> dim_unique,
absl::Span<const bool> dim_ordered,
absl::Span<const Tile> tiles,
int64_t tail_padding_alignment_in_elements = 1,
PrimitiveType index_primitive_type = PRIMITIVE_TYPE_INVALID,
PrimitiveType element_primitive_type = PRIMITIVE_TYPE_INVALID,
int64_t element_size_in_bits = 0, int64_t memory_space = 0,
absl::Span<const SplitConfig> split_configs = {},
std::unique_ptr<Shape> physical_shape = nullptr,
int64_t dynamic_shape_metadata_prefix_bytes = 0);
Layout& operator=(const Layout& other);
Layout& operator=(Layout&& other);
static Layout CreateFromProto(const LayoutProto& proto);
LayoutProto ToProto() const;
void SetProto(LayoutProto& proto) const;
void Print(Printer* printer) const;
std::string ToString() const;
class Equal {
public:
Equal() = default;
bool operator()(const Layout& lhs, const Layout& rhs);
Equal& IgnoreTiles() {
ignore_tiles_ = true;
return *this;
}
Equal& IgnoreTailPaddingAlignmentInElements() {
ignore_tail_padding_alignment_in_elements_ = true;
return *this;
}
Equal& IgnoreIndexPrimitiveType() {
ignore_index_primitive_type_ = true;
return *this;
}
Equal& IgnorePointerPrimitiveType() {
ignore_pointer_primitive_type_ = true;
return *this;
}
Equal& IgnoreMemorySpace() {
ignore_memory_space_ = true;
return *this;
}
Equal& IgnoreSplitConfigs() {
ignore_split_configs_ = true;
return *this;
}
Equal& IgnorePhysicalShape() {
ignore_physical_shape_ = true;
return *this;
}
Equal& IgnoreElementSize() {
ignore_element_size_ = true;
return *this;
}
Equal& MinorToMajorOnly() {
return IgnoreTiles()
.IgnoreIndexPrimitiveType()
.IgnorePointerPrimitiveType()
.IgnoreMemorySpace()
.IgnorePhysicalShape()
.IgnoreElementSize()
.IgnoreTailPaddingAlignmentInElements();
}
private:
bool ignore_tiles_ = false;
bool ignore_tail_padding_alignment_in_elements_ = false;
bool ignore_element_size_ = false;
bool ignore_index_primitive_type_ = false;
bool ignore_pointer_primitive_type_ = false;
bool ignore_memory_space_ = false;
bool ignore_split_configs_ = false;
bool ignore_physical_shape_ = false;
};
bool operator==(const Layout& other) const;
bool operator!=(const Layout& other) const { return !(*this == other); }
int dim_level_types_size() const { return n_dim_level_types_; }
DimLevelType dim_level_type(int index) const {
return dim_attributes_[index].dim_level_type;
}
Layout& set_dim_level_type(int index, DimLevelType dim_level_type) {
dim_attributes_[index].dim_level_type = dim_level_type;
return *this;
}
Layout& add_dim_level_type(DimLevelType dim_level_type) {
while (n_dim_level_types_ >= dim_attributes_.size()) {
dim_attributes_.push_back(DimInfo());
}
dim_attributes_[n_dim_level_types_].dim_level_type = dim_level_type;
n_dim_level_types_++;
return *this;
}
Layout& clear_dim_level_types() {
n_dim_level_types_ = 0;
return *this;
}
int dim_unique_size() const { return n_dim_unique_; }
bool dim_unique(int index) const { return dim_attributes_[index].dim_unique; }
Layout& set_dim_unique(int index, bool unique) {
dim_attributes_[index].dim_unique = unique;
return *this;
}
Layout& add_dim_unique(bool unique) {
while (n_dim_unique_ >= dim_attributes_.size()) {
dim_attributes_.push_back(DimInfo());
}
dim_attributes_[n_dim_unique_].dim_unique = unique;
n_dim_unique_++;
return *this;
}
int dim_ordered_size() const { return n_dim_ordered_; }
bool dim_ordered(int index) const {
return dim_attributes_[index].dim_ordered;
}
Layout& set_dim_ordered(int index, bool ordered) {
dim_attributes_[index].dim_ordered = ordered;
return *this;
}
Layout& add_dim_ordered(bool ordered) {
while (n_dim_ordered_ >= dim_attributes_.size()) {
dim_attributes_.push_back(DimInfo());
}
dim_attributes_[n_dim_ordered_].dim_ordered = ordered;
n_dim_ordered_++;
return *this;
}
int minor_to_major_size() const { return minor_to_major_.size(); }
int64_t minor_to_major(int index) const { return minor_to_major_[index]; }
Layout& set_minor_to_major(int index, int64_t value) {
minor_to_major_[index] = value;
return *this;
}
Layout& add_minor_to_major(int64_t value) {
minor_to_major_.push_back(value);
return *this;
}
Layout& clear_minor_to_major() {
minor_to_major_.clear();
return *this;
}
Layout& DeleteDimension(int64_t dim_to_delete);
absl::Span<const int64_t> minor_to_major() const { return minor_to_major_; }
DimensionVector* mutable_minor_to_major() { return &minor_to_major_; }
int64_t tiles_size() const { return tiles_.size(); }
const Tile& tiles(int index) const { return tiles_[index]; }
Tile* mutable_tiles(int index) { return &tiles_[index]; }
Tile* add_tiles() {
tiles_.push_back(Tile());
return &tiles_.back();
}
Layout& clear_tiles() {
tiles_.clear();
return *this;
}
absl::Span<const Tile> tiles() const { return tiles_; }
TileVector* mutable_tiles() { return &tiles_; }
int64_t element_size_in_bits() const { return element_size_in_bits_; }
Layout& set_element_size_in_bits(int64_t value) {
element_size_in_bits_ = value;
return *this;
}
int64_t tail_padding_alignment_in_elements() const {
return tail_padding_alignment_in_elements_;
}
Layout& set_tail_padding_alignment_in_elements(int64_t value) {
tail_padding_alignment_in_elements_ = value;
return *this;
}
PrimitiveType index_primitive_type() const { return index_primitive_type_; }
Layout& set_index_primitive_type(PrimitiveType value) {
index_primitive_type_ = value;
return *this;
}
PrimitiveType pointer_primitive_type() const {
return pointer_primitive_type_;
}
Layout& set_pointer_primitive_type(PrimitiveType value) {
pointer_primitive_type_ = value;
return *this;
}
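  // Well-known memory space values; kDefaultMemorySpace (0) is ordinary
  // device memory.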
static constexpr int64_t kDefaultMemorySpace = 0;
static constexpr int64_t kGenericFastMemorySpace = 1;
static constexpr int64_t kHostMemorySpace = 5;
int64_t memory_space() const { return memory_space_; }
Layout& set_memory_space(int64_t value) {
memory_space_ = value;
return *this;
}
int split_configs_size() const { return split_configs_.size(); }
const SplitConfig& split_configs(int index) const {
return split_configs_.at(index);
}
SplitConfig* mutable_split_configs(int index) {
return &split_configs_.at(index);
}
Layout& add_split_configs(const SplitConfig& split_config) {
split_configs_.push_back(split_config);
return *this;
}
void clear_split_configs() { split_configs_.clear(); }
absl::Span<const SplitConfig> split_configs() const { return split_configs_; }
bool has_physical_shape() const { return physical_shape_ != nullptr; }
const Shape& physical_shape() const {
CHECK(has_physical_shape());
return *physical_shape_;
}
Shape* mutable_physical_shape();
void clear_physical_shape();
int64_t dynamic_shape_metadata_prefix_bytes() const {
return dynamic_shape_metadata_prefix_bytes_;
}
void set_dynamic_shape_metadata_prefix_bytes(int64_t bytes) {
dynamic_shape_metadata_prefix_bytes_ = bytes;
}
void Swap(Layout* other) {
using std::swap;
swap(*this, *other);
}
void Clear() { *this = Layout(); }
template <typename H>
friend H AbslHashValue(H h, const Layout& l) {
return H::combine(std::move(h), l.minor_to_major_, l.tiles_,
l.element_size_in_bits_, l.index_primitive_type_,
l.pointer_primitive_type_, l.memory_space_,
l.split_configs_, l.tail_padding_alignment_in_elements_);
}
private:
struct DimInfo {
DimInfo()
: dim_level_type(DIM_DENSE), dim_unique(false), dim_ordered(false) {}
DimLevelType dim_level_type : 6;
bool dim_unique : 1;
bool dim_ordered : 1;
};
absl::InlinedVector<DimInfo, InlineRank()> dim_attributes_;
uint8_t n_dim_level_types_ = 0;
uint8_t n_dim_unique_ = 0;
uint8_t n_dim_ordered_ = 0;
PrimitiveType index_primitive_type_ : 8;
PrimitiveType pointer_primitive_type_ : 8;
int8_t memory_space_ = 0;
int64_t element_size_in_bits_ = 0;
DimensionVector minor_to_major_;
TileVector tiles_;
absl::InlinedVector<SplitConfig, 1> split_configs_;
int64_t tail_padding_alignment_in_elements_ = 1;
std::unique_ptr<Shape> physical_shape_;
int64_t dynamic_shape_metadata_prefix_bytes_ = 0;
};
std::ostream& operator<<(std::ostream& out, const Tile& tile);
std::ostream& operator<<(std::ostream& out, const Layout& layout);
}
#endif
#include "xla/layout.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
TileProto Tile::ToProto() const {
TileProto tile_proto;
SetProto(tile_proto);
return tile_proto;
}
void Tile::SetProto(TileProto& tile_proto) const {
tile_proto.Clear();
for (int64_t i : dimensions()) {
tile_proto.add_dimensions(i);
}
}
void Tile::Print(Printer* printer) const {
printer->Append("(");
AppendJoin(printer, dimensions(), ",", [&](Printer* printer, int64_t dim) {
if (dim >= 0) {
printer->Append(dim);
} else {
if (dim == kCombineDimension) {
printer->Append("*");
} else {
printer->Append("Invalid value ");
printer->Append(dim);
}
}
});
printer->Append(")");
}
std::string Tile::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
Layout::Layout()
: index_primitive_type_(PRIMITIVE_TYPE_INVALID),
pointer_primitive_type_(PRIMITIVE_TYPE_INVALID) {}
SplitConfigProto SplitConfig::ToProto() const {
SplitConfigProto split_config_proto;
split_config_proto.set_dimension(dimension_);
for (int64_t i : split_indices_) {
split_config_proto.add_split_indices(i);
}
return split_config_proto;
}
void SplitConfig::SetProto(SplitConfigProto& split_config_proto) const {
split_config_proto.Clear();
split_config_proto.set_dimension(dimension_);
for (int64_t i : split_indices_) {
split_config_proto.add_split_indices(i);
}
}
std::string SplitConfig::ToString() const {
return absl::StrCat("(", dimension_, ":", absl::StrJoin(split_indices_, ","),
")");
}
Layout::Layout(absl::Span<const int64_t> minor_to_major)
: index_primitive_type_(PRIMITIVE_TYPE_INVALID),
pointer_primitive_type_(PRIMITIVE_TYPE_INVALID),
minor_to_major_(minor_to_major.begin(), minor_to_major.end()) {}
Layout::Layout(absl::Span<const int64_t> minor_to_major,
absl::Span<const DimLevelType> dim_level_types,
absl::Span<const bool> dim_unique,
absl::Span<const bool> dim_ordered, absl::Span<const Tile> tiles,
int64_t tail_padding_alignment_in_elements,
PrimitiveType index_primitive_type,
PrimitiveType element_primitive_type,
int64_t element_size_in_bits, int64_t memory_space,
absl::Span<const SplitConfig> split_configs,
std::unique_ptr<Shape> physical_shape,
int64_t dynamic_shape_metadata_prefix_bytes)
: index_primitive_type_(index_primitive_type),
pointer_primitive_type_(element_primitive_type),
memory_space_(memory_space),
element_size_in_bits_(element_size_in_bits),
minor_to_major_(minor_to_major.begin(), minor_to_major.end()),
tiles_(tiles.begin(), tiles.end()),
split_configs_(split_configs.begin(), split_configs.end()),
tail_padding_alignment_in_elements_(tail_padding_alignment_in_elements),
physical_shape_(std::move(physical_shape)),
dynamic_shape_metadata_prefix_bytes_(
dynamic_shape_metadata_prefix_bytes) {
n_dim_level_types_ = dim_level_types.size();
n_dim_unique_ = dim_unique.size();
n_dim_ordered_ = dim_ordered.size();
const int n_attributes = std::max<int>(
n_dim_level_types_, std::max<int>(n_dim_unique_, n_dim_ordered_));
dim_attributes_.resize(n_attributes);
for (int i = 0; i < n_attributes; i++) {
if (i < n_dim_level_types_)
dim_attributes_[i].dim_level_type = dim_level_types[i];
if (i < n_dim_unique_) dim_attributes_[i].dim_unique = dim_unique[i];
if (i < n_dim_ordered_) dim_attributes_[i].dim_ordered = dim_ordered[i];
}
}
Layout::Layout(const Layout& other)
: dim_attributes_(other.dim_attributes_),
n_dim_level_types_(other.n_dim_level_types_),
n_dim_unique_(other.n_dim_unique_),
n_dim_ordered_(other.n_dim_ordered_),
index_primitive_type_(other.index_primitive_type_),
pointer_primitive_type_(other.pointer_primitive_type_),
memory_space_(other.memory_space_),
element_size_in_bits_(other.element_size_in_bits_),
minor_to_major_(other.minor_to_major_),
tiles_(other.tiles_),
split_configs_(other.split_configs_),
tail_padding_alignment_in_elements_(
other.tail_padding_alignment_in_elements_),
physical_shape_(other.physical_shape_ != nullptr
? std::make_unique<Shape>(*other.physical_shape_)
: nullptr),
dynamic_shape_metadata_prefix_bytes_(
other.dynamic_shape_metadata_prefix_bytes_) {}
Layout::Layout(Layout&& other) = default;
Layout::~Layout() = default;
Layout& Layout::operator=(const Layout& other) {
if (this != &other) {
dim_attributes_ = other.dim_attributes_;
n_dim_level_types_ = other.n_dim_level_types_;
n_dim_unique_ = other.n_dim_unique_;
n_dim_ordered_ = other.n_dim_ordered_;
minor_to_major_ = other.minor_to_major_;
tiles_ = other.tiles_;
tail_padding_alignment_in_elements_ =
other.tail_padding_alignment_in_elements_;
index_primitive_type_ = other.index_primitive_type_;
pointer_primitive_type_ = other.pointer_primitive_type_;
element_size_in_bits_ = other.element_size_in_bits_;
memory_space_ = other.memory_space_;
split_configs_ = other.split_configs_;
if (other.physical_shape_ != nullptr) {
physical_shape_ = std::make_unique<Shape>(*other.physical_shape_);
} else {
physical_shape_ = nullptr;
}
dynamic_shape_metadata_prefix_bytes_ =
other.dynamic_shape_metadata_prefix_bytes_;
}
return *this;
}
Layout& Layout::operator=(Layout&& other) = default;
Layout Layout::CreateFromProto(const LayoutProto& proto) {
Layout layout;
for (int dim_level_type : proto.dim_level_types()) {
layout.add_dim_level_type(static_cast<DimLevelType>(dim_level_type));
}
for (bool dim_unique : proto.dim_unique()) {
layout.add_dim_unique(dim_unique);
}
for (bool dim_ordered : proto.dim_ordered()) {
layout.add_dim_ordered(dim_ordered);
}
layout.minor_to_major_.reserve(proto.minor_to_major_size());
for (const int64_t dimension : proto.minor_to_major()) {
layout.add_minor_to_major(dimension);
}
for (const TileProto& tile_proto : proto.tiles()) {
*layout.add_tiles() = Tile::CreateFromProto(tile_proto);
}
if (proto.tail_padding_alignment_in_elements() != 0) {
layout.set_tail_padding_alignment_in_elements(
proto.tail_padding_alignment_in_elements());
} else {
layout.set_tail_padding_alignment_in_elements(1);
}
layout.set_index_primitive_type(proto.index_primitive_type());
layout.set_pointer_primitive_type(proto.pointer_primitive_type());
layout.set_element_size_in_bits(proto.element_size_in_bits());
layout.set_memory_space(proto.memory_space());
for (const SplitConfigProto& split_config_proto : proto.split_configs()) {
layout.add_split_configs(SplitConfig::CreateFromProto(split_config_proto));
}
if (proto.has_physical_shape()) {
*layout.mutable_physical_shape() = Shape(proto.physical_shape());
}
layout.set_dynamic_shape_metadata_prefix_bytes(
proto.dynamic_shape_metadata_prefix_bytes());
return layout;
}
LayoutProto Layout::ToProto() const {
LayoutProto proto;
SetProto(proto);
return proto;
}
void Layout::SetProto(LayoutProto& proto) const {
proto.Clear();
for (int i = 0; i < n_dim_level_types_; i++) {
proto.add_dim_level_types(dim_level_type(i));
}
for (int i = 0; i < n_dim_unique_; i++) {
proto.add_dim_unique(dim_unique(i));
}
for (int i = 0; i < n_dim_ordered_; i++) {
proto.add_dim_ordered(dim_ordered(i));
}
proto.mutable_minor_to_major()->Reserve(minor_to_major_size());
for (const int64_t dimension : minor_to_major()) {
proto.add_minor_to_major(dimension);
}
for (const Tile& tile : tiles()) {
tile.SetProto(*proto.add_tiles());
}
proto.set_tail_padding_alignment_in_elements(
tail_padding_alignment_in_elements());
proto.set_index_primitive_type(index_primitive_type());
proto.set_pointer_primitive_type(pointer_primitive_type());
proto.set_element_size_in_bits(element_size_in_bits_);
proto.set_memory_space(memory_space_);
for (const SplitConfig& split_config : split_configs()) {
split_config.SetProto(*proto.add_split_configs());
}
if (has_physical_shape()) {
*proto.mutable_physical_shape() = physical_shape_->ToProto();
}
proto.set_dynamic_shape_metadata_prefix_bytes(
dynamic_shape_metadata_prefix_bytes_);
}
namespace {
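// Single-letter abbreviation of a DimLevelType as it appears in layout
// strings: D(ense), C(ompressed), S(ingleton), H (loose compressed).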
absl::string_view DimLevelTypeAbbrev(DimLevelType dim_level_type) {
switch (dim_level_type) {
case DIM_DENSE:
return "D";
case DIM_COMPRESSED:
return "C";
case DIM_SINGLETON:
return "S";
case xla::DIM_LOOSE_COMPRESSED:
return "H";
default:
LOG(FATAL) << "Invalid DimLevelType value: " << dim_level_type;
}
}
}
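// Prints the layout in its textual form, e.g. {3,2,1,0:T(42,123)(4,5)E(42)}.
// After the minor-to-major list, each optional field is appended in order:
// dim-level types D(...), tiles T(...), tail padding alignment L(...), index
// primitive type #(...), pointer primitive type *(...), element size E(...),
// memory space S(...), split configs SC(...), physical shape P(...), and
// dynamic-shape metadata prefix bytes M(...).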
void Layout::Print(Printer* printer) const {
printer->Append("{");
AppendJoin(printer, minor_to_major(), ",");
bool colon_printed = false;
auto print_colon = [&]() {
if (colon_printed) return;
printer->Append(":");
colon_printed = true;
};
if (n_dim_level_types_ > 0) {
auto print_one = [&](int i) {
printer->Append(DimLevelTypeAbbrev(dim_level_type(i)));
if (n_dim_unique_ > 0 && !dim_unique(i)) {
printer->Append("+");
}
if (n_dim_ordered_ > 0 && !dim_ordered(i)) {
printer->Append("~");
}
};
print_colon();
printer->Append("D(");
print_one(0);
for (int i = 1; i < n_dim_level_types_; ++i) {
printer->Append(",");
print_one(i);
}
printer->Append(")");
}
if (!tiles().empty()) {
print_colon();
printer->Append("T");
for (const Tile& tile : tiles()) {
tile.Print(printer);
}
}
if (tail_padding_alignment_in_elements() != 1) {
print_colon();
printer->Append("L(");
printer->Append(tail_padding_alignment_in_elements());
printer->Append(")");
}
if (index_primitive_type() != PRIMITIVE_TYPE_INVALID) {
print_colon();
if (primitive_util::IsIntegralType(index_primitive_type())) {
printer->Append("#(");
printer->Append(
primitive_util::LowercasePrimitiveTypeName(index_primitive_type()));
printer->Append(")");
} else {
printer->Append("#(invalid)");
}
}
if (pointer_primitive_type() != PRIMITIVE_TYPE_INVALID) {
print_colon();
if (primitive_util::IsIntegralType(pointer_primitive_type())) {
printer->Append("*(");
printer->Append(
primitive_util::LowercasePrimitiveTypeName(pointer_primitive_type()));
printer->Append(")");
} else {
printer->Append("*(invalid)");
}
}
if (element_size_in_bits() != 0) {
print_colon();
printer->Append("E(");
printer->Append(element_size_in_bits());
printer->Append(")");
}
if (memory_space() != 0) {
print_colon();
printer->Append("S(");
printer->Append(memory_space());
printer->Append(")");
}
if (!split_configs().empty()) {
print_colon();
printer->Append("SC");
for (const auto& split_config : split_configs()) {
printer->Append(split_config.ToString());
}
}
if (has_physical_shape()) {
print_colon();
printer->Append("P(");
physical_shape_->Print(printer, true);
printer->Append(")");
}
if (dynamic_shape_metadata_prefix_bytes_ > 0) {
print_colon();
printer->Append("M(");
printer->Append(dynamic_shape_metadata_prefix_bytes());
printer->Append(")");
}
printer->Append("}");
}
std::string Layout::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
bool Layout::Equal::operator()(const Layout& lhs, const Layout& rhs) {
if (!LayoutUtil::IsDense(lhs) || !LayoutUtil::IsDense(rhs)) {
if (lhs.dim_level_types_size() != rhs.dim_level_types_size()) {
return false;
}
for (int i = 0; i < lhs.dim_level_types_size(); i++) {
if (lhs.dim_level_type(i) != rhs.dim_level_type(i)) {
return false;
}
}
if (lhs.dim_unique_size() != rhs.dim_unique_size()) {
return false;
}
for (int i = 0; i < lhs.dim_unique_size(); i++) {
if (lhs.dim_unique(i) != rhs.dim_unique(i)) {
return false;
}
}
if (lhs.dim_ordered_size() != rhs.dim_ordered_size()) {
return false;
}
for (int i = 0; i < lhs.dim_ordered_size(); i++) {
if (lhs.dim_ordered(i) != rhs.dim_ordered(i)) {
return false;
}
}
}
if (lhs.minor_to_major() != rhs.minor_to_major()) {
return false;
}
if (!ignore_tiles_ && lhs.tiles() != rhs.tiles()) {
return false;
}
if (!ignore_tail_padding_alignment_in_elements_ &&
lhs.tail_padding_alignment_in_elements() !=
rhs.tail_padding_alignment_in_elements()) {
return false;
}
if (!ignore_index_primitive_type_ &&
lhs.index_primitive_type() != rhs.index_primitive_type()) {
return false;
}
if (!ignore_pointer_primitive_type_ &&
lhs.pointer_primitive_type() != rhs.pointer_primitive_type()) {
return false;
}
if (!ignore_element_size_ &&
lhs.element_size_in_bits() != rhs.element_size_in_bits()) {
return false;
}
if (!ignore_memory_space_ && lhs.memory_space() != rhs.memory_space()) {
return false;
}
if (!ignore_split_configs_ && lhs.split_configs() != rhs.split_ | #include "xla/layout.h"
#include <cstdint>
#include <memory>
#include <sstream>
#include <vector>
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class LayoutTest : public ::testing::Test {};
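// Exercises string formatting, stream output, equality (including the
// Ignore* knobs of Layout::Equal), and LayoutProto round trips.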
TEST_F(LayoutTest, ToString) {
EXPECT_EQ(Layout().ToString(), "{}");
  EXPECT_EQ(Layout({4, 5, 6}).ToString(), "{4,5,6}");
EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
.ToString(),
"{3,2,1,0:T(42,123)(4,5)}");
EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
.set_tail_padding_alignment_in_elements(100)
.set_element_size_in_bits(42)
.ToString(),
"{3,2,1,0:T(42,123)(4,5)L(100)E(42)}");
EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
.set_memory_space(3)
.ToString(),
"{3,2,1,0:T(42,123)(4,5)S(3)}");
EXPECT_EQ(Layout({0, 1}, {}, {}, {}, {Tile({123})})
.add_split_configs(SplitConfig(0, {3}))
.add_split_configs(SplitConfig(1, {0, 4}))
.ToString(),
"{0,1:T(123)SC(0:3)(1:0,4)}");
}
TEST_F(LayoutTest, StreamOut) {
{
std::ostringstream oss;
oss << Tile({7, 8});
EXPECT_EQ(oss.str(), "(7,8)");
}
{
std::ostringstream oss;
oss << Layout({0, 1, 2});
EXPECT_EQ(oss.str(), "{0,1,2}");
}
}
TEST_F(LayoutTest, Equality) {
EXPECT_EQ(Layout(), Layout());
const std::vector<int64_t> empty_dims;
EXPECT_EQ(Layout(empty_dims), Layout(empty_dims));
EXPECT_EQ(Layout(), Layout(empty_dims));
EXPECT_EQ(Layout({0, 1, 2, 3}), Layout({0, 1, 2, 3}));
EXPECT_NE(Layout({0, 1, 2, 3}), Layout({0, 1, 2}));
EXPECT_EQ(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}));
EXPECT_NE(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 45})}));
EXPECT_NE(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2, 3}));
EXPECT_EQ(Layout({0, 1, 2}).set_element_size_in_bits(33),
Layout({0, 1, 2}).set_element_size_in_bits(33));
EXPECT_NE(Layout({0, 1, 2}).set_element_size_in_bits(33),
Layout({0, 1, 2}).set_element_size_in_bits(7));
EXPECT_EQ(Layout({0, 1, 2}).set_memory_space(3),
Layout({0, 1, 2}).set_memory_space(3));
EXPECT_NE(Layout({0, 1, 2}).set_memory_space(1),
Layout({0, 1, 2}).set_memory_space(3));
EXPECT_FALSE(Layout::Equal()(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2})));
EXPECT_EQ(Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})),
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})));
EXPECT_NE(Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})),
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {3})));
EXPECT_TRUE(Layout::Equal().IgnoreTiles()(
Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}), Layout({0, 1, 2})));
EXPECT_FALSE(Layout::Equal()(
Layout({0, 1, 2}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
PRIMITIVE_TYPE_INVALID, 32),
Layout({0, 1, 2}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
PRIMITIVE_TYPE_INVALID, 1)));
EXPECT_TRUE(Layout::Equal().IgnoreElementSize()(
Layout({0, 1, 2}).set_element_size_in_bits(32),
Layout({0, 1, 2}).set_element_size_in_bits(1)));
EXPECT_TRUE(Layout::Equal().IgnoreMemorySpace()(
Layout({0, 1, 2}).set_memory_space(1),
Layout({0, 1, 2}).set_memory_space(3)));
EXPECT_TRUE(Layout::Equal().IgnoreSplitConfigs()(
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})),
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {3}))));
}
TEST_F(LayoutTest, LayoutToFromProto) {
auto expect_unchanged = [](const Layout& layout) {
EXPECT_EQ(layout, Layout::CreateFromProto(layout.ToProto()));
};
expect_unchanged(Layout());
expect_unchanged(Layout({1, 3, 2, 0}));
expect_unchanged(Layout({0, 1}).set_element_size_in_bits(42));
expect_unchanged(
Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})}));
expect_unchanged(Layout({1, 0}, {DIM_DENSE, DIM_COMPRESSED}, {}, {}, {}));
expect_unchanged(
Layout({1, 0}, {DIM_DENSE, DIM_COMPRESSED}, {}, {}, {}, 1,
PRIMITIVE_TYPE_INVALID, PRIMITIVE_TYPE_INVALID, 0, 0, {},
std::make_unique<Shape>(ShapeUtil::MakeShape(S32, {10, 10}))));
expect_unchanged(Layout({0, 1}, {}, {}, {}, {Tile({123})})
.add_split_configs(SplitConfig(0, {3}))
.add_split_configs(SplitConfig(1, {0, 4})));
}
}
} |
1,792 | cpp | tensorflow/tensorflow | parse_flags_from_env | third_party/xla/xla/parse_flags_from_env.cc | third_party/xla/xla/parse_flags_from_env_test.cc | #ifndef XLA_PARSE_FLAGS_FROM_ENV_H_
#define XLA_PARSE_FLAGS_FROM_ENV_H_
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/types.h"
namespace xla {
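// Parses flags from the named environment variable. If the variable's value
// starts with '-', it is parsed as a flag string; otherwise it is treated as
// the name of a file whose contents hold the flags. The DieIfUnknown variant
// aborts the process if unrecognized flags remain after parsing.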
void ParseFlagsFromEnvAndDieIfUnknown(absl::string_view envvar,
const std::vector<tsl::Flag>& flag_list);
void ParseFlagsFromEnvAndIgnoreUnknown(absl::string_view envvar,
const std::vector<tsl::Flag>& flag_list);
void ResetFlagsFromEnvForTesting(absl::string_view envvar, int** pargc,
std::vector<char*>** pargv);
}
#endif
#include "xla/parse_flags_from_env.h"
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/logging.h"
namespace xla {
static const char kWS[] = " \t\r\n";
namespace {
struct FreeDeleter {
void operator()(char* ptr) { free(ptr); }
};
struct EnvArgv {
EnvArgv() : initialized(false), argc(0) {}
bool initialized;
int argc;
std::vector<char*> argv;
std::vector<std::unique_ptr<char, FreeDeleter>> argv_save;
};
}
static void AppendToEnvArgv(const char* s0, size_t s0len, const char* s1,
size_t s1len, EnvArgv* a) {
if (s0 == nullptr) {
a->argv.push_back(nullptr);
a->argv_save.push_back(nullptr);
} else {
std::string s = std::string(s0, s0len) + std::string(s1, s1len);
char* str = strdup(s.c_str());
a->argv.push_back(str);
a->argv_save.emplace_back(str);
a->argc++;
}
}
static size_t FindFirstOf(const std::string& s, const char* x, size_t pos) {
size_t result = s.find_first_of(x, pos);
return result == std::string::npos ? s.size() : result;
}
static size_t FindFirstNotOf(const std::string& s, const char* x, size_t pos) {
size_t result = s.find_first_not_of(x, pos);
return result == std::string::npos ? s.size() : result;
}
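// Tokenizes a whitespace-separated string of --flag[=value] options into
// argv entries. A value may be single- or double-quoted; within double
// quotes a backslash escapes the following character.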
static void ParseArgvFromString(const std::string& flag_str, EnvArgv* a) {
size_t b = FindFirstNotOf(flag_str, kWS, 0);
while (b != flag_str.size() && flag_str[b] == '-') {
size_t e = b;
while (e != flag_str.size() && isascii(flag_str[e]) &&
(strchr("-_", flag_str[e]) != nullptr ||
absl::ascii_isalnum(flag_str[e]))) {
e++;
}
if (e != flag_str.size() && flag_str[e] == '=' &&
e + 1 != flag_str.size() && strchr("'\"", flag_str[e + 1]) != nullptr) {
int c;
e++;
size_t eflag = e;
char quote = flag_str[e];
e++;
std::string value;
for (; e != flag_str.size() && (c = flag_str[e]) != quote; e++) {
if (quote == '"' && c == '\\' && e + 1 != flag_str.size()) {
e++;
c = flag_str[e];
}
value += c;
}
if (e != flag_str.size()) {
e++;
}
AppendToEnvArgv(flag_str.data() + b, eflag - b, value.data(),
value.size(), a);
} else {
e = FindFirstOf(flag_str, kWS, e);
AppendToEnvArgv(flag_str.data() + b, e - b, "", 0, a);
}
b = FindFirstNotOf(flag_str, kWS, e);
}
}
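// Lazily fills *a from the environment variable: a dummy argv[0] first, then
// the parsed flags, then a terminating nullptr entry.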
static void SetArgvFromEnv(absl::string_view envvar, EnvArgv* a) {
if (!a->initialized) {
static const char kDummyArgv[] = "<argv[0]>";
AppendToEnvArgv(kDummyArgv, strlen(kDummyArgv), nullptr, 0,
a);
const char* env = getenv(std::string(envvar).c_str());
if (env == nullptr || env[0] == '\0') {
} else if (env[strspn(env, kWS)] == '-') {
ParseArgvFromString(env, a);
} else {
FILE* fp = fopen(env, "r");
if (fp != nullptr) {
std::string str;
char buf[512];
int n;
while ((n = fread(buf, 1, sizeof(buf), fp)) > 0) {
str.append(buf, n);
}
fclose(fp);
ParseArgvFromString(str, a);
} else {
LOG(QFATAL)
<< "Could not open file \"" << env
<< "\" to read flags for environment variable \"" << envvar
<< "\". (We assumed \"" << env
<< "\" was a file name because it did not start with a \"--\".)";
}
}
AppendToEnvArgv(nullptr, 0, nullptr, 0, a);
a->initialized = true;
}
}
static absl::flat_hash_map<std::string, EnvArgv>& EnvArgvs() {
static auto* env_argvs = new absl::flat_hash_map<std::string, EnvArgv>();
return *env_argvs;
}
static absl::Mutex env_argv_mu(absl::kConstInit);
static void DieIfEnvHasUnknownFlagsLeft(absl::string_view envvar);
void ParseFlagsFromEnvAndDieIfUnknown(absl::string_view envvar,
const std::vector<tsl::Flag>& flag_list) {
ParseFlagsFromEnvAndIgnoreUnknown(envvar, flag_list);
DieIfEnvHasUnknownFlagsLeft(envvar);
}
void ParseFlagsFromEnvAndIgnoreUnknown(
absl::string_view envvar, const std::vector<tsl::Flag>& flag_list) {
absl::MutexLock lock(&env_argv_mu);
auto* env_argv = &EnvArgvs()[envvar];
SetArgvFromEnv(envvar, env_argv);
if (VLOG_IS_ON(1)) {
VLOG(1) << "For env var " << envvar << " found arguments:";
for (int i = 0; i < env_argv->argc; i++) {
VLOG(1) << " argv[" << i << "] = " << env_argv->argv[i];
}
}
QCHECK(tsl::Flags::Parse(&env_argv->argc, env_argv->argv.data(), flag_list))
<< "Flag parsing failed.\n"
<< tsl::Flags::Usage(getenv(std::string(envvar).c_str()), flag_list);
}
static void DieIfEnvHasUnknownFlagsLeft(absl::string_view envvar) {
absl::MutexLock lock(&env_argv_mu);
auto* env_argv = &EnvArgvs()[envvar];
SetArgvFromEnv(envvar, env_argv);
if (env_argv->argc != 1) {
auto unknown_flags = absl::MakeSpan(env_argv->argv);
unknown_flags.remove_prefix(1);
LOG(QFATAL) << "Unknown flag" << (unknown_flags.size() > 1 ? "s" : "")
<< " in " << envvar << ": "
<< absl::StrJoin(unknown_flags, " ");
}
}
void ResetFlagsFromEnvForTesting(absl::string_view envvar, int** pargc,
std::vector<char*>** pargv) {
absl::MutexLock lock(&env_argv_mu);
EnvArgvs().erase(envvar);
auto& env_argv = EnvArgvs()[envvar];
*pargc = &env_argv.argc;
*pargv = &env_argv.argv;
}
} | #include "xla/parse_flags_from_env.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/str_format.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/subprocess.h"
#include "tsl/platform/test.h"
namespace xla {
static void TestParseFlagsFromEnv(const char* msg) {
int* pargc;
std::vector<char*>* pargv;
ResetFlagsFromEnvForTesting("TF_XLA_FLAGS", &pargc, &pargv);
bool simple = false;
std::string with_value;
std::string embedded_quotes;
std::string single_quoted;
std::string double_quoted;
std::vector<tsl::Flag> flag_list = {
tsl::Flag("simple", &simple, ""),
tsl::Flag("with_value", &with_value, ""),
tsl::Flag("embedded_quotes", &embedded_quotes, ""),
tsl::Flag("single_quoted", &single_quoted, ""),
tsl::Flag("double_quoted", &double_quoted, ""),
};
ParseFlagsFromEnvAndDieIfUnknown("TF_XLA_FLAGS", flag_list);
CHECK_EQ(*pargc, 1) << msg;
const std::vector<char*>& argv_second = *pargv;
CHECK_NE(argv_second[0], nullptr) << msg;
CHECK_EQ(argv_second[1], nullptr) << msg;
CHECK(simple) << msg;
CHECK_EQ(with_value, "a_value") << msg;
CHECK_EQ(embedded_quotes, "single'double\"") << msg;
CHECK_EQ(single_quoted, "single quoted \\\\ \n \"") << msg;
CHECK_EQ(double_quoted, "double quoted \\ \n '\"") << msg;
}
static const char kTestFlagString[] =
"--simple "
"--with_value=a_value "
"--embedded_quotes=single'double\" "
"--single_quoted='single quoted \\\\ \n \"' "
"--double_quoted=\"double quoted \\\\ \n '\\\"\" ";
TEST(ParseFlagsFromEnv, Basic) {
tsl::setenv("TF_XLA_FLAGS", kTestFlagString, true );
TestParseFlagsFromEnv("(flags in environment variable)");
}
TEST(ParseFlagsFromEnv, File) {
static const char* kTempVars[] = {"TEST_TMPDIR", "TMP"};
static const char kTempDir[] = "/tmp";
const char* tmp_dir = nullptr;
for (int i = 0; i != TF_ARRAYSIZE(kTempVars) && tmp_dir == nullptr; i++) {
tmp_dir = getenv(kTempVars[i]);
}
if (tmp_dir == nullptr) {
tmp_dir = kTempDir;
}
std::string tmp_file =
absl::StrFormat("%s/parse_flags_from_env.%d", tmp_dir, getpid());
FILE* fp = fopen(tmp_file.c_str(), "w");
CHECK_NE(fp, nullptr) << "can't write to " << tmp_file;
for (int i = 0; kTestFlagString[i] != '\0'; i++) {
putc(kTestFlagString[i], fp);
}
fflush(fp);
CHECK_EQ(ferror(fp), 0) << "writes failed to " << tmp_file;
fclose(fp);
tsl::setenv("TF_XLA_FLAGS", tmp_file.c_str(), true );
TestParseFlagsFromEnv("(flags in file)");
unlink(tmp_file.c_str());
}
static const char* binary_name;
TEST(ParseFlagsFromEnv, EnvAndFlag) {
static struct {
const char* env;
const char* arg;
const char* expected_value;
} test[] = {
{nullptr, nullptr, "1\n"},
{nullptr, "--int_flag=2", "2\n"},
{"--int_flag=3", nullptr, "3\n"},
{"--int_flag=3", "--int_flag=2", "2\n"},
};
for (int i = 0; i != TF_ARRAYSIZE(test); i++) {
if (test[i].env == nullptr) {
tsl::unsetenv("TF_XLA_FLAGS");
} else {
tsl::setenv("TF_XLA_FLAGS", test[i].env, true);
}
tsl::SubProcess child;
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--recursing");
if (test[i].arg != nullptr) {
argv.push_back(test[i].arg);
}
child.SetProgram(binary_name, argv);
child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
CHECK(child.Start()) << "test " << i;
std::string stdout_str;
std::string stderr_str;
int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
CHECK_EQ(child_status, 0) << "test " << i << "\nstdout\n"
<< stdout_str << "\nstderr\n"
<< stderr_str;
stdout_str.erase(std::remove(stdout_str.begin(), stdout_str.end(), '\r'),
stdout_str.end());
CHECK_EQ(stdout_str, test[i].expected_value) << "test " << i;
}
}
TEST(ParseFlagsFromEnv, ErrorOutOnFlagFailure) {
const char* env = "--int_flag=3parsefailure";
if (env == nullptr) {
tsl::unsetenv("TF_XLA_FLAGS");
} else {
tsl::setenv("TF_XLA_FLAGS", env, true);
}
tsl::SubProcess child;
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--recursing");
child.SetProgram(binary_name, argv);
child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
EXPECT_TRUE(child.Start());
std::string stdout_str;
std::string stderr_str;
int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
EXPECT_NE(child_status, 0);
}
TEST(ParseFlagsFromEnv, ErrorOutOnUnknownFlag) {
const char* env = "--int_flag=3 --unknown_flag=value";
if (env == nullptr) {
tsl::unsetenv("TF_XLA_FLAGS");
} else {
tsl::setenv("TF_XLA_FLAGS", env, true);
}
tsl::SubProcess child;
std::vector<std::string> argv;
argv.push_back(binary_name);
argv.push_back("--recursing");
child.SetProgram(binary_name, argv);
child.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
child.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
EXPECT_TRUE(child.Start());
std::string stdout_str;
std::string stderr_str;
int child_status = child.Communicate(nullptr, &stdout_str, &stderr_str);
EXPECT_NE(child_status, 0);
}
}
int main(int argc, char* argv[]) {
xla::binary_name = argv[0];
bool recursing = false;
int32_t int_flag = 1;
const std::vector<tsl::Flag> flag_list = {
tsl::Flag("recursing", &recursing,
"Whether the binary is being invoked recursively."),
tsl::Flag("int_flag", &int_flag, "An integer flag to test with"),
};
std::string usage = tsl::Flags::Usage(argv[0], flag_list);
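  // Parse the environment first; tsl::Flags::Parse then applies the command
  // line, so explicit command-line flags override TF_XLA_FLAGS (see the
  // EnvAndFlag test above).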
xla::ParseFlagsFromEnvAndDieIfUnknown("TF_XLA_FLAGS", flag_list);
tsl::Flags::Parse(&argc, argv, flag_list);
if (recursing) {
printf("%d\n", int_flag);
exit(0);
}
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} |
1,793 | cpp | tensorflow/tensorflow | outfeed_receiver | third_party/xla/xla/python/outfeed_receiver.cc | third_party/xla/xla/python/outfeed_receiver_test.cc | #ifndef XLA_PYTHON_OUTFEED_RECEIVER_H_
#define XLA_PYTHON_OUTFEED_RECEIVER_H_
#include <sys/types.h>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_builder.h"
#include "xla/literal.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
namespace xla {
class OutfeedReceiverImpl;
class OutfeedReceiver {
public:
using Callback = std::function<void(ifrt::PjRtDevice*, uint32_t,
std::shared_ptr<Literal>)>;
OutfeedReceiver(
Callback callback, absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options);
OutfeedReceiver(const OutfeedReceiver&) = delete;
OutfeedReceiver& operator=(const OutfeedReceiver&) = delete;
~OutfeedReceiver();
void Start();
absl::StatusOr<XlaOp> AddOutfeedToBuilder(XlaBuilder* builder, XlaOp token,
uint32_t consumer_id,
std::vector<XlaOp> arrays,
uint32_t device_idx);
private:
std::unique_ptr<OutfeedReceiverImpl> p_impl_;
};
}
#endif
#include "xla/python/outfeed_receiver.h"
#include <sys/types.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/sharding_builder.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
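// Every outfeed transfer is preceded by a two-word u32 header: the start
// marker kOutfeedHeaderStart followed by the consumer ID. Consumer ID 0
// (kOutfeedCidShutdown) is reserved for telling listener threads to exit.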
int constexpr kOutfeedHeaderWords = 2;
uint32_t constexpr kOutfeedHeaderStart = 271828;
uint32_t constexpr kOutfeedCidShutdown = 0;
class OutfeedData {
public:
OutfeedData(ifrt::PjRtDevice* device, uint32_t consumer_id, Shape shape)
: device_(device),
consumer_id_(consumer_id),
shape_(shape),
literal_(nullptr),
literal_size_bytes_(0) {}
ifrt::PjRtDevice* device() { return device_; }
uint32_t consumer_id() const { return consumer_id_; }
Shape shape() const { return shape_; }
std::unique_ptr<Literal> literal() {
CHECK(literal_);
return std::move(literal_);
}
void SetLiteral(std::unique_ptr<Literal> literal);
ssize_t literal_size_bytes() const { return literal_size_bytes_; }
std::string DebugString() const;
private:
ifrt::PjRtDevice* device_;
uint32_t consumer_id_;
Shape shape_;
std::unique_ptr<Literal> literal_;
ssize_t literal_size_bytes_;
};
void OutfeedData::SetLiteral(std::unique_ptr<Literal> literal) {
literal_ = std::move(literal);
shape_ = literal_->shape();
int total_size_bytes = 0;
ShapeUtil::ForEachSubshape(
shape_, [&](const Shape& literal_subshape, const ShapeIndex& index) {
if (!literal_subshape.IsTuple()) {
total_size_bytes += ShapeUtil::ByteSizeOf(literal_subshape, 8);
}
});
literal_size_bytes_ = total_size_bytes;
}
std::string OutfeedData::DebugString() const {
return absl::StrFormat("dev=%s; cons=%d; shape=%s", device_->DebugString(),
consumer_id_, shape_.ToString());
}
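// Runs one listener thread and one callback thread per addressable device.
// Listeners stage received literals in per-device queues; the total bytes
// queued across devices is bounded by max_callback_queue_size_bytes, and
// callback threads drain the queues to invoke the user callback.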
class OutfeedReceiverImpl {
public:
OutfeedReceiverImpl(
OutfeedReceiver::Callback callback,
absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options);
OutfeedReceiverImpl(const OutfeedReceiverImpl&) = delete;
OutfeedReceiverImpl& operator=(const OutfeedReceiverImpl&) = delete;
~OutfeedReceiverImpl();
void Start();
absl::StatusOr<XlaOp> AddOutfeedToBuilder(XlaBuilder* builder, XlaOp token,
uint32_t consumer_id,
std::vector<XlaOp> arrays,
uint32_t device_idx);
private:
bool CallbackQueueHasSpace() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return callback_queue_size_bytes_ < max_callback_queue_size_bytes_;
}
bool ShutdownDone() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (num_working_callback_threads_ == 0 && num_listening_threads_ == 0);
}
void CallbackThreadLoop(int device_idx);
void DeviceListenerThreadLoop(int device_idx);
absl::Status SendShutdownOutfeedHeader(int device_idx);
absl::StatusOr<std::unique_ptr<Literal>> ReceiveRawFromOutfeed(
ifrt::PjRtDevice* device, const Shape& shape);
void EnqueueReceivedData(uint32_t device_idx,
std::unique_ptr<OutfeedData> received)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Shutdown();
OutfeedReceiver::Callback callback_;
std::vector<ifrt::PjRtDevice*> devices_;
uint64_t max_callback_queue_size_bytes_;
std::optional<ExecutableBuildOptions> executable_build_options_;
absl::Mutex mu_;
absl::flat_hash_map<uint32_t, Shape> shape_registry_ ABSL_GUARDED_BY(mu_);
uint64_t callback_queue_size_bytes_ ABSL_GUARDED_BY(mu_);
int num_listening_threads_ ABSL_GUARDED_BY(mu_);
bool shutdown_started_ ABSL_GUARDED_BY(mu_);
int num_working_callback_threads_ ABSL_GUARDED_BY(mu_);
std::vector<std::queue<std::unique_ptr<OutfeedData>>> callback_queues_
ABSL_GUARDED_BY(mu_);
std::unique_ptr<tsl::thread::ThreadPool> threads_;
};
OutfeedReceiverImpl::OutfeedReceiverImpl(
OutfeedReceiver::Callback callback,
absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options)
: executable_build_options_(executable_build_options) {
callback_ = callback;
max_callback_queue_size_bytes_ = max_callback_queue_size_bytes;
for (const auto& client : clients) {
for (auto device : client->addressable_devices()) {
devices_.push_back(tensorflow::down_cast<ifrt::PjRtDevice*>(device));
}
}
CHECK_GT(devices_.size(), 0);
callback_queues_ =
std::vector<std::queue<std::unique_ptr<OutfeedData>>>(devices_.size());
callback_queue_size_bytes_ = 0;
num_listening_threads_ = 0;
num_working_callback_threads_ = 0;
shutdown_started_ = false;
}
void OutfeedReceiverImpl::Start() {
{
absl::MutexLock lock(&mu_);
CHECK(!shutdown_started_);
}
int num_threads = 2 * devices_.size();
threads_ = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), "outfeed_receiver", num_threads);
for (int device_idx = 0; device_idx < devices_.size(); ++device_idx) {
threads_->Schedule(
[this, device_idx]() { DeviceListenerThreadLoop(device_idx); });
threads_->Schedule(
[this, device_idx]() { CallbackThreadLoop(device_idx); });
}
}
void OutfeedReceiverImpl::Shutdown() {
VLOG(2) << "Shutdown start";
{
absl::MutexLock lock(&mu_);
CHECK(!shutdown_started_);
shutdown_started_ = true;
}
for (int device_idx = 0; device_idx < devices_.size(); ++device_idx) {
TF_CHECK_OK(SendShutdownOutfeedHeader(device_idx));
}
VLOG(2) << "Shutdown waiting for listening and callback threads to stop";
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::ShutdownDone));
VLOG(2) << "Shutdown done";
}
OutfeedReceiverImpl::~OutfeedReceiverImpl() {
VLOG(2) << "~OutfeedReceiverImpl";
Shutdown();
}
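// Listener loop for one device: repeatedly reads a header, resolves the shape
// registered for the consumer id, reads the payload, and enqueues it for the
// callback thread. A shutdown header terminates the loop.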
void OutfeedReceiverImpl::DeviceListenerThreadLoop(int device_idx) {
{
absl::MutexLock lock(&mu_);
++num_listening_threads_;
}
ifrt::PjRtDevice* device = devices_[device_idx];
while (true) {
Shape header_shape = ShapeUtil::MakeShape(U32, {kOutfeedHeaderWords});
std::unique_ptr<Literal> header =
ReceiveRawFromOutfeed(device, header_shape).value();
absl::Span<uint32_t> header_data = header->data<uint32_t>();
CHECK_EQ(header_data.size(), kOutfeedHeaderWords);
CHECK_EQ(header_data[0], kOutfeedHeaderStart);
uint32_t consumer_id = header_data[1];
Shape shape;
{
absl::MutexLock lock(&mu_);
auto registered_shape = shape_registry_.find(consumer_id);
if (registered_shape == shape_registry_.end()) {
LOG(FATAL)
<< "[" << device->DebugString()
<< "] Cannot find registered shape for consumer ID " << consumer_id
<< ". Perhaps the code was compiled with a different instance "
<< "of OutfeedReceiver.";
}
shape = registered_shape->second;
}
auto received = std::make_unique<OutfeedData>(device, consumer_id, shape);
VLOG(2) << "Listener received header " << received->DebugString();
if (consumer_id == kOutfeedCidShutdown) {
VLOG(2) << "[" << device->DebugString()
<< "] Listener received shutdown header";
absl::MutexLock lock(&mu_);
--num_listening_threads_;
VLOG(2) << "[" << device->DebugString() << "] Enqueue shutdown callback";
EnqueueReceivedData(device_idx, std::move(received));
return;
}
std::unique_ptr<Literal> data =
ReceiveRawFromOutfeed(device, shape).value();
received->SetLiteral(std::move(data));
absl::MutexLock lock(&mu_);
EnqueueReceivedData(device_idx, std::move(received));
}
}
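// Blocks until the callback queue has space, then appends `received` and
// charges its literal size against the shared queue-size budget.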
void OutfeedReceiverImpl::EnqueueReceivedData(
uint32_t device_idx, std::unique_ptr<OutfeedData> received)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::CallbackQueueHasSpace));
ssize_t literal_size_bytes = received->literal_size_bytes();
callback_queue_size_bytes_ += literal_size_bytes;
VLOG(2) << "Listener enqueues data " << received->DebugString() << " of size "
<< literal_size_bytes << " bytes; "
<< (1 + callback_queues_[device_idx].size())
<< " callbacks in queue of total size " << callback_queue_size_bytes_
<< " bytes.\n";
callback_queues_[device_idx].push(std::move(received));
}
absl::StatusOr<std::unique_ptr<Literal>>
OutfeedReceiverImpl::ReceiveRawFromOutfeed(ifrt::PjRtDevice* device,
const Shape& shape) {
auto literal = std::make_unique<Literal>(shape);
TF_RETURN_IF_ERROR(
device->client()->TransferFromOutfeed(device, literal.get()));
return literal;
}
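// Callback loop for one device: waits for queued data, pops it, and invokes
// the user callback outside the lock. A queued shutdown marker ends the loop.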
void OutfeedReceiverImpl::CallbackThreadLoop(int device_idx) {
const ifrt::PjRtDevice* device = devices_[device_idx];
{
absl::MutexLock lock(&mu_);
num_working_callback_threads_++;
}
while (true) {
std::unique_ptr<OutfeedData> received;
{
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(
+[](std::queue<std::unique_ptr<OutfeedData>>* queue) {
return !queue->empty();
},
&callback_queues_[device_idx]));
received = std::move(callback_queues_[device_idx].front());
callback_queues_[device_idx].pop();
callback_queue_size_bytes_ -= received->literal_size_bytes();
VLOG(2) << "[" << device->DebugString() << "] Dequeued callback for "
<< received->DebugString() << "; "
<< callback_queues_[device_idx].size()
<< " callbacks in queue of total size "
<< callback_queue_size_bytes_ << " bytes.\n";
}
if (received->consumer_id() == kOutfeedCidShutdown) {
VLOG(2) << "[" << device->DebugString()
<< "] Callback loop received shutdown signal";
{
absl::MutexLock lock(&mu_);
CHECK(callback_queues_[device_idx].empty());
--num_working_callback_threads_;
}
VLOG(2) << "[" << device->DebugString() << "] Callback loop done";
return;
}
{
tsl::profiler::TraceMe traceme("OutfeedReceiver::Callback");
callback_(received->device(), received->consumer_id(),
received->literal());
}
}
}
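// Compiles and runs a trivial computation on the device whose only effect is
// to outfeed a shutdown header, unblocking that device's listener thread.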
absl::Status OutfeedReceiverImpl::SendShutdownOutfeedHeader(int device_idx) {
const ifrt::PjRtDevice* device = devices_[device_idx];
constexpr int consumer_id = kOutfeedCidShutdown;
VLOG(2) << "[" << device->DebugString()
<< "] SendSpecialHeader cons=" << consumer_id;
XlaBuilder builder(
absl::StrFormat("special_outfeed_header_%d_%d", consumer_id, device_idx));
XlaOp cst_operand = xla::ConstantR0<int32_t>(&builder, 0);
XlaOp outfeed =
AddOutfeedToBuilder(&builder, CreateToken(&builder), consumer_id, {}, 0)
.value();
XlaOp add_dep = xla::internal::XlaBuilderFriend::BuildAddDependency(
&builder, cst_operand, outfeed, ShapeUtil::MakeScalarShape(S32));
XlaComputation computation = builder.Build(add_dep).value();
CompileOptions compile_options;
if (executable_build_options_) {
compile_options.executable_build_options = *executable_build_options_;
}
compile_options.executable_build_options.set_num_replicas(1);
compile_options.executable_build_options.set_num_partitions(1);
DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = device->Id().value();
compile_options.executable_build_options.set_device_assignment(
device_assignment);
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
devices_[device_idx]->client()->pjrt_client()->Compile(
computation, std::move(compile_options)));
ExecuteOptions execute_options;
TF_ASSIGN_OR_RETURN(
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
executable->Execute({{}}, execute_options));
return absl::OkStatus();
}
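// Tuples `arrays`, fills in default layouts for any unset subshapes, checks
// the result against any shape previously registered for this consumer id,
// and emits the header outfeed followed by the data outfeed (the latter is
// skipped for the shutdown consumer id). Both ops are sharded to device_idx.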
absl::StatusOr<XlaOp> OutfeedReceiverImpl::AddOutfeedToBuilder(
XlaBuilder* builder, XlaOp token, uint32_t consumer_id,
std::vector<XlaOp> arrays, uint32_t device_idx) {
XlaOp data = Tuple(builder, std::move(arrays));
Shape shape_with_layout = builder->GetShape(data).value();
ShapeUtil::ForEachMutableSubshape(
&shape_with_layout, [](Shape* subshape, const ShapeIndex&) {
if (!subshape->has_layout()) {
LayoutUtil::SetToDefaultLayout(subshape);
}
});
VLOG(2) << "RegisterShape cons=" << consumer_id
<< "; shape=" << shape_with_layout.ToString();
{
absl::MutexLock lock(&mu_);
auto found = shape_registry_.find(consumer_id);
if (found != shape_registry_.end()) {
if (!ShapeUtil::Equal(shape_with_layout, found->second)) {
return InvalidArgument(
"Shape %s does not match previous shape %s used "
"for consumer id %d",
shape_with_layout.DebugString(), found->second.DebugString(),
consumer_id);
}
} else {
shape_registry_.insert({consumer_id, shape_with_layout});
}
}
std::vector<uint32_t> header{kOutfeedHeaderStart, consumer_id};
XlaOp header_op = ConstantR1<uint32_t>(builder, header);
builder->SetSharding(sharding_builder::AssignDevice(device_idx));
token = OutfeedWithToken(
header_op, token, ShapeUtil::MakeShape(U32, {kOutfeedHeaderWords}), "");
if (consumer_id != kOutfeedCidShutdown) {
token = OutfeedWithToken(data, token, shape_with_layout, "");
}
builder->ClearSharding();
return token;
}
OutfeedReceiver::OutfeedReceiver(
Callback callback, absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options) {
p_impl_ = std::make_unique<OutfeedReceiverImpl>(callback, clients,
max_callback_queue_size_bytes,
executable_build_options);
}
OutfeedReceiver::~OutfeedReceiver() = default;
void OutfeedReceiver::Start() { p_impl_->Start(); }
absl::StatusOr<XlaOp> OutfeedReceiver::AddOutfeedToBuilder(
XlaBuilder* builder, XlaOp token, uint32_t consumer_id,
std::vector<XlaOp> arrays, uint32_t device_idx) {
if (consumer_id == kOutfeedCidShutdown) {
return InvalidArgument("Consumer ID cannot be a reserved value: %d",
consumer_id);
}
return p_impl_->AddOutfeedToBuilder(builder, token, consumer_id, arrays,
device_idx);
}
} | #include "xla/python/outfeed_receiver.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/client/client_library.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_builder.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/service/platform_util.h"
#include "xla/test.h"
namespace xla {
namespace {
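// Compiles `root` for the single device `device_id` and executes it once.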
absl::Status CompileAndExecute(XlaBuilder* builder, XlaOp root, int device_id,
PjRtClient* client) {
XlaComputation computation = builder->Build(root).value();
CompileOptions compile_options;
compile_options.executable_build_options.set_num_replicas(1);
compile_options.executable_build_options.set_num_partitions(1);
DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = device_id;
compile_options.executable_build_options.set_device_assignment(
device_assignment);
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
client->Compile(computation, std::move(compile_options)));
ExecuteOptions execute_options;
TF_ASSIGN_OR_RETURN(
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
executable->Execute({{}}, execute_options));
return absl::OkStatus();
}
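// Test callback sink: records each (consumer_id, literal) pair delivered by
// the outfeed receiver, guarded by a mutex so tests can read them back.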
class Accumulator {
public:
struct Data {
uint32_t consumer_id;
std::shared_ptr<Literal> data;
};
void Receive(uint32_t consumer_id, std::shared_ptr<Literal> data) {
absl::MutexLock lock(&mutex_);
received_.push_back(Data{consumer_id, data});
}
std::vector<Data> received() {
absl::MutexLock lock(&mutex_);
return received_;
}
private:
absl::Mutex mutex_;
std::vector<Data> received_ ABSL_GUARDED_BY(mutex_);
};
TEST(OutfeedReceiverTest, ReceiveOutfeedSimple) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data = Iota(&builder, shape0, 0);
XlaOp send = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder, send, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(1, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
}
TEST(OutfeedReceiverTest, ReceiveOutfeedTwoComputations) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder0("execute_test_outfeed_0");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder0, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder0, CreateToken(&builder0),
consumer_id0, {data0}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder0, send0, 0, cpu_client.get()).ok());
XlaBuilder builder1("execute_test_outfeed_1");
constexpr int consumer_id1 = 6;
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder1, shape1, 0);
XlaOp send1 = outfeed_receiver
->AddOutfeedToBuilder(&builder1, CreateToken(&builder1),
consumer_id1, {data1}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder1, send1, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(2, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
EXPECT_EQ(consumer_id1, received[1].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape1}), received[1].data->shape());
}
TEST(OutfeedReceiverTest, ReceiveOutfeedTwoOutfeed) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data0}, 0)
.value();
constexpr int consumer_id1 = 6;
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder, shape1, 0);
XlaOp send1 =
outfeed_receiver
->AddOutfeedToBuilder(&builder, send0, consumer_id1, {data1}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder, send1, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(2, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
EXPECT_EQ(consumer_id1, received[1].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape1}), received[1].data->shape());
}
TEST(OutfeedReceiverTest, DifferentShapeForConsumerIdError) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data0}, 0)
.value();
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder, shape1, 0);
absl::StatusOr<XlaOp> send1 = outfeed_receiver->AddOutfeedToBuilder(
&builder, send0, consumer_id0, {data1}, 0);
EXPECT_FALSE(send1.ok());
EXPECT_THAT(
send1.status().ToString(),
testing::ContainsRegex(
#if defined(PLATFORM_WINDOWS)
"does not match previous shape \\w*/*\\w* *\\n?element_type"));
#else
"does not match previous shape (go/\\w+[ "
"]+\\n)?element_type"));
#endif
}
TEST(OutfeedReceiverTest, InvalidConsumerIdError) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
absl::StatusOr<XlaOp> send0 = outfeed_receiver->AddOutfeedToBuilder(
&builder, CreateToken(&builder), 0, {data0}, 0);
EXPECT_FALSE(send0.ok());
EXPECT_THAT(send0.status().ToString(),
testing::HasSubstr("Consumer ID cannot be a reserved value"));
}
}
} |
1794 | cpp | tensorflow/tensorflow | sharding | third_party/xla/xla/python/ifrt/sharding.cc | third_party/xla/xla/python/ifrt/sharding_test.cc | #ifndef XLA_PYTHON_IFRT_SHARDING_H_
#define XLA_PYTHON_IFRT_SHARDING_H_
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/log/check.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.pb.h"
namespace xla {
namespace ifrt {
struct DeserializeShardingOptions;
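// Abstract base class describing how an array is laid out across `devices()`
// in a given `memory_kind()`. Subclasses define the shard shape, disassembly
// into per-shard (shape, sharding) pairs, and per-shard index domains.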
class Sharding : public llvm::RTTIExtends<Sharding, Serializable> {
public:
using DeserializeOptions = DeserializeShardingOptions;
const DeviceList& devices() const { return devices_; }
MemoryKind memory_kind() const { return memory_kind_; }
bool IsFullyReplicated() const { return is_fully_replicated_; }
bool operator==(const Sharding& other) const;
bool operator!=(const Sharding& other) const { return !(*this == other); }
virtual absl::StatusOr<Shape> GetShardShape(const Shape& shape) const = 0;
virtual bool HasSamePartitioning(const Sharding& other) const = 0;
virtual absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const = 0;
virtual absl::StatusOr<
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const = 0;
virtual absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const = 0;
virtual absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const = 0;
static absl::StatusOr<std::unique_ptr<Sharding>> FromProto(
DeviceList::LookupDeviceFunc lookup_device,
const ShardingProto& sharding_proto);
absl::StatusOr<ShardingProto> ToProto() const;
virtual std::string DebugString() const = 0;
static char ID;
protected:
Sharding(DeviceList devices, MemoryKind memory_kind, bool is_fully_replicated)
: devices_(devices),
memory_kind_(memory_kind),
is_fully_replicated_(is_fully_replicated) {}
DeviceList devices_;
MemoryKind memory_kind_;
bool is_fully_replicated_;
};
std::ostream& operator<<(std::ostream& os, const Sharding& sharding);
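// Sharding that places the entire array on a single device; trivially fully
// replicated.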
class SingleDeviceSharding final
: public llvm::RTTIExtends<SingleDeviceSharding, Sharding> {
public:
static std::unique_ptr<SingleDeviceSharding> Create(Device* device,
MemoryKind memory_kind);
~SingleDeviceSharding() override = default;
absl::StatusOr<Shape> GetShardShape(const Shape& shape) const override;
bool HasSamePartitioning(const Sharding& other) const override;
absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const override;
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const override;
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const override;
absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const override;
std::string DebugString() const override;
static char ID;
private:
explicit SingleDeviceSharding(Device* device, MemoryKind memory_kind)
: llvm::RTTIExtends<SingleDeviceSharding, Sharding>(
DeviceList({device}), memory_kind, true) {}
};
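// Sharding whose shard shapes and index domains are unknown; only the device
// list and memory kind are tracked.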
class OpaqueSharding : public llvm::RTTIExtends<OpaqueSharding, Sharding> {
public:
static std::unique_ptr<OpaqueSharding> Create(DeviceList devices,
MemoryKind memory_kind);
~OpaqueSharding() override = default;
absl::StatusOr<Shape> GetShardShape(const Shape& shape) const override;
bool HasSamePartitioning(const Sharding& other) const override;
absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const override;
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const override;
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const override;
absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const override;
std::string DebugString() const override;
static char ID;
private:
explicit OpaqueSharding(DeviceList devices, MemoryKind memory_kind);
};
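// Sharding with an explicit, possibly uneven, per-device list of shard shapes
// (static or dynamic). It can only disassemble the exact shape it was created
// with.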
class ConcreteSharding : public llvm::RTTIExtends<ConcreteSharding, Sharding> {
public:
static std::unique_ptr<ConcreteSharding> Create(
DeviceList devices, MemoryKind memory_kind, Shape shape,
std::vector<Shape> shard_shapes);
static std::unique_ptr<ConcreteSharding> Create(
DeviceList devices, MemoryKind memory_kind, DynamicShape dynamic_shape,
std::vector<DynamicShape> shard_dynamic_shapes);
bool has_dynamic_shape() const {
DCHECK(this);
return std::holds_alternative<DynamicShape>(shape_) &&
std::holds_alternative<std::vector<DynamicShape>>(shard_shapes_);
}
bool has_static_shape() const {
DCHECK(this);
return std::holds_alternative<Shape>(shape_) &&
std::holds_alternative<std::vector<Shape>>(shard_shapes_);
}
const Shape& shape() const {
DCHECK(has_static_shape());
return std::get<Shape>(shape_);
}
const DynamicShape& dynamic_shape() const {
DCHECK(has_dynamic_shape());
return std::get<DynamicShape>(shape_);
}
const std::vector<Shape>& shard_shapes() const {
DCHECK(this);
DCHECK(std::holds_alternative<std::vector<Shape>>(shard_shapes_));
return std::get<std::vector<Shape>>(shard_shapes_);
}
const std::vector<DynamicShape>& shard_dynamic_shapes() const {
DCHECK(this);
DCHECK(std::holds_alternative<std::vector<DynamicShape>>(shard_shapes_));
return std::get<std::vector<DynamicShape>>(shard_shapes_);
}
~ConcreteSharding() override = default;
absl::StatusOr<Shape> GetShardShape(const Shape& shape) const override;
bool HasSamePartitioning(const Sharding& other) const override;
absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const override;
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const override;
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const override;
absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const override;
std::string DebugString() const override;
static char ID;
private:
ConcreteSharding(DeviceList devices, MemoryKind memory_kind, Shape shape,
std::vector<Shape> shard_shapes);
ConcreteSharding(DeviceList devices, MemoryKind memory_kind,
DynamicShape dynamic_shape,
std::vector<DynamicShape> shard_dynamic_shapes);
std::variant<Shape, DynamicShape> shape_;
std::variant<std::vector<Shape>, std::vector<DynamicShape>> shard_shapes_;
};
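// Sharding where every device holds a shard of the same `shard_shape`.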
class ConcreteEvenSharding
: public llvm::RTTIExtends<ConcreteEvenSharding, Sharding> {
public:
static std::unique_ptr<ConcreteEvenSharding> Create(
DeviceList devices, MemoryKind memory_kind, Shape shape,
Shape shard_shape, bool is_fully_replicated = false);
Shape shape() const {
DCHECK(this);
return shape_;
}
const Shape& shard_shape() const {
DCHECK(this);
return shard_shape_;
}
~ConcreteEvenSharding() override = default;
absl::StatusOr<Shape> GetShardShape(const Shape& shape) const override;
bool HasSamePartitioning(const Sharding& other) const override;
absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const override;
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const override;
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const override;
absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const override;
std::string DebugString() const override;
static char ID;
private:
ConcreteEvenSharding(DeviceList devices, MemoryKind memory_kind, Shape shape,
Shape shard_shape, bool is_fully_replicated);
Shape shape_;
Shape shard_shape_;
};
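// Sharding derived from an IFRT IR ShardingParam.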
class ShardingParamSharding
: public llvm::RTTIExtends<ShardingParamSharding, Sharding> {
public:
static absl::StatusOr<std::unique_ptr<ShardingParamSharding>> Create(
ShardingParam sharding_param, DeviceList devices, MemoryKind memory_kind);
const ShardingParam& sharding_param() const { return sharding_param_; }
absl::StatusOr<Shape> GetShardShape(const Shape& shape) const override;
bool HasSamePartitioning(const Sharding& other) const override;
absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const override;
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const override;
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const override;
absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const override;
std::string DebugString() const override;
static char ID;
private:
ShardingParamSharding(ShardingParam sharding_param, DeviceList devices,
MemoryKind memory_kind);
ShardingParam sharding_param_;
};
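// Options for deserializing shardings; `lookup_device` maps device ids back
// to live Device objects.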
struct DeserializeShardingOptions
: llvm::RTTIExtends<DeserializeShardingOptions, DeserializeOptions> {
explicit DeserializeShardingOptions(
DeviceList::LookupDeviceFunc lookup_device)
: lookup_device(lookup_device) {}
static char ID;
DeviceList::LookupDeviceFunc lookup_device;
};
}
}
#endif
#include "xla/python/ifrt/sharding.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
bool ComputeIsFullyReplicated(const ShardingParam& sharding_param) {
return llvm::all_of(sharding_param.dim_shards(),
[](auto shards) { return shards == 1; });
}
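// Iterates over the cartesian product of several containers in major-to-minor
// order: the iterator over the last container advances fastest.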
template <typename ContainerT>
class MajorToMinorIter {
public:
using IteratorT = typename ContainerT::const_iterator;
using ValueT = typename ContainerT::value_type;
static MajorToMinorIter<ContainerT> cbegin(
absl::Span<const ContainerT> containers) {
std::vector<IteratorT> iters;
iters.reserve(containers.size());
for (const ContainerT& container : containers) {
iters.push_back(container.cbegin());
}
return MajorToMinorIter(containers, std::move(iters));
}
std::vector<ValueT> operator*() const {
std::vector<ValueT> result;
result.reserve(iters_.size());
for (const auto& iter : iters_) {
result.push_back(*iter);
}
return result;
}
void operator++() {
for (int i = iters_.size() - 1; i >= 0; --i) {
++iters_[i];
if (iters_[i] != containers_[i].end()) {
break;
}
if (i != 0) {
iters_[i] = containers_[i].begin();
}
}
}
bool IsEnd() const {
return iters_.empty() || iters_[0] == containers_[0].end();
}
private:
MajorToMinorIter(absl::Span<const ContainerT> containers,
std::vector<IteratorT> iters)
: containers_(containers), iters_(iters) {
DCHECK_EQ(iters.size(), containers.size());
}
absl::Span<const ContainerT> containers_;
std::vector<IteratorT> iters_;
};
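// Enumerates tile indices for the given per-dimension shard counts in
// major-to-minor order; e.g. dim_shards = [2, 3] yields
// [0,0], [0,1], [0,2], [1,0], [1,1], [1,2].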
std::vector<Index> GetTileIndices(absl::Span<const int64_t> dim_shards) {
std::vector<std::vector<int64_t>> indices;
indices.reserve(dim_shards.size());
for (const int64_t dim_shard : dim_shards) {
std::vector<int64_t> index(dim_shard);
absl::c_iota(index, 0);
indices.push_back(std::move(index));
}
std::vector<Index> result;
int64_t shard_count =
absl::c_accumulate(dim_shards, 1, std::multiplies<int64_t>());
result.reserve(shard_count);
for (auto iter = MajorToMinorIter<std::vector<int64_t>>::cbegin(indices);
!iter.IsEnd(); ++iter) {
result.push_back(Index(*iter));
}
return result;
}
}
char Sharding::ID = 0;
char SingleDeviceSharding::ID = 0;
char OpaqueSharding::ID = 0;
char ConcreteSharding::ID = 0;
char ConcreteEvenSharding::ID = 0;
char ShardingParamSharding::ID = 0;
char DeserializeShardingOptions::ID = 0;
bool Sharding::operator==(const Sharding& other) const {
if (this == &other) {
return true;
}
return HasSamePartitioning(other) && memory_kind_ == other.memory_kind_ &&
devices() == other.devices();
}
absl::StatusOr<std::unique_ptr<Sharding>> Sharding::FromProto(
DeviceList::LookupDeviceFunc lookup_device,
const ShardingProto& sharding_proto) {
return Deserialize<Sharding>(
sharding_proto.serialized_sharding(),
std::make_unique<DeserializeShardingOptions>(std::move(lookup_device)));
}
absl::StatusOr<ShardingProto> Sharding::ToProto() const {
ShardingProto sharding_proto;
TF_ASSIGN_OR_RETURN(*sharding_proto.mutable_serialized_sharding(),
Serialize(const_cast<Sharding&>(*this)));
return sharding_proto;
}
std::ostream& operator<<(std::ostream& os, const Sharding& sharding) {
return os << sharding.DebugString();
}
std::unique_ptr<SingleDeviceSharding> SingleDeviceSharding::Create(
Device* device, MemoryKind memory_kind) {
return std::unique_ptr<SingleDeviceSharding>(
new SingleDeviceSharding(device, memory_kind));
}
absl::StatusOr<Shape> SingleDeviceSharding::GetShardShape(
const Shape& shape) const {
return shape;
}
bool SingleDeviceSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
return llvm::isa<SingleDeviceSharding>(&other);
}
absl::StatusOr<std::unique_ptr<Sharding>>
SingleDeviceSharding::WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && devices->size() != 1) {
return InvalidArgument(
"SingleDeviceSharding can only have one device, but was asked to have "
"%d devices",
devices->size());
}
return Create(devices.value_or(devices_).front(),
memory_kind.value_or(memory_kind_));
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
SingleDeviceSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
return std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>{
{shape, SingleDeviceSharding::Create(devices_[0], memory_kind_)}};
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
SingleDeviceSharding::Disassemble(const DynamicShape& dynamic_shape) const {
DCHECK(this);
return std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>{
{dynamic_shape, SingleDeviceSharding::Create(devices_[0], memory_kind_)}};
}
absl::StatusOr<std::vector<IndexDomain>> SingleDeviceSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
std::vector<IndexDomain> result;
result.reserve(1);
result.push_back(IndexDomain(shape));
return result;
}
std::string SingleDeviceSharding::DebugString() const {
DCHECK(this);
return absl::StrFormat("SingleDeviceSharding(%s, memory_kind: %s)",
devices_.front()->ToString(),
memory_kind_.DebugString());
}
std::unique_ptr<OpaqueSharding> OpaqueSharding::Create(DeviceList devices,
MemoryKind memory_kind) {
return std::unique_ptr<OpaqueSharding>(
new OpaqueSharding(std::move(devices), memory_kind));
}
OpaqueSharding::OpaqueSharding(DeviceList devices, MemoryKind memory_kind)
: llvm::RTTIExtends<OpaqueSharding, Sharding>(
std::move(devices), memory_kind, false) {}
absl::StatusOr<Shape> OpaqueSharding::GetShardShape(const Shape& shape) const {
return InvalidArgument(
"OpaqueSharding does not have shard shape information");
}
bool OpaqueSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
return false;
}
absl::StatusOr<std::unique_ptr<Sharding>> OpaqueSharding::WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && devices->size() != devices_.size()) {
return InvalidArgument(
"OpaqueSharding should have the same number of devices as the current "
"sharding, but was asked to have %d devices",
devices->size());
}
return Create(devices.value_or(devices_), memory_kind.value_or(memory_kind_));
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
OpaqueSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"OpaqueSharding does not have shard shape information");
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
OpaqueSharding::Disassemble(const DynamicShape& dynamic_shape) const {
DCHECK(this);
return InvalidArgument(
"OpaqueSharding does not have shard shape information");
}
absl::StatusOr<std::vector<IndexDomain>> OpaqueSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"OpaqueSharding does not have index domain information");
}
std::string OpaqueSharding::DebugString() const {
DCHECK(this);
return absl::StrFormat(
"OpaqueSharding(devices: %s, memory_kind: %s)",
absl::StrJoin(devices_, ",",
[](std::string* out, const Device* device) {
absl::StrAppend(out, device->ToString());
}),
memory_kind_.DebugString());
}
std::unique_ptr<ConcreteSharding> ConcreteSharding::Create(
DeviceList devices, MemoryKind memory_kind, Shape shape,
std::vector<Shape> shard_shapes) {
CHECK_EQ(devices.size(), shard_shapes.size());
return std::unique_ptr<ConcreteSharding>(
new ConcreteSharding(std::move(devices), memory_kind, std::move(shape),
std::move(shard_shapes)));
}
std::unique_ptr<ConcreteSharding> ConcreteSharding::Create(
DeviceList devices, MemoryKind memory_kind, DynamicShape dynamic_shape,
std::vector<DynamicShape> shard_dynamic_shapes) {
CHECK_EQ(devices.size(), shard_dynamic_shapes.size());
return std::unique_ptr<ConcreteSharding>(new ConcreteSharding(
std::move(devices), memory_kind, std::move(dynamic_shape),
std::move(shard_dynamic_shapes)));
}
ConcreteSharding::ConcreteSharding(DeviceList devices, MemoryKind memory_kind,
Shape shape, std::vector<Shape> shard_shapes)
: llvm::RTTIExtends<ConcreteSharding, Sharding>(
std::move(devices), memory_kind, false),
shape_(std::move(shape)),
shard_shapes_(std::move(shard_shapes)) {}
ConcreteSharding::ConcreteSharding(
DeviceList devices, MemoryKind memory_kind, DynamicShape dynamic_shape,
std::vector<DynamicShape> shard_dynamic_shapes)
: llvm::RTTIExtends<ConcreteSharding, Sharding>(
std::move(devices), memory_kind, false),
shape_(std::move(dynamic_shape)),
shard_shapes_(std::move(shard_dynamic_shapes)) {}
absl::StatusOr<Shape> ConcreteSharding::GetShardShape(
const Shape& shape) const {
return InvalidArgument("ConcreteSharding does not have a fixed shard shape");
}
bool ConcreteSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
const auto* other_concrete_sharding =
llvm::dyn_cast<ConcreteSharding>(&other);
if (!other_concrete_sharding) {
return false;
}
return shape_ == other_concrete_sharding->shape_ &&
shard_shapes_ == other_concrete_sharding->shard_shapes_;
}
absl::StatusOr<std::unique_ptr<Sharding>>
ConcreteSharding::WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && devices->size() != devices_.size()) {
return InvalidArgument(
"ConcreteSharding should have the same number of devices as the "
"current sharding, but was asked to have %d devices",
devices->size());
}
if (has_static_shape()) {
return Create(devices.value_or(devices_),
memory_kind.value_or(memory_kind_), std::get<Shape>(shape_),
std::get<std::vector<Shape>>(shard_shapes_));
} else {
return Create(devices.value_or(devices_),
memory_kind.value_or(memory_kind_),
std::get<DynamicShape>(shape_),
std::get<std::vector<DynamicShape>>(shard_shapes_));
}
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
ConcreteSharding::Disassemble(const Shape& shape) const {
DCHECK(this);
if (!has_static_shape()) {
return InvalidArgument(
"ConcreteSharding holds dynamic shape, but was asked "
"to disassemble static shape %s",
shape.DebugString());
}
if (shape != std::get<Shape>(shape_)) {
return InvalidArgument(
"ConcreteSharding can only disassemble shape %s, but was asked "
"to disassemble shape %s",
std::get<Shape>(shape_).DebugString(), shape.DebugString());
}
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
result.reserve(devices_.size());
const std::vector<Shape>& shard_shapes =
std::get<std::vector<Shape>>(shard_shapes_);
for (int i = 0; i < devices_.size(); ++i) {
result.push_back({shard_shapes[i],
SingleDeviceSharding::Create(devices_[i], memory_kind_)});
}
return result;
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
ConcreteSharding::Disassemble(const DynamicShape& dynamic_shape) const {
DCHECK(this);
if (!has_dynamic_shape()) {
return InvalidArgument(
"ConcreteSharding holds static shape, but was asked "
"to disassemble dynamic shape %s",
dynamic_shape.DebugString());
}
if (dynamic_shape != std::get<DynamicShape>(shape_)) {
return InvalidArgument(
"ConcreteSharding can only disassemble dynamic shape %s, but was asked "
"to disassemble dynamic shape %s",
std::get<DynamicShape>(shape_).DebugString(),
dynamic_shape.DebugString());
}
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>> result;
result.reserve(devices_.size());
const std::vector<DynamicShape>& shard_dynamic_shapes =
std::get<std::vector<DynamicShape>>(shard_shapes_);
for (int i = 0; i < devices_.size(); ++i) {
result.push_back({shard_dynamic_shapes[i],
SingleDeviceSharding::Create(devices_[i], memory_kind_)});
}
return result;
}
absl::StatusOr<std::vector<IndexDomain>> ConcreteSharding::IndexDomains(
const Shape& shape) const {
DCHECK(this);
return InvalidArgument(
"ConcreteSharding does not have index domain information");
}
std::string ConcreteSharding::DebugString() const {
DCHECK(this);
return std::visit(
[this](const auto& shape, const auto& shard_shapes) {
return absl::StrFormat(
"ConcreteSharding(devices: %s, shape: %s, shard_shapes: %s, "
"memory_kind: %s)",
absl::StrJoin(devices_, ",",
[](std::string* out, const Device* device) {
absl::StrAppend(out, device->ToString());
}),
shape.DebugString(),
absl::StrJoin(shard_shapes, ",",
[](std::string* out, const auto& shard_shape) {
absl::StrAppend(out, shard_shape.DebugString());
}),
memory_kind_.DebugString());
},
shape_, shard_shapes_);
}
std::unique_ptr<ConcreteEvenSharding> ConcreteEvenSharding::Create(
DeviceList devices, MemoryKind memory_kind, Shape shape, Shape shard | #include "xla/python/ifrt/sharding.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/ir/sharding_param.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
class SingleDeviceShardingTest : public test_util::ShardingTest {};
class OpaqueShardingTest : public test_util::ShardingTest {};
class ConcreteShardingTest : public test_util::ShardingTest {};
class ConcreteEvenShardingTest : public test_util::ShardingTest {};
class ShardingParamShardingTest : public test_util::ShardingTest {};
TEST_P(SingleDeviceShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device_list.devices().front(), MemoryKind());
EXPECT_TRUE(sharding->IsFullyReplicated());
}
TEST_P(SingleDeviceShardingTest, GetShardShape) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device_list.devices().front(), MemoryKind());
EXPECT_THAT(sharding->GetShardShape(Shape({10, 20})),
IsOkAndHolds(Shape({10, 20})));
}
TEST_P(SingleDeviceShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0});
std::shared_ptr<const Sharding> sharding0 = SingleDeviceSharding::Create(
device_list0.devices().front(), MemoryKind());
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({1});
std::shared_ptr<const Sharding> sharding1 = SingleDeviceSharding::Create(
device_list1.devices().front(), MemoryKind());
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(SingleDeviceShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0});
std::shared_ptr<const Sharding> sharding0 = SingleDeviceSharding::Create(
device_list0.devices().front(), MemoryKind());
{
auto device_list1 = GetDevices({1});
std::shared_ptr<const Sharding> sharding1 = SingleDeviceSharding::Create(
device_list1.devices().front(), MemoryKind());
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1});
EXPECT_THAT(sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("SingleDeviceSharding can only have one "
"device, but was asked to have 2 devices")));
}
}
TEST_P(SingleDeviceShardingTest, IndexDomains) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device_list.devices().front(), MemoryKind());
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains, ElementsAre(IndexDomain(shape)));
}
TEST_P(SingleDeviceShardingTest, Disassemble) {
auto device_list = GetDevices({0});
std::shared_ptr<const Sharding> sharding =
SingleDeviceSharding::Create(device_list.devices().front(), MemoryKind());
{
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(1));
const auto& [result_shape, result_sharding] = disassembled[0];
EXPECT_EQ(shape, result_shape);
EXPECT_EQ(*result_sharding, *sharding);
}
{
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10, 20}),
BoundedDynamicShapeTag({true, true})));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(dynamic_shape));
ASSERT_THAT(disassembled, SizeIs(1));
const auto& [result_shape, result_sharding] = disassembled[0];
EXPECT_EQ(dynamic_shape, result_shape);
EXPECT_EQ(*result_sharding, *sharding);
}
}
TEST_P(OpaqueShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_FALSE(sharding->IsFullyReplicated());
}
TEST_P(OpaqueShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_THAT(sharding->GetShardShape(Shape({10, 20})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have shard shape")));
}
TEST_P(OpaqueShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
OpaqueSharding::Create(device_list0, MemoryKind());
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({2, 3});
    std::shared_ptr<const Sharding> sharding1 =
        OpaqueSharding::Create(device_list1, MemoryKind());
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(OpaqueShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
OpaqueSharding::Create(device_list0, MemoryKind());
{
auto device_list1 = GetDevices({2, 3});
    std::shared_ptr<const Sharding> sharding1 =
        OpaqueSharding::Create(device_list1, MemoryKind());
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
ASSERT_TRUE(llvm::isa<OpaqueSharding>(*new_sharding));
EXPECT_THAT(new_sharding->devices().devices(),
ElementsAreArray(device_list1.devices()));
}
{
auto device_list1 = GetDevices({0, 1, 2, 3});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 4 devices")));
}
}
TEST_P(OpaqueShardingTest, FailedToDisassemble) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_THAT(
sharding->Disassemble(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have shard shape information")));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({30}), BoundedDynamicShapeTag({true})));
EXPECT_THAT(
sharding->Disassemble(dynamic_shape),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have shard shape information")));
}
TEST_P(OpaqueShardingTest, IndexDomainsFails) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
OpaqueSharding::Create(device_list, MemoryKind());
EXPECT_THAT(
sharding->IndexDomains(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("OpaqueSharding does not have index domain information")));
}
TEST_P(ConcreteShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
TEST_P(ConcreteShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_THAT(
sharding->GetShardShape(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding does not have a fixed shard shape")));
}
TEST_P(ConcreteShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1});
std::vector<Shape> shard_shapes0;
shard_shapes0.reserve(2);
shard_shapes0.push_back(Shape({10}));
shard_shapes0.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding0 = ConcreteSharding::Create(
device_list0, MemoryKind(), Shape({30}), shard_shapes0);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({2, 3});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({30}), shard_shapes1);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3, 4});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(3);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
shard_shapes1.push_back(Shape({30}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({60}), shard_shapes1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({40}), shard_shapes1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10000}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({30}), shard_shapes1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(ConcreteShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1});
std::vector<Shape> shard_shapes0;
shard_shapes0.reserve(2);
shard_shapes0.push_back(Shape({10}));
shard_shapes0.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding0 = ConcreteSharding::Create(
device_list0, MemoryKind(), Shape({30}), shard_shapes0);
{
auto device_list1 = GetDevices({0, 1});
std::vector<Shape> shard_shapes1;
shard_shapes1.reserve(2);
shard_shapes1.push_back(Shape({10}));
shard_shapes1.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding1 = ConcreteSharding::Create(
device_list1, MemoryKind(), Shape({30}), shard_shapes1);
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2, 3});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 4 devices")));
}
}
TEST_P(ConcreteShardingTest, Disassemble) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(Shape({30})));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, shard_shapes[i]);
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(ConcreteShardingTest, DisassembleDynamicShape) {
DeviceList device_list = GetDevices({0, 1});
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10}), BoundedDynamicShapeTag({true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape1,
DynamicShape::Create(Shape({3}), BoundedDynamicShapeTag({true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape2,
DynamicShape::Create(Shape({7}), BoundedDynamicShapeTag({true})));
std::vector<DynamicShape> shard_dynamic_shapes{
std::move(shard_dynamic_shape1), std::move(shard_dynamic_shape2)};
auto sharding = ConcreteSharding::Create(device_list, MemoryKind(),
dynamic_shape, shard_dynamic_shapes);
EXPECT_THAT(sharding->Disassemble(Shape({10})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding holds dynamic shape")));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(DynamicShape(dynamic_shape)));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < disassembled.size(); ++i) {
const auto& [dynamic_shape, sharding] = disassembled[i];
EXPECT_EQ(dynamic_shape, shard_dynamic_shapes[i]);
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(ConcreteShardingTest, DisassembleFailsForUnexpectedShape) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_THAT(sharding->Disassemble(Shape({40})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding can only disassemble")));
}
TEST_P(ConcreteShardingTest, IndexDomainsFails) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
shard_shapes.reserve(2);
shard_shapes.push_back(Shape({10}));
shard_shapes.push_back(Shape({20}));
std::shared_ptr<const Sharding> sharding = ConcreteSharding::Create(
device_list, MemoryKind(), Shape({30}), shard_shapes);
EXPECT_THAT(sharding->IndexDomains(Shape({30})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteSharding does not have index "
"domain information")));
}
TEST_P(ConcreteEvenShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1});
{
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_TRUE(sharding->IsFullyReplicated());
}
{
std::shared_ptr<const Sharding> sharding = ConcreteEvenSharding::Create(
device_list, MemoryKind(), Shape({30}), Shape({15}),
false);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
}
TEST_P(ConcreteEvenShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_THAT(sharding->GetShardShape(Shape({30})), IsOkAndHolds(Shape({15})));
EXPECT_THAT(
sharding->GetShardShape(Shape({45})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteEvenSharding has a shard shape for shape [30], "
"but was asked to get a shard shape for shape [45]")));
}
TEST_P(ConcreteEvenShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
ConcreteEvenSharding::Create(device_list0, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3, 4});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({15}), true);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({45}),
Shape({15}), true);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({10}), true);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 = ConcreteEvenSharding::Create(
        device_list1, MemoryKind(), Shape({30}), Shape({15}),
        /*is_fully_replicated=*/false);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(ConcreteEvenShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
ConcreteEvenSharding::Create(device_list0, MemoryKind(), Shape({30}),
Shape({15}), true);
{
auto device_list1 = GetDevices({2, 3});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(device_list1, MemoryKind(), Shape({30}),
Shape({15}), true);
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
        sharding0->WithDeviceAssignment(device_list1,
                                        /*memory_kind=*/std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2, 3});
EXPECT_THAT(
        sharding0->WithDeviceAssignment(device_list1,
                                        /*memory_kind=*/std::nullopt),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteEvenSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 4 devices")));
}
}
TEST_P(ConcreteEvenShardingTest, Disassemble) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), false);
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
sharding->Disassemble(Shape({30})));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({15}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(ConcreteEvenShardingTest, DisassembleFailsForUnexpectedShape) {
auto device_list = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), false);
EXPECT_THAT(sharding->Disassemble(Shape({40})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("ConcreteEvenSharding can only disassemble")));
}
TEST_P(ConcreteEvenShardingTest, IndexDomainsFails) {
auto device_list = GetDevices({0, 1});
std::vector<Shape> shard_shapes;
std::shared_ptr<const Sharding> sharding =
ConcreteEvenSharding::Create(device_list, MemoryKind(), Shape({30}),
Shape({15}), false);
EXPECT_THAT(
sharding->IndexDomains(Shape({30})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr(
"ConcreteEvenSharding does not have index domain information")));
}
TEST_P(ShardingParamShardingTest, CreateFailsWhenDeviceCountNotMatch) {
auto device_list = GetDevices({0, 1});
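  // ShardingParam is {dim_shards, {permutation, axis_sizes}} here and below.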
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
EXPECT_THAT(ShardingParamSharding::Create(param, device_list, MemoryKind()),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Device counts don't match. From "
"ShardingParam 6 vs from DeviceList 2")));
}
TEST_P(ShardingParamShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
{
ShardingParam param{{1, 1},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_TRUE(param_sharding->IsFullyReplicated());
}
{
ShardingParam param{{1, 6},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_FALSE(param_sharding->IsFullyReplicated());
}
{
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_FALSE(param_sharding->IsFullyReplicated());
}
}
TEST_P(ShardingParamShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6})),
IsOkAndHolds(Shape({3, 2})));
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6, 6})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Numbers of dimensions don't match. From "
"Shape 3 vs from ShardingParam 2")));
}
TEST_P(ShardingParamShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param0{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding0,
ShardingParamSharding::Create(param0, device_list0, MemoryKind()));
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
ShardingParam param1{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5});
ShardingParam param1{{3, 1},
{{1, 0}, {1, 3}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
ShardingParam param1{{3, 2},
{{0, 1}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(ShardingParamShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param0{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding0,
ShardingParamSharding::Create(param0, device_list0, MemoryKind()));
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
ShardingParam param1{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> sharding1,
ShardingParamSharding::Create(param1, device_list1, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
        sharding0->WithDeviceAssignment(device_list1,
                                        /*memory_kind=*/std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2});
EXPECT_THAT(
        sharding0->WithDeviceAssignment(device_list1,
                                        /*memory_kind=*/std::nullopt),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("ShardingParamSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 3 devices")));
}
}
TEST_P(ShardingParamShardingTest, Disassemble) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto disassembled,
param_sharding->Disassemble(Shape({6, 6})));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({3, 2}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(ShardingParamShardingTest, DisassembleFailsWhenRankNotMatch) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_THAT(param_sharding->Disassemble(Shape({6, 6, 6})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Numbers of dimensions don't match. From "
"Shape 3 vs from ShardingParam 2")));
}
TEST_P(ShardingParamShardingTest, DisassembleFailsForUnevenSharding) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
EXPECT_THAT(
param_sharding->Disassemble(Shape({7, 6})),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("Uneven shard is not supported. dim: 7, dim_shards: 2")));
}
TEST_P(ShardingParamShardingTest, IndexDomain) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{0, 1}, {2, 3}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto index_domains,
param_sharding->IndexDomains(Shape({6, 6})));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({3, 2})),
IndexDomain(Index({0, 2}), Shape({3, 2})),
IndexDomain(Index({0, 4}), Shape({3, 2})),
IndexDomain(Index({3, 0}), Shape({3, 2})),
IndexDomain(Index({3, 2}), Shape({3, 2})),
IndexDomain(Index({3, 4}), Shape({3, 2}))));
}
TEST_P(ShardingParamShardingTest, IndexDomainWithPermutation) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 3},
{{1, 0}, {3, 2}}};
TF_ASSERT_OK_AND_ASSIGN(
std::shared_ptr<const Sharding> param_sharding,
ShardingParamSharding::Create(param, device_list, MemoryKind()));
TF_ASSERT_OK_AND_ASSIGN(auto index_domains,
param_sharding->IndexDomains(Shape({6, 6})));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({3, 2})),
IndexDomain(Index({0, 4}), Shape({3, 2})),
IndexDomain(Index({3, 2}), Shape({3, 2})),
IndexDomain(Index({0, 2}), Shape({3, 2})),
IndexDomain(Index({3, 0}), Shape({3, 2})),
IndexDomain(Index({3, 4}), Shape({3, 2}))));
}
TEST_P(ShardingParamShardingTest, IndexDomainWithReplication) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
ShardingParam param{{2, 1},
{{0, 1}, {2, 3}}};
TF_ASSERT_OK_AND_ASSIGN( |
1,795 | cpp | tensorflow/tensorflow | aggregate_profile | third_party/xla/xla/python/aggregate_profile.cc | third_party/xla/xla/python/aggregate_profile_test.cc | #ifndef XLA_PYTHON_AGGREGATE_PROFILE_H_
#define XLA_PYTHON_AGGREGATE_PROFILE_H_
#include "absl/types/span.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
void AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto> profiles,
int percentile,
tensorflow::profiler::ProfiledInstructionsProto *result_profile);
}
#endif
#include "xla/python/aggregate_profile.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/python/xplane_to_profile_instructions.h"
namespace xla {
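// Merges per-instruction costs from `profiles` and writes, for each
// instruction name, the requested percentile of its observed costs into
// `result_profile`. Out-of-range percentiles are a no-op.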
void AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto> profiles,
int percentile,
tensorflow::profiler::ProfiledInstructionsProto *result_profile) {
if (percentile < 0 || percentile > 100) return;
absl::flat_hash_map<std::string, HloLatencyInfo> hlo_latency_info;
for (const auto &profile : profiles) {
for (const auto &cost : profile.costs()) {
hlo_latency_info[cost.name()].durations.emplace_back(cost.cost_us());
}
}
for (const auto &iter : hlo_latency_info) {
auto *cost = result_profile->add_costs();
std::vector<double> durations = iter.second.durations;
int index = 0;
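    // Nearest-rank (floor) percentile over the sorted durations; a single
    // sample is used as-is.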
if (durations.size() > 1) {
std::sort(durations.begin(), durations.end());
      index = static_cast<int>(percentile / 100.0 * (durations.size() - 1));
}
cost->set_cost_us(durations[index]);
cost->set_name(iter.first);
}
}
} | #include "xla/python/aggregate_profile.h"
#include <map>
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
namespace xla {
namespace {
using tensorflow::profiler::ProfiledInstructionsProto;
TEST(AggregateProfiledInstructionsProtoTest, aggregateAndGetPercentile) {
tensorflow::profiler::ProfiledInstructionsProto profile_a;
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(10);
cost_a->set_name("reduce");
}
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(30);
cost_a->set_name("copy");
}
tensorflow::profiler::ProfiledInstructionsProto profile_c;
{
auto *cost_c = profile_c.add_costs();
cost_c->set_cost_us(30);
cost_c->set_name("reduce");
}
std::vector<tensorflow::profiler::ProfiledInstructionsProto> profiles = {
profile_a, profile_c};
std::vector<int> custom_call_costs = {0, 10, 20, 30, 40, 50,
60, 70, 80, 90, 100};
for (int cost : custom_call_costs) {
tensorflow::profiler::ProfiledInstructionsProto profile_custom_call;
{
auto *cost_c = profile_custom_call.add_costs();
cost_c->set_cost_us(cost);
cost_c->set_name("custom-call");
}
profiles.push_back(profile_custom_call);
}
tensorflow::profiler::ProfiledInstructionsProto result_90th;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
90, &result_90th);
EXPECT_EQ(result_90th.costs_size(), 3);
std::map<std::string, float> costs;
for (const auto &cost : result_90th.costs()) {
costs[cost.name()] = cost.cost_us();
}
EXPECT_EQ(costs["copy"], 30);
EXPECT_EQ(costs["custom-call"], 90);
EXPECT_EQ(costs["reduce"], 10);
tensorflow::profiler::ProfiledInstructionsProto result_10th;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
10, &result_10th);
EXPECT_EQ(result_10th.costs_size(), 3);
for (const auto &cost : result_10th.costs()) {
costs[cost.name()] = cost.cost_us();
}
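  // Fall back to the slow path for non-tiled shardings and for subgroups that
  // are not pure replication.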
EXPECT_EQ(costs["copy"], 30);
EXPECT_EQ(costs["custom-call"], 10);
EXPECT_EQ(costs["reduce"], 10);
}
TEST(AggregateProfiledInstructionsProtoTest, getIncorrectPercentile) {
tensorflow::profiler::ProfiledInstructionsProto profile_a;
{
auto *cost_a = profile_a.add_costs();
cost_a->set_cost_us(10);
cost_a->set_name("reduce");
}
std::vector<tensorflow::profiler::ProfiledInstructionsProto> profiles = {
profile_a};
tensorflow::profiler::ProfiledInstructionsProto result;
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
-1, &result);
EXPECT_EQ(result.costs_size(), 0);
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
101, &result);
EXPECT_EQ(result.costs_size(), 0);
AggregateProfiledInstructionsProto(
absl::Span<const tensorflow::profiler::ProfiledInstructionsProto>(
profiles.data(), profiles.size()),
100, &result);
EXPECT_EQ(result.costs_size(), 1);
}
}
} |
1,796 | cpp | tensorflow/tensorflow | xplane_to_profile_instructions | third_party/xla/xla/python/xplane_to_profile_instructions.cc | third_party/xla/xla/python/xplane_to_profile_instructions_test.cc | #ifndef XLA_PYTHON_XPLANE_TO_PROFILE_INSTRUCTIONS_H_
#define XLA_PYTHON_XPLANE_TO_PROFILE_INSTRUCTIONS_H_
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
namespace xla {
extern const char kCostNameSep[];
struct HloLatencyInfo {
std::vector<double> durations;
};
absl::Status ConvertXplaneToProfiledInstructionsProto(
std::vector<tensorflow::profiler::XSpace> xspaces,
tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto);
absl::Status ConvertXplaneUnderLogdirToProfiledInstructionsProto(
const std::string& logdir, tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto);
}
#endif
#include "xla/python/xplane_to_profile_instructions.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/convert/xla_op_utils.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/utils/file_system_utils.h"
#include "tsl/profiler/utils/tf_xplane_visitor.h"
#include "tsl/profiler/utils/xplane_schema.h"
#include "tsl/profiler/utils/xplane_utils.h"
#include "tsl/profiler/utils/xplane_visitor.h"
namespace xla {
// Defined at namespace scope so it provides the extern declaration in the
// header.
const char kCostNameSep[] = "::";
namespace {
constexpr char kXPlanePb[] = "xplane.pb";
using tensorflow::profiler::XPlane;
using tensorflow::profiler::XSpace;
using tsl::profiler::CreateTfXPlaneVisitor;
using tsl::profiler::FindPlanesWithPrefix;
using tsl::profiler::FindPlaneWithName;
using tsl::profiler::GetStatTypeStr;
using tsl::profiler::HostEventType;
using tsl::profiler::IsInternalEvent;
using tsl::profiler::ProfilerJoinPath;
using tsl::profiler::StatType;
using tsl::profiler::XEventMetadataVisitor;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XLineVisitor;
using tsl::profiler::XPlaneVisitor;
using tsl::profiler::XStatVisitor;
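// Collects per-HLO-op latencies (in microseconds) from a device plane,
// keying each op by its name, prefixed with the owning module's fingerprint
// when one is known from `hlo_module_info`.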
void GetXPlaneLatencyInfo(
const XPlaneVisitor& xplane,
const absl::flat_hash_map<std::string, std::string>& hlo_module_info,
absl::flat_hash_map<std::string, HloLatencyInfo>* hlo_latency_info) {
xplane.ForEachLine([hlo_latency_info,
hlo_module_info](const XLineVisitor& xline) {
if (xline.DisplayName() == tsl::profiler::kXlaAsyncOpLineName) {
return;
}
xline.ForEachEvent([hlo_latency_info,
hlo_module_info](const XEventVisitor& xevent) {
int64_t event_type =
xevent.Type().value_or(HostEventType::kUnknownHostEventType);
if (IsInternalEvent(event_type)) return;
std::optional<std::string> hlo_name = std::nullopt;
std::optional<std::string> hlo_module_name = std::nullopt;
std::optional<std::string> fingerprint = std::nullopt;
std::optional<int64_t> program_id = std::nullopt;
auto for_each_stat = [&](const XStatVisitor& stat) {
if (stat.ValueCase() == tsl::profiler::XStat::VALUE_NOT_SET) return;
if (stat.Name() == GetStatTypeStr(StatType::kHloOp)) {
hlo_name = stat.ToString();
}
if (stat.Name() == GetStatTypeStr(StatType::kProgramId)) {
program_id = stat.IntValue();
}
if (stat.Name() == GetStatTypeStr(StatType::kHloModule)) {
hlo_module_name = stat.ToString();
}
};
xevent.Metadata().ForEachStat(for_each_stat);
xevent.ForEachStat(for_each_stat);
if (!hlo_name.has_value() || !hlo_module_name.has_value()) {
return;
}
if (hlo_module_name.has_value()) {
std::string fingerprint_key = hlo_module_name.value();
if (program_id.has_value()) {
fingerprint_key = tsl::profiler::HloModuleNameWithProgramId(
hlo_module_name.value(), program_id.value());
}
if (hlo_module_info.contains(fingerprint_key)) {
fingerprint = hlo_module_info.at(fingerprint_key);
}
}
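      // XPlane durations are nanoseconds; costs are recorded in microseconds.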
double latency = static_cast<double>(xevent.DurationNs()) / 1e3;
std::string key = hlo_name.value();
if (fingerprint.has_value()) {
key = absl::StrCat(fingerprint.value(), kCostNameSep, hlo_name.value());
}
(*hlo_latency_info)[key].durations.emplace_back(latency);
});
});
}
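// Rebuilds an HloModule from its proto; returns nullptr if the config or the
// module cannot be constructed.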
std::unique_ptr<xla::HloModule> CreateModuleFromProto(
const xla::HloModuleProto& proto) {
auto config = xla::HloModule::CreateModuleConfigFromProto(proto, {});
if (config.ok()) {
auto module = xla::HloModule::CreateFromProto(proto, config.value());
if (module.ok()) {
return std::move(*module);
}
}
return nullptr;
}
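// Returns the "fingerprint_before_lhs" frontend attribute of the module's
// root instruction, if present.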
std::optional<std::string> GetHloModuleFingerprint(
const xla::HloModuleProto& hlo_module_proto) {
std::unique_ptr<xla::HloModule> hlo_module =
CreateModuleFromProto(hlo_module_proto);
if (hlo_module == nullptr) {
return std::nullopt;
}
const auto& map = hlo_module->entry_computation()
->root_instruction()
->frontend_attributes()
.map();
auto it = map.find("fingerprint_before_lhs");
if (it != map.end()) {
return it->second;
}
return std::nullopt;
}
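// Scans the metadata plane for serialized HloProtos and maps
// "<module_name>(<program_id>)" keys to module fingerprints.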
void GetXPlaneHloModuleInfo(
const XPlaneVisitor& xplane,
absl::flat_hash_map<std::string, std::string>* hlo_module_info) {
xplane.ForEachEventMetadata([&](const XEventMetadataVisitor& event_metadata) {
event_metadata.ForEachStat([&](const XStatVisitor& stat) {
xla::HloProto hlo_proto;
if (tsl::ParseProtoUnlimited(&hlo_proto, stat.BytesValue().data(),
stat.BytesValue().size())) {
const xla::HloModuleProto& hlo_module_proto = hlo_proto.hlo_module();
std::optional<std::string> fingerprint =
GetHloModuleFingerprint(hlo_module_proto);
if (fingerprint.has_value()) {
std::string key_with_id = tsl::profiler::HloModuleNameWithProgramId(
hlo_module_proto.name(), hlo_module_proto.id());
(*hlo_module_info)[key_with_id] = fingerprint.value();
}
}
});
});
}
}
absl::Status ConvertXplaneUnderLogdirToProfiledInstructionsProto(
const std::string& logdir, tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto) {
std::vector<std::string> children_path;
TF_RETURN_IF_ERROR(tsl::Env::Default()->GetChildren(logdir, &children_path));
if (children_path.empty()) {
return absl::NotFoundError(
absl::StrCat("Could not find file under: ", logdir));
}
std::vector<tensorflow::profiler::XSpace> xspaces;
for (const std::string& child_path : children_path) {
if (absl::StrContains(child_path, kXPlanePb)) {
std::string xspace_path = ProfilerJoinPath(logdir, child_path);
tensorflow::profiler::XSpace xspace;
TF_RETURN_IF_ERROR(
ReadBinaryProto(tsl::Env::Default(), xspace_path, &xspace));
xspaces.emplace_back(xspace);
}
}
return ConvertXplaneToProfiledInstructionsProto(xspaces,
profiled_instructions_proto);
}
absl::Status ConvertXplaneToProfiledInstructionsProto(
std::vector<tensorflow::profiler::XSpace> xspaces,
tensorflow::profiler::ProfiledInstructionsProto*
profiled_instructions_proto) {
absl::flat_hash_map<std::string, HloLatencyInfo> hlo_latency_info;
absl::flat_hash_map<std::string, std::string> hlo_module_info;
for (const XSpace& xspace : xspaces) {
const XPlane* metadata_plane =
FindPlaneWithName(xspace, tsl::profiler::kMetadataPlaneName);
if (metadata_plane != nullptr) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(metadata_plane);
GetXPlaneHloModuleInfo(xplane, &hlo_module_info);
}
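    // Prefer GPU device planes, then TPU, then custom device planes.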
std::vector<const XPlane*> device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kGpuPlanePrefix);
if (device_planes.empty()) {
device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kTpuPlanePrefix);
}
if (device_planes.empty()) {
device_planes =
FindPlanesWithPrefix(xspace, tsl::profiler::kCustomPlanePrefix);
}
for (const XPlane* device_plane : device_planes) {
XPlaneVisitor xplane = CreateTfXPlaneVisitor(device_plane);
GetXPlaneLatencyInfo(xplane, hlo_module_info, &hlo_latency_info);
}
}
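  // Record each instruction's cost as the arithmetic mean of its observed
  // latencies.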
for (const auto& iter : hlo_latency_info) {
auto* cost = profiled_instructions_proto->add_costs();
std::vector<double> durations = iter.second.durations;
double sum = std::accumulate(durations.begin(), durations.end(), 0.0);
cost->set_cost_us(sum / durations.size());
cost->set_name(iter.first);
}
return absl::OkStatus();
}
} | #include "xla/python/xplane_to_profile_instructions.h"
#include <cstdint>
#include <memory>
#include <string>
#include "xla/service/hlo.pb.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/convert/xla_op_utils.h"
#include "tsl/profiler/protobuf/profiled_instructions.pb.h"
#include "tsl/profiler/protobuf/xplane.pb.h"
#include "tsl/profiler/rpc/client/save_profile.h"
#include "tsl/profiler/utils/file_system_utils.h"
#include "tsl/profiler/utils/xplane_builder.h"
#include "tsl/profiler/utils/xplane_schema.h"
namespace xla {
namespace {
using tensorflow::profiler::XSpace;
using tsl::profiler::GetStatTypeStr;
using tsl::profiler::GpuPlaneName;
using tsl::profiler::kHostThreadsPlaneName;
using tsl::profiler::kMetadataPlaneName;
using tsl::profiler::StatType;
using tsl::profiler::XEventBuilder;
using tsl::profiler::XLineBuilder;
using tsl::profiler::XPlaneBuilder;
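// Builds a synthetic XSpace with a host plane plus GPU device planes that
// carry two "custom-call" kernel events for module "test_module" with the
// given durations (in nanoseconds).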
void CreateXSpace(XSpace* space, int first_device_latency,
int second_device_latency) {
XPlaneBuilder host_plane(space->add_planes());
host_plane.SetName(kHostThreadsPlaneName);
XLineBuilder thread1 = host_plane.GetOrCreateLine(10);
thread1.SetName("thread1");
XEventBuilder event1 =
thread1.AddEvent(*host_plane.GetOrCreateEventMetadata("event1"));
event1.SetTimestampNs(150000);
event1.SetDurationNs(10000);
event1.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Relu"));
XLineBuilder thread2 = host_plane.GetOrCreateLine(20);
thread2.SetName("thread2");
XEventBuilder event2 =
thread2.AddEvent(*host_plane.GetOrCreateEventMetadata("event2"));
event2.SetTimestampNs(160000);
event2.SetDurationNs(10000);
event2.AddStatValue(*host_plane.GetOrCreateStatMetadata("tf_op"),
*host_plane.GetOrCreateStatMetadata("Conv2D"));
int64_t program_id = 1;
XPlaneBuilder device_plane(space->add_planes());
device_plane.SetName(GpuPlaneName(0));
device_plane.SetId(0);
XLineBuilder stream1 = device_plane.GetOrCreateLine(30);
stream1.SetName("gpu stream 1");
XEventBuilder event3 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event3.SetTimestampNs(180000);
event3.SetDurationNs(first_device_latency);
event3.AddStatValue(
*device_plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloOp)),
*device_plane.GetOrCreateStatMetadata("custom-call"));
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloModule)),
*device_plane.GetOrCreateStatMetadata("test_module"));
event3.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
program_id);
XPlaneBuilder device_plane_2(space->add_planes());
device_plane_2.SetName(GpuPlaneName(1));
device_plane_2.SetId(0);
XLineBuilder stream2 = device_plane.GetOrCreateLine(30);
stream2.SetName("gpu stream 1");
XEventBuilder event5 =
stream1.AddEvent(*device_plane.GetOrCreateEventMetadata("kernel1"));
event5.SetTimestampNs(180000);
event5.SetDurationNs(second_device_latency);
event5.AddStatValue(
*device_plane.GetOrCreateStatMetadata(GetStatTypeStr(StatType::kHloOp)),
*device_plane.GetOrCreateStatMetadata("custom-call"));
event5.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kHloModule)),
*device_plane.GetOrCreateStatMetadata("test_module"));
event5.AddStatValue(*device_plane.GetOrCreateStatMetadata(
GetStatTypeStr(StatType::kProgramId)),
program_id);
}
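// Like CreateXSpace, but first adds a metadata plane carrying an HloProto for
// "test_module" whose root instruction is tagged with the fingerprint "08a5".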
void CreateXSpaceWithFingerprint(XSpace* space, int first_device_latency,
int second_device_latency) {
XPlaneBuilder metadata_plane(space->add_planes());
metadata_plane.SetName(kMetadataPlaneName);
const char* hlo_text = R"(
HloModule test_module
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY ar {
p0 = f32[32] parameter(0)
p1 = f32[32, 32] parameter(1)
p2 = f32[32, 32] parameter(2)
p3 = f32[32] parameter(3)
dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm"
dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm"
dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm"
dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm"
dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm"
dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm"
dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm"
ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op
ar-done = f32[32] all-reduce-done(ar-start)
%ag-start = (f32[32], f32[64]) all-gather-start(p3), dimensions={0}
%ag-done = f32[64] all-gather-done(%ag-start)
add0 = f32[32,32] add(dot0, dot1)
add1 = f32[32,32] add(add0, dot2)
add2 = f32[32,32] add(add1, dot3)
add3 = f32[32,32] add(add2, dot4)
add4 = f32[32,32] add(add3, dot5)
add5 = f32[32,32] add(add4, dot6)
ROOT t = (f32[32], f32[64], f32[32,32]) tuple(ar-done, %ag-done, add5)
})";
xla::HloModuleConfig config;
auto module = std::make_unique<VerifiedHloModule>(
"test_module", config, false,
true,
ShapeUtil::ByteSizeOfElements);
if (module->ParseHloStringAndVerifyModule(hlo_text).ok()) {
HloInstruction* root = module->entry_computation()->root_instruction();
FrontendAttributes attributes;
(*attributes.mutable_map())["fingerprint_before_lhs"] = "08a5";
root->add_frontend_attributes(attributes);
xla::HloModuleProto hlo_module_proto = module->ToProto();
hlo_module_proto.set_id(1);
xla::HloProto hlo_proto;
*hlo_proto.mutable_hlo_module() = hlo_module_proto;
int64_t program_id = 1;
tsl::profiler::XEventMetadata* event_metadata =
metadata_plane.GetOrCreateEventMetadata(program_id);
event_metadata->set_name(tsl::profiler::HloModuleNameWithProgramId(
hlo_proto.hlo_module().name(), program_id));
tsl::profiler::XStatsBuilder<tsl::profiler::XEventMetadata> event_stats(
event_metadata, &metadata_plane);
auto* hlo_proto_stat = metadata_plane.GetOrCreateStatMetadata(
GetStatTypeStr(tsl::profiler::StatType::kHloProto));
event_stats.AddStatValue(*hlo_proto_stat, hlo_proto);
}
return CreateXSpace(space, first_device_latency, second_device_latency);
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneUnderLogdirToProfiledInstructionsProto) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
std::string logdir = testing::TempDir() + "/logdir";
std::string run = tsl::profiler::GetCurrentTimeStampAsString();
const std::string path = tsl::profiler::ProfilerJoinPath(logdir, run);
XSpace xspace_first_host;
CreateXSpace(&xspace_first_host, 10000, 10000);
auto status =
tsl::profiler::SaveXSpace(logdir, run, "host_0", xspace_first_host);
EXPECT_TRUE(status.ok());
XSpace xspace_2nd_host;
CreateXSpace(&xspace_2nd_host, 15000, 5000);
status = tsl::profiler::SaveXSpace(logdir, run, "host_1", xspace_2nd_host);
EXPECT_TRUE(status.ok());
EXPECT_TRUE(
ConvertXplaneUnderLogdirToProfiledInstructionsProto(path, &profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneUnderLogdirToProfiledInstructionsProtoWithFingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
std::string logdir = testing::TempDir() + "/logdir";
std::string run = tsl::profiler::GetCurrentTimeStampAsString();
const std::string path = tsl::profiler::ProfilerJoinPath(logdir, run);
XSpace xspace_first_host;
CreateXSpaceWithFingerprint(&xspace_first_host, 10000, 10000);
auto status =
tsl::profiler::SaveXSpace(logdir, run, "host_0", xspace_first_host);
EXPECT_TRUE(status.ok());
XSpace xspace_2nd_host;
CreateXSpaceWithFingerprint(&xspace_2nd_host, 15000, 5000);
status = tsl::profiler::SaveXSpace(logdir, run, "host_1", xspace_2nd_host);
EXPECT_TRUE(status.ok());
EXPECT_TRUE(
ConvertXplaneUnderLogdirToProfiledInstructionsProto(path, &profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "08a5::custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneToProfiledInstructionsProto) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
XSpace xspace_a;
CreateXSpace(&xspace_a, 10000, 10000);
XSpace xspace_b;
CreateXSpace(&xspace_b, 15000, 5000);
EXPECT_TRUE(ConvertXplaneToProfiledInstructionsProto({xspace_a, xspace_b},
&profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "custom-call");
}
TEST(XplaneToProfiledInstructionsProtoTest,
ConvertXplaneToProfiledInstructionsProtoWithFingerprint) {
tensorflow::profiler::ProfiledInstructionsProto profile_proto;
XSpace xspace_a;
CreateXSpaceWithFingerprint(&xspace_a, 10000, 10000);
XSpace xspace_b;
CreateXSpaceWithFingerprint(&xspace_b, 15000, 5000);
EXPECT_TRUE(ConvertXplaneToProfiledInstructionsProto({xspace_a, xspace_b},
&profile_proto)
.ok());
EXPECT_EQ(profile_proto.costs_size(), 1);
EXPECT_EQ(profile_proto.costs(0).cost_us(), 10);
EXPECT_EQ(profile_proto.costs(0).name(), "08a5::custom-call");
}
}
} |
1,797 | cpp | tensorflow/tensorflow | pjrt_compiler | third_party/xla/xla/pjrt/pjrt_compiler.cc | third_party/xla/xla/pjrt/pjrt_compiler_test.cc | #ifndef XLA_PJRT_PJRT_COMPILER_H_
#define XLA_PJRT_PJRT_COMPILER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "tsl/platform/fingerprint.h"
namespace xla {
using PjRtPlatformId = uint64_t;
inline const char* CpuName() {
static constexpr char kCpuName[] = "cpu";
return kCpuName;
}
inline const char* CudaName() {
static constexpr char kCudaName[] = "cuda";
return kCudaName;
}
inline const char* RocmName() {
static constexpr char kRocmName[] = "rocm";
return kRocmName;
}
inline const char* SyclName() {
static constexpr char kSyclName[] = "sycl";
return kSyclName;
}
inline const char* TpuName() {
static constexpr char kTpuName[] = "tpu";
return kTpuName;
}
inline PjRtPlatformId CpuId() {
static const PjRtPlatformId kCpuId = tsl::Fingerprint64(CpuName());
return kCpuId;
}
inline PjRtPlatformId CudaId() {
static const PjRtPlatformId kCudaId = tsl::Fingerprint64(CudaName());
return kCudaId;
}
inline PjRtPlatformId RocmId() {
static const PjRtPlatformId kRocmId = tsl::Fingerprint64(RocmName());
return kRocmId;
}
inline PjRtPlatformId SyclId() {
static const PjRtPlatformId kSyclId = tsl::Fingerprint64(SyclName());
return kSyclId;
}
inline PjRtPlatformId TpuId() {
static const PjRtPlatformId kTpuId = tsl::Fingerprint64(TpuName());
return kTpuId;
}
class PjRtCompiler;
class PjRtClient;
class PjRtTopologyDescription {
public:
virtual ~PjRtTopologyDescription() = default;
virtual PjRtPlatformId platform_id() const = 0;
virtual absl::string_view platform_name() const = 0;
virtual absl::string_view platform_version() const = 0;
virtual std::optional<PjRtCompiler*> compiler() const { return std::nullopt; }
virtual std::vector<std::unique_ptr<const PjRtDeviceDescription>>
DeviceDescriptions() const = 0;
virtual bool is_subslice_topology() const { return false; }
virtual absl::StatusOr<int> ProcessCount() const {
return absl::UnimplementedError("ProcessCount is unsupported.");
}
virtual absl::StatusOr<int> CoreCountOfDefaultType() const {
return absl::UnimplementedError("CoreCountOfDefaultType is unsupported.");
}
virtual absl::StatusOr<int> LogicalDeviceCountOfDefaultType() const {
return absl::UnimplementedError(
"LogicalDeviceCountOfDefaultType is unsupported.");
}
virtual absl::StatusOr<int> CoreCountOfDefaultTypePerProcess() const {
return absl::UnimplementedError(
"CoreCountOfDefaultTypePerProcess is unsupported.");
}
virtual absl::StatusOr<int> CoreCountOfDefaultTypePerChip() const {
return absl::UnimplementedError(
"CoreCountOfDefaultTypePerChip is unsupported.");
}
virtual absl::StatusOr<std::string> Serialize() const = 0;
virtual const absl::flat_hash_map<std::string, PjRtDeviceAttribute>&
Attributes() const = 0;
virtual absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type, absl::Span<const int64_t> dims) const = 0;
};
class PjRtCompiler {
public:
virtual ~PjRtCompiler() = default;
virtual absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) = 0;
virtual absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) = 0;
};
void PjRtRegisterCompiler(absl::string_view platform_name,
std::unique_ptr<PjRtCompiler> compiler);
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCompile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client = nullptr);
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCompile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client = nullptr);
}
#endif
#include "xla/pjrt/pjrt_compiler.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/metrics.h"
namespace xla {
ABSL_CONST_INIT absl::Mutex registry_mutex(absl::kConstInit);
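// Process-wide registry from platform name to compiler; intentionally leaked
// and guarded by `registry_mutex`.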
absl::flat_hash_map<std::string, std::unique_ptr<PjRtCompiler>>*
CompilerRegistry() {
static auto* compiler_registry =
new absl::flat_hash_map<std::string, std::unique_ptr<PjRtCompiler>>();
return compiler_registry;
}
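// RAII helper that marks the matching compile-status metric as in progress on
// construction and clears it on destruction.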
class ScopedMetricHelper {
public:
explicit ScopedMetricHelper(absl::string_view metric_name)
: metric_name_(metric_name) {
if (metric_name == metrics::kPjrtCompilerCompileComputationMetricName) {
metrics::RecordPjrtCompilerCompileComputationStatus(true);
} else if (metric_name == metrics::kPjrtCompilerCompileModuleMetricName) {
metrics::RecordPjrtCompilerCompileModuleStatus(true);
} else {
LOG(ERROR) << "No corresponding handler function for metric: "
<< metric_name;
}
}
~ScopedMetricHelper() {
if (metric_name_ == metrics::kPjrtCompilerCompileComputationMetricName) {
metrics::RecordPjrtCompilerCompileComputationStatus(false);
} else if (metric_name_ == metrics::kPjrtCompilerCompileModuleMetricName) {
metrics::RecordPjrtCompilerCompileModuleStatus(false);
}
}
private:
absl::string_view metric_name_;
};
void PjRtRegisterCompiler(absl::string_view platform_name,
std::unique_ptr<PjRtCompiler> compiler) {
CHECK(compiler != nullptr);
absl::MutexLock l(®istry_mutex);
auto* compiler_registry = CompilerRegistry();
CHECK(!compiler_registry->contains(platform_name));
(*compiler_registry)[platform_name] = std::move(compiler);
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCompile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) {
auto topology_compiler = topology.compiler();
ScopedMetricHelper helper(metrics::kPjrtCompilerCompileComputationMetricName);
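  // Prefer a compiler embedded in the topology; otherwise look one up in the
  // registry by platform name.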
if (topology_compiler.has_value()) {
return (*topology_compiler)
->Compile(std::move(options), computation, topology, client);
}
absl::ReaderMutexLock l(®istry_mutex);
const auto* compiler_registry = CompilerRegistry();
auto it = compiler_registry->find(topology.platform_name());
if (it == compiler_registry->end()) {
return tsl::errors::NotFound(absl::StrCat(
"No compiler registered for platform ", topology.platform_name()));
}
return it->second->Compile(std::move(options), computation, topology, client);
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> PjRtCompile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) {
auto topology_compiler = topology.compiler();
ScopedMetricHelper helper(metrics::kPjrtCompilerCompileModuleMetricName);
if (topology_compiler.has_value()) {
return (*topology_compiler)
->Compile(std::move(options), module, topology, client);
}
absl::ReaderMutexLock l(®istry_mutex);
const auto* compiler_registry = CompilerRegistry();
auto it = compiler_registry->find(topology.platform_name());
if (it == compiler_registry->end()) {
return tsl::errors::NotFound(absl::StrCat(
"No compiler registered for platform ", topology.platform_name()));
}
return it->second->Compile(std::move(options), module, topology, client);
}
} | #include "xla/pjrt/pjrt_compiler.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/client/xla_computation.h"
#include "xla/pjrt/metrics.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "tsl/lib/monitoring/cell_reader.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
using metrics::kPjrtCompilerCompileComputationMetricName;
using metrics::kPjrtCompilerCompileModuleMetricName;
using ::tsl::monitoring::testing::CellReader;
using ::tsl::testing::StatusIs;
namespace {
class PjRtTestTopology : public PjRtTopologyDescription {
public:
PjRtPlatformId platform_id() const override { return 0; }
absl::string_view platform_name() const override { return "not_registered"; }
absl::string_view platform_version() const override { return "test"; }
std::vector<std::unique_ptr<const PjRtDeviceDescription>> DeviceDescriptions()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<std::string> Serialize() const override { return "test_topo"; }
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& Attributes()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type,
absl::Span<const int64_t> dims) const override {
return Unimplemented("TestTopology does not support GetDefaultLayout");
}
};
TEST(PjRtCompilerTest, CompilerNotRegistered) {
PjRtTestTopology topology;
CompileOptions options;
XlaComputation computation;
auto res = PjRtCompile(options, computation, topology);
EXPECT_TRUE(tsl::errors::IsNotFound(res.status()));
}
TEST(PjRtCompilerTest, CompilerRegistered) {
class PjRtTestTopology : public PjRtTopologyDescription {
public:
PjRtPlatformId platform_id() const override { return 0; }
absl::string_view platform_name() const override { return "registered"; }
absl::string_view platform_version() const override { return "test"; }
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
DeviceDescriptions() const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<std::string> Serialize() const override {
return "test_topo";
}
const absl::flat_hash_map<std::string, PjRtDeviceAttribute>& Attributes()
const override {
LOG(FATAL) << "Unused";
}
absl::StatusOr<Layout> GetDefaultLayout(
PrimitiveType element_type,
absl::Span<const int64_t> dims) const override {
return Unimplemented("TestTopology does not support GetDefaultLayout");
}
};
PjRtTestTopology topology;
class PjRtTestCompiler : public PjRtCompiler {
public:
absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, const XlaComputation& computation,
const PjRtTopologyDescription& topology, PjRtClient* client) override {
return tsl::errors::Unimplemented("test compiler!");
}
absl::StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
CompileOptions options, mlir::ModuleOp module,
const PjRtTopologyDescription& topology, PjRtClient* client) override {
return tsl::errors::Unimplemented("test compiler!");
}
};
std::unique_ptr<PjRtCompiler> compiler = std::make_unique<PjRtTestCompiler>();
PjRtRegisterCompiler(topology.platform_name(), std::move(compiler));
CompileOptions options;
XlaComputation computation;
auto res = PjRtCompile(options, computation, topology);
EXPECT_TRUE(tsl::errors::IsUnimplemented(res.status()));
}
TEST(PjRtCompilerTest, PjrtCompileComputationMetric) {
PjRtTestTopology topology;
xla::CompileOptions compile_options;
XlaComputation xla_computation;
CellReader<bool> metric_reader(
std::string{kPjrtCompilerCompileComputationMetricName});
EXPECT_THAT(PjRtCompile(compile_options, xla_computation, topology,
                          /*client=*/nullptr),
StatusIs(tensorflow::error::NOT_FOUND));
EXPECT_FALSE(metric_reader.Read());
}
TEST(PjRtCompilerTest, PjrtCompileModuleMetric) {
PjRtTestTopology topology;
xla::CompileOptions compile_options;
mlir::ModuleOp module;
CellReader<bool> metric_reader(
std::string{kPjrtCompilerCompileModuleMetricName});
EXPECT_THAT(PjRtCompile(compile_options, module, topology,
                          /*client=*/nullptr),
StatusIs(tensorflow::error::NOT_FOUND));
EXPECT_FALSE(metric_reader.Read());
}
}
} |
1,798 | cpp | tensorflow/tensorflow | xla_sharding | third_party/xla/xla/python/pjrt_ifrt/xla_sharding.cc | third_party/xla/xla/python/pjrt_ifrt/xla_sharding_test.cc | #ifndef XLA_PYTHON_PJRT_IFRT_XLA_SHARDING_H_
#define XLA_PYTHON_PJRT_IFRT_XLA_SHARDING_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
namespace xla {
namespace ifrt {
class XlaCompatibleSharding
: public llvm::RTTIExtends<XlaCompatibleSharding, Sharding> {
public:
using llvm::RTTIExtends<XlaCompatibleSharding, Sharding>::RTTIExtends;
static char ID;
};
class HloSharding final
: public llvm::RTTIExtends<HloSharding, XlaCompatibleSharding> {
public:
static std::unique_ptr<HloSharding> Create(DeviceList devices,
MemoryKind memory_kind,
xla::HloSharding xla_hlo_sharding);
const xla::HloSharding& xla_hlo_sharding() const { return xla_hlo_sharding_; }
~HloSharding() override = default;
absl::StatusOr<Shape> GetShardShape(const Shape& shape) const override;
bool HasSamePartitioning(const Sharding& other) const override;
absl::StatusOr<std::unique_ptr<Sharding>> WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const override;
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
Disassemble(const Shape& shape) const override;
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
Disassemble(const DynamicShape& dynamic_shape) const override;
absl::StatusOr<std::vector<IndexDomain>> IndexDomains(
const Shape& shape) const override;
std::string DebugString() const override;
static char ID;
private:
HloSharding(DeviceList devices, MemoryKind memory_kind,
xla::HloSharding xla_hlo_sharding);
xla::HloSharding xla_hlo_sharding_;
};
std::vector<IndexDomain> TEST_HloShardingIndexDomainsSlowPath(
const HloSharding& sharding, const Shape& shape);
}
}
#endif
#include "xla/python/pjrt_ifrt/xla_sharding.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
char XlaCompatibleSharding::ID = 0;
char HloSharding::ID = 0;
namespace {
bool NextIndex(Index::Elements* index, absl::Span<const int64_t> limit) {
DCHECK_LE(index->size(), limit.size());
for (int64_t i = index->size() - 1; i >= 0; --i) {
++(*index)[i];
if ((*index)[i] < limit[i]) {
return true;
}
(*index)[i] = 0;
}
return false;
}
std::vector<IndexDomain> IndexDomainsSlowPath(
const xla::HloSharding& hlo_sharding, const DeviceList& devices,
const Shape& shape) {
auto xla_shape = xla::ShapeUtil::MakeShapeWithDescendingLayout(
xla::PrimitiveType::S32, shape.dims());
if (devices.size() > 8) {
LOG_FIRST_N(WARNING, 1)
<< "Taking a slow path for HloSharding::IndexDomains(). This will not "
"scale for a large number of devices.";
}
std::vector<IndexDomain> result;
result.reserve(devices.size());
Index::Elements origin(shape.dims().size());
Shape::Dimensions shard_shape(shape.dims().size());
for (int device_idx = 0; device_idx < devices.size(); ++device_idx) {
auto tile_offset = hlo_sharding.TileOffsetForDevice(xla_shape, device_idx);
auto tile_limit = hlo_sharding.TileLimitForDevice(xla_shape, device_idx);
for (int i = 0; i < shape.dims().size(); ++i) {
origin[i] = tile_offset[i];
shard_shape[i] = tile_limit[i] - tile_offset[i];
}
result.push_back(IndexDomain(Index(origin), Shape(shard_shape)));
}
return result;
}
}
std::unique_ptr<HloSharding> HloSharding::Create(
DeviceList devices, MemoryKind memory_kind,
xla::HloSharding xla_hlo_sharding) {
return std::unique_ptr<HloSharding>(new HloSharding(
std::move(devices), memory_kind, std::move(xla_hlo_sharding)));
}
HloSharding::HloSharding(DeviceList devices, MemoryKind memory_kind,
xla::HloSharding xla_hlo_sharding)
: llvm::RTTIExtends<HloSharding, XlaCompatibleSharding>(
std::move(devices), memory_kind, xla_hlo_sharding.IsReplicated()),
xla_hlo_sharding_(std::move(xla_hlo_sharding)) {}
absl::StatusOr<Shape> HloSharding::GetShardShape(const Shape& shape) const {
if (shape.dims().size() != xla_hlo_sharding_.TiledDataRank()) {
return InvalidArgument(
"Numbers of dimensions don't match. From Shape %d vs from "
"HloSharding %d",
shape.dims().size(), xla_hlo_sharding_.TiledDataRank());
}
const absl::Span<const int64_t> tile_assignment_dims =
xla_hlo_sharding_.tile_assignment().dimensions();
Shape::Dimensions tile_shape;
tile_shape.reserve(shape.dims().size());
for (int64_t i = 0; i < shape.dims().size(); ++i) {
tile_shape.push_back(
xla::CeilOfRatio(shape.dims()[i], tile_assignment_dims[i]));
}
return Shape(std::move(tile_shape));
}
bool HloSharding::HasSamePartitioning(const Sharding& other) const {
if (this == &other) {
return true;
}
const auto* other_hlo_sharding = llvm::dyn_cast<HloSharding>(&other);
if (!other_hlo_sharding) {
return false;
}
return xla_hlo_sharding_ == other_hlo_sharding->xla_hlo_sharding_;
}
absl::StatusOr<std::unique_ptr<Sharding>> HloSharding::WithDeviceAssignment(
std::optional<DeviceList> devices,
std::optional<MemoryKind> memory_kind) const {
if (devices.has_value() && devices->size() != devices_.size()) {
return InvalidArgument(
"HloSharding should have the same number of devices as the current "
"sharding, but was asked to have %d devices",
devices->size());
}
return Create(devices.value_or(devices_), memory_kind.value_or(memory_kind_),
xla_hlo_sharding_);
}
absl::StatusOr<std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>>>
HloSharding::Disassemble(const Shape& shape) const {
TF_ASSIGN_OR_RETURN(auto index_domains, IndexDomains(shape));
std::vector<std::pair<Shape, std::shared_ptr<const Sharding>>> result;
result.reserve(index_domains.size());
for (int i = 0; i < index_domains.size(); ++i) {
result.push_back({index_domains[i].shape(),
SingleDeviceSharding::Create(devices_[i], memory_kind_)});
}
return result;
}
absl::StatusOr<
std::vector<std::pair<DynamicShape, std::shared_ptr<const Sharding>>>>
HloSharding::Disassemble(const DynamicShape& dynamic_shape) const {
return InvalidArgument(
"HloSharding can only disassemble static shape, but was asked "
"to disassemble dynamic shape %s",
dynamic_shape.DebugString());
}
absl::StatusOr<std::vector<IndexDomain>> HloSharding::IndexDomains(
const Shape& shape) const {
auto format_shape = [&] {
return absl::StrCat("[", absl::StrJoin(shape.dims(), ","), "]");
};
std::vector<IndexDomain> result;
const int num_devices = devices_.size();
if (xla_hlo_sharding_.IsReplicated() || xla_hlo_sharding_.IsTileMaximal()) {
IndexDomain element(shape);
result.resize(num_devices, element);
return result;
}
if (!xla_hlo_sharding_.IsTiled()) {
return IndexDomainsSlowPath(xla_hlo_sharding_, devices_, shape);
}
for (const xla::OpSharding::Type subgroup_type :
xla_hlo_sharding_.subgroup_types()) {
if (subgroup_type != xla::OpSharding::REPLICATED) {
return IndexDomainsSlowPath(xla_hlo_sharding_, devices_, shape);
}
}
if (xla_hlo_sharding_.tile_assignment().num_elements() != num_devices) {
return absl::InvalidArgumentError(absl::StrFormat(
"sharding's tile_assignment_devices and device count does not "
"match: %d vs. %d; shape=%s, sharding=%s",
xla_hlo_sharding_.tile_assignment().num_elements(), num_devices,
format_shape(), DebugString()));
}
if (xla_hlo_sharding_.TotalNumTiles() != num_devices) {
return absl::InvalidArgumentError(
absl::StrFormat("sharding's tile count and device count does not "
"match: %d vs. %d; shape=%s, sharding=%s",
xla_hlo_sharding_.TotalNumTiles(), num_devices,
format_shape(), xla_hlo_sharding_.ToString()));
}
const int64_t tiled_data_rank = xla_hlo_sharding_.TiledDataRank();
if (shape.dims().size() != tiled_data_rank) {
return absl::InvalidArgumentError(
absl::StrFormat("shape must have %d dimensions, but has %d dimensions: "
"shape=%s, sharding=%s",
tiled_data_rank, shape.dims().size(), format_shape(),
xla_hlo_sharding_.ToString()));
}
TF_ASSIGN_OR_RETURN(Shape tile_shape, GetShardShape(shape));
const absl::Span<const int64_t> tile_shape_dims = tile_shape.dims();
const absl::Span<const int64_t> tile_assignment_dims =
xla_hlo_sharding_.tile_assignment().dimensions();
const int64_t replication_dim = xla_hlo_sharding_.SubgroupReplicationDim();
int64_t num_replicas;
if (replication_dim == -1) {
num_replicas = 1;
} else {
num_replicas = tile_assignment_dims[replication_dim];
}
Index::Elements unique_tile_index(shape.dims().size());
std::vector<Index::Elements> origins(num_devices);
Index::Elements origin(shape.dims().size());
int64_t device_assignment_index = 0;
do {
for (int64_t i = 0; i < shape.dims().size(); ++i) {
origin[i] =
std::min(tile_shape_dims[i] * unique_tile_index[i], shape.dims()[i]);
}
for (int64_t i = 0; i < num_replicas; ++i) {
CHECK_LT(device_assignment_index, num_devices);
const int64_t device_id = xla_hlo_sharding_.tile_assignment()
.array()
.data()[device_assignment_index];
if (device_id < 0 || device_id >= num_devices) {
return absl::InvalidArgumentError(
absl::StrFormat("Out of range device id in device_assignment: %d; "
"valid range: [0, %d)",
device_id, num_devices));
}
origins[device_id] = origin;
++device_assignment_index;
}
} while (NextIndex(&unique_tile_index, tile_assignment_dims));
result.reserve(num_devices);
for (int device_idx = 0; device_idx < num_devices; ++device_idx) {
Shape::Dimensions actual_tile_shape;
actual_tile_shape.reserve(tile_shape_dims.size());
for (int i = 0; i < tile_shape_dims.size(); ++i) {
actual_tile_shape.push_back(std::min(
tile_shape_dims[i], shape.dims()[i] - origins[device_idx][i]));
}
result.push_back(IndexDomain(Index(origins[device_idx]),
Shape(std::move(actual_tile_shape))));
}
return result;
}
std::string HloSharding::DebugString() const {
return absl::StrFormat("HloSharding(memory_kind: %s, hlo_sharding: %s)",
memory_kind_.DebugString(),
xla_hlo_sharding_.ToString());
}
std::vector<IndexDomain> TEST_HloShardingIndexDomainsSlowPath(
const HloSharding& hlo_sharding, const Shape& shape) {
return IndexDomainsSlowPath(hlo_sharding.xla_hlo_sharding(),
hlo_sharding.devices(), shape);
}
}
} | #include "xla/python/pjrt_ifrt/xla_sharding.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/python/ifrt/index.h"
#include "xla/python/ifrt/index_domain.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/sharding_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
class HloShardingTest : public test_util::ShardingTest {};
TEST_P(HloShardingTest, IsFullyReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
{
auto xla_hlo_sharding = xla::HloSharding::Replicate();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_TRUE(sharding->IsFullyReplicated());
}
{
auto xla_hlo_sharding = xla::HloSharding::IotaTile({1, 6});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
{
auto xla_hlo_sharding = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_FALSE(sharding->IsFullyReplicated());
}
}
TEST_P(HloShardingTest, GetShardShape) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6})),
IsOkAndHolds(Shape({3, 2})));
EXPECT_THAT(sharding->GetShardShape(Shape({6, 6, 6})),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("Numbers of dimensions don't match. From "
"Shape 3 vs from HloSharding 2")));
}
TEST_P(HloShardingTest, HasSamePartitioning) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding0 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding0 =
HloSharding::Create(device_list0, MemoryKind(), xla_hlo_sharding0);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding0));
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
EXPECT_TRUE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({3, 1});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({3, 2});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
EXPECT_FALSE(sharding0->HasSamePartitioning(*sharding1));
}
}
TEST_P(HloShardingTest, WithDeviceAssignment) {
auto device_list0 = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding0 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding0 =
HloSharding::Create(device_list0, MemoryKind(), xla_hlo_sharding0);
{
auto device_list1 = GetDevices({3, 4, 5, 0, 1, 2});
auto xla_hlo_sharding1 = xla::HloSharding::IotaTile({2, 3});
std::shared_ptr<const HloSharding> sharding1 =
HloSharding::Create(device_list1, MemoryKind(), xla_hlo_sharding1);
TF_ASSERT_OK_AND_ASSIGN(
auto new_sharding,
sharding0->WithDeviceAssignment(device_list1,
std::nullopt));
EXPECT_EQ(*new_sharding, *sharding1);
}
{
auto device_list1 = GetDevices({0, 1, 2});
EXPECT_THAT(
sharding0->WithDeviceAssignment(device_list1,
std::nullopt),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("HloSharding should have the same number of "
"devices as the current sharding, but was asked to "
"have 3 devices")));
}
}
TEST_P(HloShardingTest, IndexDomainsWithReplication) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Replicate();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(shape), IndexDomain(shape)));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithReplication) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Replicate();
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({10, 20}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithUnevenTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({11, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({6, 20})),
IndexDomain(Index({6, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithUnevenTile) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({11, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(2));
for (int i = 0; i < 2; ++i) {
const auto& [shape, sharding] = disassembled[i];
if (i == 0) {
EXPECT_EQ(shape, Shape({6, 20}));
} else {
EXPECT_EQ(shape, Shape({5, 20}));
}
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithPartialTile) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding =
xla::HloSharding::PartialTile(xla::TileAssignment({2, 1, 3}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithPartialTile) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding =
xla::HloSharding::PartialTile(xla::TileAssignment({2, 1, 3}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithSubgroupReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::REPLICATED});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithSubgroupReplicated) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::REPLICATED});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, IndexDomainsWithSubgroupMaximalSlowPath) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::MAXIMAL});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto index_domains, sharding->IndexDomains(shape));
EXPECT_THAT(index_domains,
ElementsAre(IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({0, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20})),
IndexDomain(Index({5, 0}), Shape({5, 20}))));
EXPECT_THAT(
index_domains,
ElementsAreArray(TEST_HloShardingIndexDomainsSlowPath(*sharding, shape)));
}
TEST_P(HloShardingTest, DisassembleWithSubgroupMaximalSlowPath) {
auto device_list = GetDevices({0, 1, 2, 3, 4, 5});
auto xla_hlo_sharding = xla::HloSharding::Subgroup(
xla::TileAssignment({2, 1, 3}), {xla::OpSharding::MAXIMAL});
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
TF_ASSERT_OK_AND_ASSIGN(auto disassembled, sharding->Disassemble(shape));
ASSERT_THAT(disassembled, SizeIs(6));
for (int i = 0; i < 6; ++i) {
const auto& [shape, sharding] = disassembled[i];
EXPECT_EQ(shape, Shape({5, 20}));
EXPECT_EQ(*sharding,
*SingleDeviceSharding::Create(device_list[i], MemoryKind()));
}
}
TEST_P(HloShardingTest, DisassembleFailsWithInvalidDeviceCount) {
auto device_list = GetDevices({0});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10, 20});
EXPECT_THAT(sharding->Disassemble(shape),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("sharding's tile_assignment_devices and "
"device count does not match: 2 vs. 1")));
}
TEST_P(HloShardingTest, DisassembleFailsWithMismatchingShapeDimsSize) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2, 1}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
Shape shape({10});
EXPECT_THAT(
sharding->Disassemble(shape),
StatusIs(
tsl::error::INVALID_ARGUMENT,
HasSubstr("shape must have 2 dimensions, but has 1 dimensions")));
}
TEST_P(HloShardingTest, DisassembleFailsWithDynamicShape) {
auto device_list = GetDevices({0, 1});
auto xla_hlo_sharding = xla::HloSharding::Tile(
xla::TileAssignment((absl::Span<const int64_t>){2}));
std::shared_ptr<const HloSharding> sharding =
HloSharding::Create(device_list, MemoryKind(), xla_hlo_sharding);
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10}), BoundedDynamicShapeTag({true})));
EXPECT_THAT(sharding->Disassemble(dynamic_shape),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("can only disassemble static shape")));
}
INSTANTIATE_TEST_SUITE_P(NumDevices, HloShardingTest,
testing::Values(test_util::ShardingTestParam{
.num_devices = 6, .num_addressable_devices = 4}));
}
}
} |
1,799 | cpp | tensorflow/tensorflow | pjrt_executable | third_party/xla/xla/pjrt/pjrt_executable.cc | third_party/xla/xla/pjrt/pjrt_executable_test.cc | #ifndef XLA_PJRT_PJRT_EXECUTABLE_H_
#define XLA_PJRT_PJRT_EXECUTABLE_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/ffi/execution_context.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/executable_metadata.pb.h"
#include "xla/pjrt/execute_options.pb.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/service/compiler.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
class MultiSliceConfig {
public:
virtual ~MultiSliceConfig();
virtual int32_t NumSlices() const = 0;
virtual int32_t SliceId() const = 0;
virtual absl::flat_hash_map<int32_t, int32_t> NumDevicesPerSlice() const = 0;
virtual std::string Serialize() const = 0;
};
struct CompileOptions {
std::optional<std::vector<Shape>> argument_layouts;
bool parameter_is_tupled_arguments = false;
ExecutableBuildOptions executable_build_options;
bool compile_portable_executable = false;
int64_t profile_version = 0;
const MultiSliceConfig* multi_slice_config = nullptr;
using OptionOverride = std::variant<std::string, bool, int64_t, double>;
std::vector<std::pair<std::string, OptionOverride>> env_option_overrides;
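  // Overrides applied on top of the DebugOptions inside
  // executable_build_options by ApplyAllOptionOverrides; string values are
  // parsed into the target field's type via ApplyOptionFromString.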
std::optional<xla::Compiler::TargetConfig> target_config;
PrecisionConfig::Precision matrix_unit_operand_precision =
PrecisionConfig::DEFAULT;
absl::Status ApplyAllOptionOverrides();
absl::Status ApplyOption(const std::string& key, const OptionOverride& value);
absl::Status ApplyOptionFromString(
const tsl::protobuf::FieldDescriptor* field, const std::string& value);
static absl::StatusOr<
std::vector<std::pair<std::string, CompileOptions::OptionOverride>>>
LoadEnvOptionOverrides(
const google::protobuf::Map<std::string, xla::OptionOverrideProto>&
env_option_overrides);
void SerializeEnvOptionOverrides(
google::protobuf::Map<std::string, xla::OptionOverrideProto>*
output_env_option_overrides) const;
absl::StatusOr<CompileOptionsProto> ToProto() const;
static absl::StatusOr<CompileOptions> FromProto(
const CompileOptionsProto& proto);
};
struct LoadOptions {
struct ComputationOrigin {
int x = 0;
int y = 0;
int z = 0;
};
std::optional<ComputationOrigin> computation_origin;
const MultiSliceConfig* multi_slice_config = nullptr;
};
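// Per-execution user context; its ffi::ExecutionContext is forwarded to XLA
// FFI handlers (e.g. custom calls) that run as part of the execution.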
class ExecuteContext {
public:
virtual ~ExecuteContext() = default;
ffi::ExecutionContext& ffi_context() { return ffi_context_; }
const ffi::ExecutionContext& ffi_context() const { return ffi_context_; }
private:
ffi::ExecutionContext ffi_context_;
};
struct PjRtTransferMetadata {
Shape device_shape;
};
class PjRtChunk;
class CopyToDeviceStream;
struct SendCallback {
int64_t channel_id;
std::function<absl::Status(const PjRtTransferMetadata& metadata,
PjRtChunk chunk, size_t total_size_in_bytes,
bool done)>
callback;
};
struct RecvCallback {
int64_t channel_id;
std::function<void(const PjRtTransferMetadata& metadata,
std::unique_ptr<CopyToDeviceStream> stream)>
callback;
};
struct ExecuteOptions {
bool arguments_are_tupled = false;
bool untuple_result = false;
int32_t launch_id = 0;
const ExecuteContext* context = nullptr;
bool strict_shape_checking = true;
const MultiSliceConfig* multi_slice_config = nullptr;
absl::Span<const std::vector<SendCallback>> send_callbacks;
absl::Span<const std::vector<RecvCallback>> recv_callbacks;
bool use_major_to_minor_data_layout_for_callbacks = false;
enum class ExecutionMode { kDefault = 0, kSynchronous, kAsynchronous };
ExecutionMode execution_mode = ExecutionMode::kDefault;
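  // Input buffers at these indices will not be donated to the computation,
  // even if input/output aliasing would otherwise allow it.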
absl::flat_hash_set<int> non_donatable_input_indices;
absl::StatusOr<ExecuteOptionsProto> ToProto() const;
static absl::StatusOr<ExecuteOptions> FromProto(
const ExecuteOptionsProto& proto);
};
struct CompiledMemoryStats {
int64_t generated_code_size_in_bytes = 0;
int64_t argument_size_in_bytes = 0;
int64_t output_size_in_bytes = 0;
int64_t alias_size_in_bytes = 0;
int64_t temp_size_in_bytes = 0;
int64_t host_generated_code_size_in_bytes = 0;
int64_t host_argument_size_in_bytes = 0;
int64_t host_output_size_in_bytes = 0;
int64_t host_alias_size_in_bytes = 0;
int64_t host_temp_size_in_bytes = 0;
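  // Serialized xla::HloProto of the compiled module, when available.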
std::string serialized_hlo_proto = "";
std::string DebugString() const;
CompiledMemoryStatsProto ToProto();
static CompiledMemoryStats FromProto(const CompiledMemoryStatsProto& proto);
void PopulateBufferStatsFromAllocations(
absl::Span<const BufferAllocation> allocs);
};
class PjRtExecutable {
public:
virtual ~PjRtExecutable() = default;
virtual int num_replicas() const = 0;
virtual int num_partitions() const = 0;
virtual int64_t SizeOfGeneratedCodeInBytes() const = 0;
virtual absl::string_view name() const = 0;
virtual absl::StatusOr<std::vector<std::shared_ptr<HloModule>>>
GetHloModules() const = 0;
virtual absl::StatusOr<std::vector<Shape>> GetOutputShapes() const;
virtual absl::StatusOr<std::vector<std::vector<PrimitiveType>>>
GetOutputElementTypes() const;
virtual absl::StatusOr<std::vector<std::vector<DimensionVector>>>
GetOutputDimensions() const;
virtual absl::StatusOr<std::vector<std::unique_ptr<PjRtLayout>>>
GetParameterLayouts() const;
virtual absl::StatusOr<std::vector<std::unique_ptr<PjRtLayout>>>
GetOutputLayouts() const;
virtual absl::StatusOr<std::vector<std::vector<absl::string_view>>>
GetOutputMemoryKinds() const = 0;
virtual std::optional<std::vector<OpSharding>> GetParameterShardings() const;
virtual std::optional<std::vector<OpSharding>> GetOutputShardings() const;
virtual absl::StatusOr<CompiledMemoryStats> GetCompiledMemoryStats() const {
return Unimplemented("Retrieving CompiledMemoryStats is not supported.");
}
virtual absl::StatusOr<absl::flat_hash_map<std::string, PjRtValueType>>
GetCostAnalysis() const = 0;
virtual absl::StatusOr<std::string> SerializeExecutable() const {
return Unimplemented("Serializing executable is not supported.");
}
virtual absl::StatusOr<std::string> FingerprintExecutable() const {
return Unimplemented("Fingerprinting executable is not supported.");
}
virtual absl::StatusOr<struct CompileOptions> GetCompileOptions() const {
return Unimplemented("CompileOptions not available.");
}
};
class PjRtExecutableUtil {
public:
static absl::StatusOr<absl::flat_hash_map<std::string, PjRtValueType>>
RunHloCostAnalysis(const PjRtExecutable& executable,
HloCostAnalysis* hlo_cost_analysis);
static absl::StatusOr<absl::flat_hash_map<std::string, PjRtValueType>>
RunHloCostAnalysis(
const std::vector<std::shared_ptr<xla::HloModule>>& hlo_modules,
HloCostAnalysis* hlo_cost_analysis);
};
}
#endif
#include "xla/pjrt/pjrt_executable.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/layout.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/execute_options.pb.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_layout.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
void SetOptionOverride(OptionOverrideProto& option, const std::string& value) {
option.set_string_field(value);
}
void SetOptionOverride(OptionOverrideProto& option, bool value) {
option.set_bool_field(value);
}
void SetOptionOverride(OptionOverrideProto& option, int64_t value) {
option.set_int_field(value);
}
void SetOptionOverride(OptionOverrideProto& option, double value) {
option.set_double_field(value);
}
}
absl::StatusOr<CompileOptionsProto> CompileOptions::ToProto() const {
CompileOptionsProto output;
if (argument_layouts.has_value()) {
for (const auto& layout : *argument_layouts) {
*output.add_argument_layouts() = layout.ToProto();
}
}
output.set_parameter_is_tupled_arguments(parameter_is_tupled_arguments);
TF_ASSIGN_OR_RETURN(*output.mutable_executable_build_options(),
executable_build_options.ToProto());
output.set_compile_portable_executable(compile_portable_executable);
output.set_profile_version(profile_version);
if (multi_slice_config != nullptr) {
output.set_serialized_multi_slice_config(multi_slice_config->Serialize());
}
for (auto& env_option_override : env_option_overrides) {
auto& tmp =
(*output.mutable_env_option_overrides())[env_option_override.first];
std::visit([&](const auto& arg) { SetOptionOverride(tmp, arg); },
env_option_override.second);
}
if (target_config.has_value()) {
*output.mutable_target_config() = target_config->ToProto();
}
return output;
}
void CompileOptions::SerializeEnvOptionOverrides(
google::protobuf::Map<std::string, xla::OptionOverrideProto>*
output_env_option_overrides) const {
for (auto& env_option_override : env_option_overrides) {
auto& tmp = (*output_env_option_overrides)[env_option_override.first];
std::visit([&](const auto& arg) { SetOptionOverride(tmp, arg); },
env_option_override.second);
}
}
absl::StatusOr<CompileOptions> CompileOptions::FromProto(
const CompileOptionsProto& proto) {
if (!proto.serialized_multi_slice_config().empty()) {
return Unimplemented(
"multi_slice_config not supported in CompileOptions::FromProto.");
}
CompileOptions output;
if (proto.argument_layouts_size() > 0) {
std::vector<Shape> output_argument_layouts;
output_argument_layouts.reserve(proto.argument_layouts_size());
for (const auto& argument_layout : proto.argument_layouts()) {
output_argument_layouts.emplace_back(Shape(argument_layout));
}
output.argument_layouts = std::move(output_argument_layouts);
}
output.parameter_is_tupled_arguments = proto.parameter_is_tupled_arguments();
TF_ASSIGN_OR_RETURN(
ExecutableBuildOptions executable_build_options,
ExecutableBuildOptionsFromProto(proto.executable_build_options()));
output.executable_build_options = executable_build_options;
output.compile_portable_executable = proto.compile_portable_executable();
output.profile_version = proto.profile_version();
TF_ASSIGN_OR_RETURN(output.env_option_overrides,
LoadEnvOptionOverrides(proto.env_option_overrides()));
if (proto.has_target_config()) {
output.target_config = xla::Compiler::TargetConfig(proto.target_config());
}
return output;
}
MultiSliceConfig::~MultiSliceConfig() = default;
absl::StatusOr<ExecuteOptionsProto> ExecuteOptions::ToProto() const {
ExecuteOptionsProto proto;
proto.set_arguments_are_tupled(arguments_are_tupled);
proto.set_untuple_result(untuple_result);
proto.set_launch_id(launch_id);
if (context != nullptr) {
return absl::UnimplementedError(
"ExecuteOptions with non-nullptr context is not serializable");
}
proto.set_strict_shape_checking(strict_shape_checking);
if (multi_slice_config != nullptr) {
return absl::UnimplementedError(
"ExecuteOptions with multi-slice config is not serializable");
}
if (!send_callbacks.empty() || !recv_callbacks.empty()) {
return absl::UnimplementedError(
"ExecuteOptions with send/recv calbacks is not serializable");
}
proto.set_use_major_to_minor_data_layout_for_callbacks(
use_major_to_minor_data_layout_for_callbacks);
switch (execution_mode) {
case ExecutionMode::kDefault:
proto.set_execution_mode(EXECUTION_MODE_DEFAULT);
break;
case ExecutionMode::kSynchronous:
proto.set_execution_mode(EXECUTION_MODE_SYNCHRONOUS);
break;
case ExecutionMode::kAsynchronous:
proto.set_execution_mode(EXECUTION_MODE_ASYNCHRONOUS);
break;
}
proto.mutable_non_donatable_input_indices()->Add(
non_donatable_input_indices.begin(), non_donatable_input_indices.end());
return proto;
}
absl::StatusOr<ExecuteOptions> ExecuteOptions::FromProto(
const ExecuteOptionsProto& proto) {
ExecuteOptions options;
options.arguments_are_tupled = proto.arguments_are_tupled();
options.untuple_result = proto.untuple_result();
options.launch_id = proto.launch_id();
options.strict_shape_checking = proto.strict_shape_checking();
options.use_major_to_minor_data_layout_for_callbacks =
proto.use_major_to_minor_data_layout_for_callbacks();
switch (proto.execution_mode()) {
case EXECUTION_MODE_DEFAULT:
options.execution_mode = ExecutionMode::kDefault;
break;
case EXECUTION_MODE_SYNCHRONOUS:
options.execution_mode = ExecutionMode::kSynchronous;
break;
case EXECUTION_MODE_ASYNCHRONOUS:
options.execution_mode = ExecutionMode::kAsynchronous;
break;
default:
return absl::UnimplementedError(
absl::StrCat("Unknown execution mode: ", proto.execution_mode()));
}
options.non_donatable_input_indices.insert(
proto.non_donatable_input_indices().begin(),
proto.non_donatable_input_indices().end());
return options;
}
CompiledMemoryStatsProto CompiledMemoryStats::ToProto() {
CompiledMemoryStatsProto proto;
proto.set_generated_code_size_in_bytes(generated_code_size_in_bytes);
proto.set_argument_size_in_bytes(argument_size_in_bytes);
proto.set_output_size_in_bytes(output_size_in_bytes);
proto.set_alias_size_in_bytes(alias_size_in_bytes);
proto.set_temp_size_in_bytes(temp_size_in_bytes);
proto.mutable_hlo_proto()->ParseFromString(serialized_hlo_proto);
proto.set_host_generated_code_size_in_bytes(
host_generated_code_size_in_bytes);
proto.set_host_argument_size_in_bytes(host_argument_size_in_bytes);
proto.set_host_output_size_in_bytes(host_output_size_in_bytes);
proto.set_host_alias_size_in_bytes(host_alias_size_in_bytes);
proto.set_host_temp_size_in_bytes(host_temp_size_in_bytes);
return proto;
}
CompiledMemoryStats CompiledMemoryStats::FromProto(
const CompiledMemoryStatsProto& proto) {
CompiledMemoryStats stats;
stats.generated_code_size_in_bytes = proto.generated_code_size_in_bytes();
stats.argument_size_in_bytes = proto.argument_size_in_bytes();
stats.output_size_in_bytes = proto.output_size_in_bytes();
stats.alias_size_in_bytes = proto.alias_size_in_bytes();
stats.temp_size_in_bytes = proto.temp_size_in_bytes();
stats.serialized_hlo_proto = proto.hlo_proto().SerializeAsString();
stats.host_generated_code_size_in_bytes =
proto.host_generated_code_size_in_bytes();
stats.host_argument_size_in_bytes = proto.host_argument_size_in_bytes();
stats.host_output_size_in_bytes = proto.host_output_size_in_bytes();
stats.host_alias_size_in_bytes = proto.host_alias_size_in_bytes();
stats.host_temp_size_in_bytes = proto.host_temp_size_in_bytes();
return stats;
}
void CompiledMemoryStats::PopulateBufferStatsFromAllocations(
absl::Span<const BufferAllocation> allocs) {
argument_size_in_bytes = 0;
output_size_in_bytes = 0;
temp_size_in_bytes = 0;
alias_size_in_bytes = 0;
host_argument_size_in_bytes = 0;
host_output_size_in_bytes = 0;
host_temp_size_in_bytes = 0;
host_alias_size_in_bytes = 0;
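  // Classify each allocation as argument/output/temp/alias (the categories
  // are not mutually exclusive) and attribute its size to host or device
  // depending on the memory space recorded in its defining positions' layouts.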
for (auto& alloc : allocs) {
int64_t alloc_memory_space = -1;
for (const auto& [value, _] : alloc.assigned_buffers()) {
const HloPosition& defining_position = value->defining_position();
int64_t memory_space = Layout::kDefaultMemorySpace;
if (defining_position.shape().has_layout()) {
memory_space = defining_position.shape().layout().memory_space();
}
if (alloc_memory_space == -1) {
alloc_memory_space = memory_space;
} else {
        CHECK_EQ(alloc_memory_space, memory_space)
            << "expected same memory space for all assignments in allocation";
}
}
bool is_host = alloc_memory_space == Layout::kHostMemorySpace;
int64_t size = alloc.size();
if (alloc.is_entry_computation_parameter()) {
if (is_host) {
host_argument_size_in_bytes += size;
} else {
argument_size_in_bytes += size;
}
if (alloc.is_parameter_aliased_with_output()) {
if (is_host) {
host_alias_size_in_bytes += size;
} else {
alias_size_in_bytes += size;
}
}
}
if (alloc.maybe_live_out()) {
if (is_host) {
host_output_size_in_bytes += size;
} else {
output_size_in_bytes += size;
}
}
if (alloc.IsPreallocatedTempBuffer()) {
if (is_host) {
host_temp_size_in_bytes += size;
} else {
temp_size_in_bytes += size;
}
}
}
}
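// Flattens a (possibly nested tuple) OpSharding into a flat list of leaf
// shardings, depth-first, matching the flattened order of the tuple shape.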
void GetOpSharding(std::vector<OpSharding>& out, const OpSharding& sharding) {
if (sharding.type() == OpSharding::TUPLE) {
for (const OpSharding& s : sharding.tuple_shardings()) {
GetOpSharding(out, s);
}
} else {
out.push_back(sharding);
}
}
std::optional<std::vector<OpSharding>> PjRtExecutable::GetOutputShardings()
const {
auto modules = GetHloModules();
if (!modules.ok() || (*modules).empty() ||
!(*modules)[0]->has_spmd_output_sharding()) {
return std::nullopt;
}
std::vector<OpSharding> out;
GetOpSharding(out, (*modules)[0]->spmd_output_sharding().ToProto());
return out;
}
std::optional<std::vector<OpSharding>> PjRtExecutable::GetParameterShardings()
const {
auto modules = GetHloModules();
if (!modules.ok() || (*modules).empty() ||
!(*modules)[0]->has_spmd_parameters_shardings()) {
return std::nullopt;
}
std::vector<OpSharding> out;
for (const auto& s : (*modules)[0]->spmd_parameters_shardings()) {
GetOpSharding(out, s.ToProto());
}
return out;
}
absl::StatusOr<std::vector<Shape>> PjRtExecutable::GetOutputShapes() const {
TF_ASSIGN_OR_RETURN(auto modules, GetHloModules());
std::vector<Shape> output_shapes;
output_shapes.reserve(modules.size());
for (const auto& module : modules) {
output_shapes.push_back(module->result_shape());
}
return output_shapes;
}
absl::StatusOr<std::vector<std::vector<PrimitiveType>>>
PjRtExecutable::GetOutputElementTypes() const {
TF_ASSIGN_OR_RETURN(auto output_shapes, GetOutputShapes());
std::vector<std::vector<PrimitiveType>> output_element_types;
output_element_types.reserve(output_shapes.size());
for (int i = 0; i < output_shapes.size(); ++i) {
const Shape& output_shape = output_shapes[i];
std::vector<PrimitiveType> element_types;
if (output_shape.IsTuple()) {
const auto& tuple_shapes = output_shape.tuple_shapes();
element_types.reserve(tuple_shapes.size());
for (int j = 0; j < tuple_shapes.size(); ++j) {
if (tuple_shapes[j].IsTuple()) {
return Unimplemented(
"GetOutputElementTypes() doesn't support programs with "
"nested-tupled outputs.");
}
element_types.push_back(tuple_shapes[j].element_type());
}
} else {
element_types.reserve(1);
element_types.push_back(output_shape.element_type());
}
output_element_types.push_back(std::move(element_types));
}
return output_element_types;
}
absl::StatusOr<std::vector<std::vector<DimensionVector>>>
PjRtExecutable::GetOutputDimensions() const {
TF_ASSIGN_OR_RETURN(auto output_shapes, GetOutputShapes());
std::vector<std::vector<DimensionVector>> output_dimensions;
output_dimensions.reserve(output_shapes.size());
for (int i = 0; i < output_shapes.size(); ++i) {
const Shape& output_shape = output_shapes[i];
std::vector<DimensionVector> dimensions;
if (output_shape.IsTuple()) {
const auto& tuple_shapes = output_shape.tuple_shapes();
dimensions.reserve(tuple_shapes.size());
for (int j = 0; j < tuple_shapes.size(); ++j) {
if (tuple_shapes[j].IsTuple()) {
return Unimplemented(
"GetOutputDimensions() doesn't support programs with "
"nested-tupled outputs.");
}
dimensions.push_back(
ShapeUtil::CreateDimensionVectorFromShape(tuple_shapes[j]));
}
} else {
dimensions.reserve(1);
dimensions.push_back(
ShapeUtil::CreateDimensionVectorFromShape(output_shape));
}
output_dimensions.push_back(std::move(dimensions));
}
return output_dimensions;
}
absl::StatusOr<std::vector<std::unique_ptr<PjRtLayout>>>
PjRtExecutable::GetParameterLayouts() const {
TF_ASSIGN_OR_RETURN(std::vector<std::shared_ptr<HloModule>> hlo_modules,
GetHloModules());
if (hlo_modules.size() > 1) {
return Unimplemented(
"PjRtExecutable::GetParameterLayouts doesn't support MPMD "
"executables.");
}
if (hlo_modules.empty()) {
return InvalidArgument(
"PjRtExecutable::GetParameterLayouts: couldn't retrieve HLO module "
"from executable.");
}
ComputationLayout comp_layout = hlo_modules[0]->entry_computation_layout();
TF_ASSIGN_OR_RETURN(std::vector<Layout> layouts,
comp_layout.FlattenedParameterLayouts());
  // The dump truncated this function mid-statement; the completion below is a
  // minimal sketch that wraps each flattened xla::Layout in the concrete
  // PjRtXlaLayout from xla/pjrt/pjrt_layout.h.
  std::vector<std::unique_ptr<PjRtLayout>> result;
  result.reserve(layouts.size());
  for (const Layout& layout : layouts) {
    result.push_back(std::make_unique<PjRtXlaLayout>(layout));
  }
  return result;
}
} | #include "xla/pjrt/pjrt_executable.h"
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/client/executable_build_options.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
TEST(CompileOptionsTest, Serialization) {
CompileOptions src;
src.compile_portable_executable = true;
src.parameter_is_tupled_arguments = true;
src.profile_version = 1;
src.argument_layouts = {ShapeUtil::MakeShape(S32, {1})};
ExecutableBuildOptions build_option;
build_option.set_device_assignment(DeviceAssignment(1, 1));
src.executable_build_options = build_option;
TF_ASSERT_OK_AND_ASSIGN(CompileOptionsProto proto, src.ToProto());
TF_ASSERT_OK_AND_ASSIGN(CompileOptions output,
CompileOptions::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(CompileOptionsProto output_proto, src.ToProto());
EXPECT_EQ(proto.SerializeAsString(), output_proto.SerializeAsString());
}
TEST(CompileOptionsTest, MultiSliceConfigNotSupported) {
CompileOptionsProto proto;
*proto.mutable_serialized_multi_slice_config() = "multi_size_config";
auto option = CompileOptions::FromProto(proto);
EXPECT_THAT(
option.status(),
StatusIs(
absl::StatusCode::kUnimplemented,
"multi_slice_config not supported in CompileOptions::FromProto."));
}
TEST(ExecuteOptionsTest, Serialization) {
ExecuteOptions src;
src.arguments_are_tupled = true;
src.untuple_result = false;
src.launch_id = 1234;
src.strict_shape_checking = true;
src.execution_mode = ExecuteOptions::ExecutionMode::kAsynchronous;
src.non_donatable_input_indices = {2, 3};
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptionsProto proto, src.ToProto());
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptions output,
ExecuteOptions::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptionsProto output_proto, src.ToProto());
EXPECT_EQ(proto.SerializeAsString(), output_proto.SerializeAsString());
}
TEST(ExecuteOptionsTest, SendRecvNotSupported) {
ExecuteOptions options;
std::vector<std::vector<SendCallback>> send_callbacks(1);
options.send_callbacks = send_callbacks;
std::vector<std::vector<RecvCallback>> recv_callbacks(1);
options.recv_callbacks = recv_callbacks;
EXPECT_THAT(
options.ToProto(),
StatusIs(absl::StatusCode::kUnimplemented,
"ExecuteOptions with send/recv calbacks is not serializable"));
}
TEST(ExecuteOptionsTest, ApplyOptionsCanParseStrings) {
using OptionOverride = std::variant<std::string, bool, int64_t, double>;
std::vector<std::pair<std::string, OptionOverride>> env_override_options;
env_override_options = {
{"xla_gpu_use_runtime_fusion", std::string("True")},
{"xla_gpu_graph_min_graph_size", std::string("2")},
{"xla_gpu_redzone_scratch_max_megabytes", std::string("3400")},
{"xla_gpu_auto_spmd_partitioning_memory_budget_ratio", 0.9},
{"xla_gpu_pgle_profile_file_or_directory_path", std::string("abc")}};
CompileOptions src;
src.env_option_overrides = env_override_options;
  auto s = src.ApplyAllOptionOverrides();
  EXPECT_TRUE(s.ok());
auto& debug_options = src.executable_build_options.debug_options();
EXPECT_EQ(debug_options.xla_gpu_use_runtime_fusion(), true);
EXPECT_EQ(debug_options.xla_gpu_graph_min_graph_size(), 2);
EXPECT_EQ(debug_options.xla_gpu_redzone_scratch_max_megabytes(), 3400);
EXPECT_FLOAT_EQ(
debug_options.xla_gpu_auto_spmd_partitioning_memory_budget_ratio(), 0.9);
EXPECT_EQ(debug_options.xla_gpu_pgle_profile_file_or_directory_path(), "abc");
}
TEST(CompiledMemoryStatsTest, Serialization) {
CompiledMemoryStats stats;
stats.generated_code_size_in_bytes = 2;
stats.argument_size_in_bytes = 3;
stats.output_size_in_bytes = 5;
stats.alias_size_in_bytes = 7;
stats.temp_size_in_bytes = 11;
stats.host_generated_code_size_in_bytes = 13;
stats.host_argument_size_in_bytes = 17;
stats.host_output_size_in_bytes = 19;
stats.host_alias_size_in_bytes = 23;
stats.host_temp_size_in_bytes = 29;
CompiledMemoryStatsProto serialized = stats.ToProto();
CompiledMemoryStats deserialized = CompiledMemoryStats::FromProto(serialized);
EXPECT_EQ(serialized.SerializeAsString(),
deserialized.ToProto().SerializeAsString());
}
}
} |